author    David S. Miller <davem@davemloft.net>  2011-07-14 10:56:40 -0400
committer David S. Miller <davem@davemloft.net>  2011-07-14 10:56:40 -0400
commit    6a7ebdf2fd15417e87b4fd02ff411aeaca34da5f (patch)
tree      86b15d8cd3e25c97b348b5a61bdb16c02726a480 /drivers/scsi
parent    f6b72b6217f8c24f2a54988e58af858b4e66024d (diff)
parent    51414d41084496aaefd06d7f19eb8206e8bfac2d (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	net/bluetooth/l2cap_core.c
Diffstat (limited to 'drivers/scsi')
-rw-r--r--	drivers/scsi/Kconfig                          |   13
-rw-r--r--	drivers/scsi/Makefile                         |    1
-rw-r--r--	drivers/scsi/hpsa.c                           |   16
-rw-r--r--	drivers/scsi/ibmvscsi/ibmvfc.c                |    4
-rw-r--r--	drivers/scsi/isci/Makefile                    |    8
-rw-r--r--	drivers/scsi/isci/firmware/Makefile           |   19
-rw-r--r--	drivers/scsi/isci/firmware/README             |   36
-rw-r--r--	drivers/scsi/isci/firmware/create_fw.c        |   99
-rw-r--r--	drivers/scsi/isci/firmware/create_fw.h        |   77
-rw-r--r--	drivers/scsi/isci/host.c                      | 2751
-rw-r--r--	drivers/scsi/isci/host.h                      |  542
-rw-r--r--	drivers/scsi/isci/init.c                      |  565
-rw-r--r--	drivers/scsi/isci/isci.h                      |  538
-rw-r--r--	drivers/scsi/isci/phy.c                       | 1312
-rw-r--r--	drivers/scsi/isci/phy.h                       |  504
-rw-r--r--	drivers/scsi/isci/port.c                      | 1757
-rw-r--r--	drivers/scsi/isci/port.h                      |  306
-rw-r--r--	drivers/scsi/isci/port_config.c               |  754
-rw-r--r--	drivers/scsi/isci/probe_roms.c                |  243
-rw-r--r--	drivers/scsi/isci/probe_roms.h                |  249
-rw-r--r--	drivers/scsi/isci/registers.h                 | 1934
-rw-r--r--	drivers/scsi/isci/remote_device.c             | 1501
-rw-r--r--	drivers/scsi/isci/remote_device.h             |  352
-rw-r--r--	drivers/scsi/isci/remote_node_context.c       |  627
-rw-r--r--	drivers/scsi/isci/remote_node_context.h       |  224
-rw-r--r--	drivers/scsi/isci/remote_node_table.c         |  598
-rw-r--r--	drivers/scsi/isci/remote_node_table.h         |  188
-rw-r--r--	drivers/scsi/isci/request.c                   | 3391
-rw-r--r--	drivers/scsi/isci/request.h                   |  448
-rw-r--r--	drivers/scsi/isci/sas.h                       |  219
-rw-r--r--	drivers/scsi/isci/scu_completion_codes.h      |  283
-rw-r--r--	drivers/scsi/isci/scu_event_codes.h           |  336
-rw-r--r--	drivers/scsi/isci/scu_remote_node_context.h   |  229
-rw-r--r--	drivers/scsi/isci/scu_task_context.h          |  942
-rw-r--r--	drivers/scsi/isci/task.c                      | 1676
-rw-r--r--	drivers/scsi/isci/task.h                      |  367
-rw-r--r--	drivers/scsi/isci/unsolicited_frame_control.c |  225
-rw-r--r--	drivers/scsi/isci/unsolicited_frame_control.h |  278
38 files changed, 23605 insertions, 7 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4a1f029c4fe9..8d9dae89f065 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -830,6 +830,19 @@ config SCSI_GDTH
 	  To compile this driver as a module, choose M here: the
 	  module will be called gdth.
 
+config SCSI_ISCI
+	tristate "Intel(R) C600 Series Chipset SAS Controller"
+	depends on PCI && SCSI
+	depends on X86
+	# (temporary): known alpha quality driver
+	depends on EXPERIMENTAL
+	select SCSI_SAS_LIBSAS
+	---help---
+	  This driver supports the 6Gb/s SAS capabilities of the storage
+	  control unit found in the Intel(R) C600 series chipset.
+
+	  The experimental tag will be removed after the driver exits alpha
+
 config SCSI_GENERIC_NCR5380
 	tristate "Generic NCR5380/53c400 SCSI PIO support"
 	depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 7ad0b8a79ae8..3c08f5352b2d 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_SCSI_AACRAID)	+= aacraid/
 obj-$(CONFIG_SCSI_AIC7XXX_OLD)	+= aic7xxx_old.o
 obj-$(CONFIG_SCSI_AIC94XX)	+= aic94xx/
 obj-$(CONFIG_SCSI_PM8001)	+= pm8001/
+obj-$(CONFIG_SCSI_ISCI)		+= isci/
 obj-$(CONFIG_SCSI_IPS)		+= ips.o
 obj-$(CONFIG_SCSI_FD_MCS)	+= fd_mcs.o
 obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c6c0434d8034..6bba23a26303 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1037,6 +1037,7 @@ static void complete_scsi_command(struct CommandList *cp)
 	unsigned char sense_key;
 	unsigned char asc;      /* additional sense code */
 	unsigned char ascq;     /* additional sense code qualifier */
+	unsigned long sense_data_size;
 
 	ei = cp->err_info;
 	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
@@ -1051,10 +1052,14 @@ static void complete_scsi_command(struct CommandList *cp)
 	cmd->result |= ei->ScsiStatus;
 
 	/* copy the sense data whether we need to or not. */
-	memcpy(cmd->sense_buffer, ei->SenseInfo,
-		ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
-			SCSI_SENSE_BUFFERSIZE :
-			ei->SenseLen);
+	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
+		sense_data_size = SCSI_SENSE_BUFFERSIZE;
+	else
+		sense_data_size = sizeof(ei->SenseInfo);
+	if (ei->SenseLen < sense_data_size)
+		sense_data_size = ei->SenseLen;
+
+	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
 	scsi_set_resid(cmd, ei->ResidualCnt);
 
 	if (ei->CommandStatus == 0) {
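[Editorial note: the replacement logic clamps the copy length to the smallest of the destination buffer, the source field, and the length the controller reported; the old code compared only against SCSI_SENSE_BUFFERSIZE, so a SenseLen larger than sizeof(ei->SenseInfo) could read past the end of the source field. As a standalone sketch, with illustrative names rather than the hpsa ones:]

	#include <stddef.h>
	#include <string.h>

	static size_t clamped_sense_copy(unsigned char *dst, size_t dst_size,
					 const unsigned char *src, size_t src_size,
					 size_t reported_len)
	{
		size_t n = dst_size < src_size ? dst_size : src_size;

		if (reported_len < n)	/* never trust the reported length alone */
			n = reported_len;
		memcpy(dst, src, n);
		return n;
	}
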
@@ -2580,7 +2585,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		c->SG[0].Ext = 0; /* we are not chaining*/
 	}
 	hpsa_scsi_do_simple_cmd_core(h, c);
-	hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+	if (iocommand.buf_size > 0)
+		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
 	check_ioctl_unit_attention(h, c);
 
 	/* Copy the error information out */
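[Editorial note: the guard matters because this ioctl path only DMA-maps a buffer when iocommand.buf_size > 0, and unmapping an address that was never mapped is invalid. The general pattern, sketched with the generic DMA API rather than the pci_* wrappers the driver itself uses here:]

	#include <linux/dma-mapping.h>

	/* map conditionally, remember whether we did, and unmap symmetrically */
	static void demo_dma_roundtrip(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle = 0;
		bool mapped = false;

		if (len > 0) {
			handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
			mapped = !dma_mapping_error(dev, handle);
		}

		/* ... issue the command here ... */

		if (mapped)	/* unmap if and only if we mapped */
			dma_unmap_single(dev, handle, len, DMA_BIDIRECTIONAL);
	}
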
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b7650613b8c2..bdfa223a7dbb 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4306,8 +4306,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 		spin_lock_irqsave(vhost->host->host_lock, flags);
 		if (rc == H_CLOSED)
 			vio_enable_interrupts(to_vio_dev(vhost->dev));
-		else if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
-			 (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+		if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+		    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
 			dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
 		}
diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile
new file mode 100644
index 000000000000..3359e10e0d8f
--- /dev/null
+++ b/drivers/scsi/isci/Makefile
@@ -0,0 +1,8 @@
obj-$(CONFIG_SCSI_ISCI)	+= isci.o
isci-objs := init.o phy.o request.o \
	remote_device.o port.o \
	host.o task.o probe_roms.o \
	remote_node_context.o \
	remote_node_table.o \
	unsolicited_frame_control.o \
	port_config.o
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
new file mode 100644
index 000000000000..5f54461cabc5
--- /dev/null
+++ b/drivers/scsi/isci/firmware/Makefile
@@ -0,0 +1,19 @@
# Makefile for create_fw
#
CC=gcc
CFLAGS=-c -Wall -O2 -g
LDFLAGS=
SOURCES=create_fw.c
OBJECTS=$(SOURCES:.c=.o)
EXECUTABLE=create_fw

all: $(SOURCES) $(EXECUTABLE)

$(EXECUTABLE): $(OBJECTS)
	$(CC) $(LDFLAGS) $(OBJECTS) -o $@

.c.o:
	$(CC) $(CFLAGS) $< -o $@

clean:
	rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
new file mode 100644
index 000000000000..8056d2bd233b
--- /dev/null
+++ b/drivers/scsi/isci/firmware/README
@@ -0,0 +1,36 @@
This defines the temporary binary blob we are to pass to the SCU
driver to emulate the binary firmware that we will eventually be
able to access via NVRAM on the SCU controller.

The current size of the binary blob is expected to be 149 bytes or larger.

Header Types:
0x1: Phy Masks
0x2: Phy Gens
0x3: SAS Addrs
0xff: End of Data

ID string - u8[12]: "#SCU MAGIC#\0"
Version - u8: 1
SubVersion - u8: 0

Header Type - u8: 0x1
Size - u8: 8
Phy Mask - u32[8]

Header Type - u8: 0x2
Size - u8: 8
Phy Gen - u32[8]

Header Type - u8: 0x3
Size - u8: 8
Sas Addr - u64[8]

Header Type - u8: 0xff


==============================================================================

Place isci_firmware.bin in /lib/firmware
Be sure to recreate the initramfs image to include the firmware.

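[Editorial note: the layout above can be walked with a small TLV-style parser, sketched below. The function is hypothetical; only the field sizes come from the README. Reading Size as an element count (u32 elements for masks/gens, u64 for addresses) reproduces the quoted minimum: 14 + (2+32) + (2+32) + (2+64) + 1 = 149 bytes.]

	#include <stdint.h>
	#include <string.h>

	/* returns 0 on success, -1 on a malformed blob */
	static int parse_fw_blob(const uint8_t *buf, size_t len)
	{
		size_t off = 14;	/* 12-byte ID string + version + subversion */

		if (len < off || memcmp(buf, "#SCU MAGIC#", 12) != 0)
			return -1;

		while (off < len) {
			uint8_t type = buf[off++];

			if (type == 0xff)	/* End of Data */
				return 0;
			if (off >= len)
				return -1;

			/* Size counts elements: u32s for masks/gens, u64s for addrs */
			uint8_t size = buf[off++];
			size_t bytes = (type == 0x3) ? size * 8u : size * 4u;

			if (off + bytes > len)
				return -1;
			off += bytes;	/* 0x1 phy masks, 0x2 phy gens, 0x3 SAS addrs */
		}
		return -1;		/* ran out of data before the 0xff terminator */
	}
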
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
new file mode 100644
index 000000000000..c7a2887a7e95
--- /dev/null
+++ b/drivers/scsi/isci/firmware/create_fw.c
@@ -0,0 +1,99 @@
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <asm/types.h>
#include <strings.h>
#include <stdint.h>

#include "create_fw.h"
#include "../probe_roms.h"

int write_blob(struct isci_orom *isci_orom)
{
	FILE *fd;
	size_t count;

	fd = fopen(blob_name, "w+");
	if (!fd) {
		/* nothing was opened, so there is nothing to fclose() */
		perror("Open file for write failed");
		return -EIO;
	}

	count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
	if (count != 1) {
		perror("Write data failed");
		fclose(fd);
		return -EIO;
	}

	fclose(fd);

	return 0;
}

void set_binary_values(struct isci_orom *isci_orom)
{
	int ctrl_idx, phy_idx, port_idx;

	/* setting OROM signature */
	strncpy(isci_orom->hdr.signature, sig, strlen(sig));
	isci_orom->hdr.version = version;
	isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
	isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
	isci_orom->hdr.num_elements = num_elements;

	for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
		isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
		isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
			max_num_concurrent_dev_spin_up;
		isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
			enable_ssc;

		for (port_idx = 0; port_idx < 4; port_idx++)
			isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
				phy_mask[ctrl_idx][port_idx];

		for (phy_idx = 0; phy_idx < 4; phy_idx++) {
			isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
				(__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
			isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
				(__u32)(sas_addr[ctrl_idx][phy_idx]);

			isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
				afe_tx_amp_control0;
			isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
				afe_tx_amp_control1;
			isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
				afe_tx_amp_control2;
			isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
				afe_tx_amp_control3;
		}
	}
}

int main(void)
{
	int err;
	struct isci_orom *isci_orom;

	isci_orom = malloc(sizeof(struct isci_orom));
	if (!isci_orom)
		return -ENOMEM;
	memset(isci_orom, 0, sizeof(struct isci_orom));

	set_binary_values(isci_orom);

	err = write_blob(isci_orom);
	if (err < 0) {
		free(isci_orom);
		return err;
	}

	free(isci_orom);
	return 0;
}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
new file mode 100644
index 000000000000..5f298828d22e
--- /dev/null
+++ b/drivers/scsi/isci/firmware/create_fw.h
@@ -0,0 +1,77 @@
#ifndef _CREATE_FW_H_
#define _CREATE_FW_H_
#include "../probe_roms.h"


/* we are configuring for 2 SCUs */
static const int num_elements = 2;

/*
 * For all defined arrays:
 * elements 0-3 are for SCU0, ports 0-3
 * elements 4-7 are for SCU1, ports 0-3
 *
 * valid configurations for one SCU are:
 *  P0  P1  P2  P3
 * ----------------
 * 0xF,0x0,0x0,0x0 # 1 x4 port
 * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
 *                 # ports
 * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
 *                 # port
 * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
 * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
 *
 * if there is a port/phy on which you do not wish to override the default
 * values, use the value assigned to UNINIT_PARAM (255).
 */

/* discovery mode type (port auto config mode by default) */

/*
 * if there is a port/phy on which you do not wish to override the default
 * values, use the value "0000000000000000". A SAS address of all zeros is
 * considered invalid and will not be used.
 */
#ifdef MPC
static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
				     {1, 2, 4, 8} };
static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
						     0x5FCFFFFFF0000002ULL,
						     0x5FCFFFFFF0000003ULL,
						     0x5FCFFFFFF0000004ULL },
						   { 0x5FCFFFFFF0000005ULL,
						     0x5FCFFFFFF0000006ULL,
						     0x5FCFFFFFF0000007ULL,
						     0x5FCFFFFFF0000008ULL } };
#else	/* APC (default) */
static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
static const __u8 phy_mask[2][4];
static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
						     0x5FCFFFFF00000001ULL,
						     0x5FCFFFFF00000001ULL,
						     0x5FCFFFFF00000001ULL },
						   { 0x5FCFFFFF00000002ULL,
						     0x5FCFFFFF00000002ULL,
						     0x5FCFFFFF00000002ULL,
						     0x5FCFFFFF00000002ULL } };
#endif

/* Maximum number of concurrent device spin up */
static const int max_num_concurrent_dev_spin_up = 1;

/* enable SSC operation */
static const int enable_ssc;

/* AFE_TX_AMP_CONTROL */
static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
static const unsigned int afe_tx_amp_control3 = 0x000afc6e;

static const char blob_name[] = "isci_firmware.bin";
static const char sig[] = "ISCUOEMB";
static const unsigned char version = 0x10;

#endif
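[Editorial note: a quick way to sanity-check a candidate phy_mask row against the valid configurations listed above is to verify that no phy is claimed by more than one port. The helper below is a hypothetical sketch, not part of the commit:]

	static int valid_phy_masks(const __u8 masks[4])
	{
		__u8 seen = 0;
		int i;

		for (i = 0; i < 4; i++) {
			if (masks[i] & seen)
				return 0;	/* a phy is claimed by two ports */
			seen |= masks[i];
		}
		return seen <= 0xF;	/* only phys 0-3 exist per SCU */
	}
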
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
new file mode 100644
index 000000000000..26072f1e9852
--- /dev/null
+++ b/drivers/scsi/isci/host.c
@@ -0,0 +1,2751 @@
/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/circ_buf.h>
#include <linux/device.h>
#include <scsi/sas.h>
#include "host.h"
#include "isci.h"
#include "port.h"
#include "probe_roms.h"
#include "remote_device.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "registers.h"
#include "scu_remote_node_context.h"
#include "scu_task_context.h"

#define SCU_CONTEXT_RAM_INIT_STALL_TIME      200

#define smu_max_ports(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
	)

#define smu_max_task_contexts(dcc_value)	\
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
	)

#define smu_max_rncs(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
	)

#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100

/*
 * The number of milliseconds to wait while a given phy is consuming power
 * before allowing another set of phys to consume power. Ultimately, this will
 * be specified by OEM parameter.
 */
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500

/**
 * NORMALIZE_PUT_POINTER() -
 *
 * This macro will normalize the completion queue put pointer so its value can
 * be used as an array index.
 */
#define NORMALIZE_PUT_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)


/**
 * NORMALIZE_EVENT_POINTER() -
 *
 * This macro will normalize the completion queue event entry so its value can
 * be used as an index.
 */
#define NORMALIZE_EVENT_POINTER(x) \
	(\
		((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
		>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
	)

/**
 * NORMALIZE_GET_POINTER() -
 *
 * This macro will normalize the completion queue get pointer so its value can
 * be used as an index into an array.
 */
#define NORMALIZE_GET_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)

/**
 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
 *
 * This macro will normalize the completion queue cycle pointer so it matches
 * the completion queue cycle bit.
 */
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
	((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))

/**
 * COMPLETION_QUEUE_CYCLE_BIT() -
 *
 * This macro will return the cycle bit of the completion queue entry.
 */
#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)

/* Init the state machine and call the state entry function (if any) */
void sci_init_sm(struct sci_base_state_machine *sm,
		 const struct sci_base_state *state_table, u32 initial_state)
{
	sci_state_transition_t handler;

	sm->initial_state_id    = initial_state;
	sm->previous_state_id   = initial_state;
	sm->current_state_id    = initial_state;
	sm->state_table         = state_table;

	handler = sm->state_table[initial_state].enter_state;
	if (handler)
		handler(sm);
}

/* Call the state exit fn, update the current state, call the state entry fn */
void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
{
	sci_state_transition_t handler;

	handler = sm->state_table[sm->current_state_id].exit_state;
	if (handler)
		handler(sm);

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next_state;

	handler = sm->state_table[sm->current_state_id].enter_state;
	if (handler)
		handler(sm);
}
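
/*
 * Editorial sketch (not part of this commit): how a client of the two
 * helpers above wires up a state table.  The demo enum and handlers are
 * hypothetical; only struct sci_base_state, sci_init_sm() and
 * sci_change_state() come from the driver.  Guarded out so it is clearly
 * illustrative.
 */
#if 0
enum demo_states { DEMO_OFF, DEMO_ON, DEMO_STATE_COUNT };

static void demo_on_enter(struct sci_base_state_machine *sm)
{
	/* runs automatically inside sci_change_state(sm, DEMO_ON) */
}

static const struct sci_base_state demo_state_table[DEMO_STATE_COUNT] = {
	[DEMO_OFF] = { },				/* no enter/exit hooks */
	[DEMO_ON]  = { .enter_state = demo_on_enter },
};

static void demo(struct sci_base_state_machine *sm)
{
	sci_init_sm(sm, demo_state_table, DEMO_OFF);	/* enters DEMO_OFF */
	sci_change_state(sm, DEMO_ON);	/* OFF's exit hook, then ON's enter */
}
#endif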

static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
{
	u32 get_value = ihost->completion_queue_get;
	u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;

	if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
	    COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
		return true;

	return false;
}

static bool sci_controller_isr(struct isci_host *ihost)
{
	if (sci_controller_completion_queue_has_entries(ihost)) {
		return true;
	} else {
		/*
		 * We have a spurious interrupt; it could be that we have
		 * already emptied the completion queue from a previous
		 * interrupt. */
		writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);

		/*
		 * There is a race in the hardware that could cause us not to be
		 * notified of an interrupt completion if we do not take this
		 * step. We will mask then unmask the interrupts so if there is
		 * another interrupt pending after the clearing of the interrupt
		 * source we get the next interrupt message. */
		writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
		writel(0, &ihost->smu_registers->interrupt_mask);
	}

	return false;
}

irqreturn_t isci_msix_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (sci_controller_isr(ihost))
		tasklet_schedule(&ihost->completion_tasklet);

	return IRQ_HANDLED;
}

static bool sci_controller_error_isr(struct isci_host *ihost)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&ihost->smu_registers->interrupt_status);
	interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);

	if (interrupt_status != 0) {
		/*
		 * There is an error interrupt pending, so let it through and
		 * handle it in the callback. */
		return true;
	}

	/*
	 * There is a race in the hardware that could cause us not to be
	 * notified of an interrupt completion if we do not take this step.
	 * We will mask then unmask the error interrupts so if there was
	 * another interrupt pending we will be notified.
	 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
	writel(0xff, &ihost->smu_registers->interrupt_mask);
	writel(0, &ihost->smu_registers->interrupt_mask);

	return false;
}

static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
{
	u32 index = SCU_GET_COMPLETION_INDEX(ent);
	struct isci_request *ireq = ihost->reqs[index];

	/* Make sure that we really want to process this IO request */
	if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
	    ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
	    ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
		/* Yep, this is a valid io request; pass it along to the
		 * io request handler
		 */
		sci_io_request_tc_completion(ireq, ent);
}

static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
{
	u32 index;
	struct isci_request *ireq;
	struct isci_remote_device *idev;

	index = SCU_GET_COMPLETION_INDEX(ent);

	switch (scu_get_command_request_type(ent)) {
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
		ireq = ihost->reqs[index];
		dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
			 __func__, ent, ireq);
		/* @todo For a post TC operation we need to fail the IO
		 * request
		 */
		break;
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
		idev = ihost->device_table[index];
		dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
			 __func__, ent, idev);
		/* @todo For a port RNC operation we need to fail the
		 * device
		 */
		break;
	default:
		dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
			 __func__, ent);
		break;
	}
}

static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
{
	u32 index;
	u32 frame_index;

	struct scu_unsolicited_frame_header *frame_header;
	struct isci_phy *iphy;
	struct isci_remote_device *idev;

	enum sci_status result = SCI_FAILURE;

	frame_index = SCU_GET_FRAME_INDEX(ent);

	frame_header = ihost->uf_control.buffers.array[frame_index].header;
	ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;

	if (SCU_GET_FRAME_ERROR(ent)) {
		/*
		 * @todo If the IAF frame or SIGNATURE FIS frame has an error
		 * will this cause a problem? We expect the phy initialization
		 * will fail if there is an error in the frame. */
		sci_controller_release_frame(ihost, frame_index);
		return;
	}

	if (frame_header->is_address_frame) {
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
		iphy = &ihost->phys[index];
		result = sci_phy_frame_handler(iphy, frame_index);
	} else {

		index = SCU_GET_COMPLETION_INDEX(ent);

		if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			/*
			 * This is a signature fis or a frame from a direct
			 * attached SATA device that has not yet been created.
			 * In either case forward the frame to the PE and let
			 * it take care of the frame data. */
			index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
			iphy = &ihost->phys[index];
			result = sci_phy_frame_handler(iphy, frame_index);
		} else {
			if (index < ihost->remote_node_entries)
				idev = ihost->device_table[index];
			else
				idev = NULL;

			if (idev != NULL)
				result = sci_remote_device_frame_handler(idev, frame_index);
			else
				sci_controller_release_frame(ihost, frame_index);
		}
	}

	if (result != SCI_SUCCESS) {
		/*
		 * @todo Is there any reason to report some additional error
		 * message when we get this failure notification? */
	}
}

static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
{
	struct isci_remote_device *idev;
	struct isci_request *ireq;
	struct isci_phy *iphy;
	u32 index;

	index = SCU_GET_COMPLETION_INDEX(ent);

	switch (scu_get_event_type(ent)) {
	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
		/* @todo The driver did something wrong and we need to fix the
		 * condition. */
		dev_err(&ihost->pdev->dev,
			"%s: SCIC Controller 0x%p received SMU command error "
			"0x%x\n",
			__func__,
			ihost,
			ent);
		break;

	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
	case SCU_EVENT_TYPE_SMU_ERROR:
	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
		/*
		 * @todo This is a hardware failure and it's likely that we
		 * want to reset the controller. */
		dev_err(&ihost->pdev->dev,
			"%s: SCIC Controller 0x%p received fatal controller "
			"event 0x%x\n",
			__func__,
			ihost,
			ent);
		break;

	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
		ireq = ihost->reqs[index];
		sci_io_request_event_handler(ireq, ent);
		break;

	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		switch (scu_get_event_specifier(ent)) {
		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
			ireq = ihost->reqs[index];
			if (ireq != NULL)
				sci_io_request_event_handler(ireq, ent);
			else
				dev_warn(&ihost->pdev->dev,
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for io request object "
					 "that doesn't exist.\n",
					 __func__,
					 ihost,
					 ent);

			break;

		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
			idev = ihost->device_table[index];
			if (idev != NULL)
				sci_remote_device_event_handler(idev, ent);
			else
				dev_warn(&ihost->pdev->dev,
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for remote device object "
					 "that doesn't exist.\n",
					 __func__,
					 ihost,
					 ent);

			break;
		}
		break;

	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
		/*
		 * direct the broadcast change event to the phy first and then let
		 * the phy redirect the broadcast change to the port object */
	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
		/*
		 * direct error counter event to the phy object since that is where
		 * we get the event notification. This is a type 4 event. */
	case SCU_EVENT_TYPE_OSSP_EVENT:
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
		iphy = &ihost->phys[index];
		sci_phy_event_handler(iphy, ent);
		break;

	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
		if (index < ihost->remote_node_entries) {
			idev = ihost->device_table[index];

			if (idev != NULL)
				sci_remote_device_event_handler(idev, ent);
		} else
			dev_err(&ihost->pdev->dev,
				"%s: SCIC Controller 0x%p received event 0x%x "
				"for remote device object 0x%0x that doesn't "
				"exist.\n",
				__func__,
				ihost,
				ent,
				index);

		break;

	default:
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC Controller received unknown event code %x\n",
			 __func__,
			 ent);
		break;
	}
}

static void sci_controller_process_completions(struct isci_host *ihost)
{
	u32 completion_count = 0;
	u32 ent;
	u32 get_index;
	u32 get_cycle;
	u32 event_get;
	u32 event_cycle;

	dev_dbg(&ihost->pdev->dev,
		"%s: completion queue beginning get:0x%08x\n",
		__func__,
		ihost->completion_queue_get);

	/* Get the component parts of the completion queue */
	get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
	get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;

	event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;

	while (
		NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
		== COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
		) {
		completion_count++;

		ent = ihost->completion_queue[get_index];

		/* increment the get pointer and check for rollover to toggle the cycle bit */
		get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
			     (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
		get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);

		dev_dbg(&ihost->pdev->dev,
			"%s: completion queue entry:0x%08x\n",
			__func__,
			ent);

		switch (SCU_GET_COMPLETION_TYPE(ent)) {
		case SCU_COMPLETION_TYPE_TASK:
			sci_controller_task_completion(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_SDMA:
			sci_controller_sdma_completion(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_UFI:
			sci_controller_unsolicited_frame(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_EVENT:
		case SCU_COMPLETION_TYPE_NOTIFY: {
			event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
				       (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
			event_get = (event_get+1) & (SCU_MAX_EVENTS-1);

			sci_controller_event_completion(ihost, ent);
			break;
		}
		default:
			dev_warn(&ihost->pdev->dev,
				 "%s: SCIC Controller received unknown "
				 "completion type %x\n",
				 __func__,
				 ent);
			break;
		}
	}

	/* Update the get register if we completed one or more entries */
	if (completion_count > 0) {
		ihost->completion_queue_get =
			SMU_CQGR_GEN_BIT(ENABLE) |
			SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
			event_cycle |
			SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
			get_cycle |
			SMU_CQGR_GEN_VAL(POINTER, get_index);

		writel(ihost->completion_queue_get,
		       &ihost->smu_registers->completion_queue_get);

	}

	dev_dbg(&ihost->pdev->dev,
		"%s: completion queue ending get:0x%08x\n",
		__func__,
		ihost->completion_queue_get);

}
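
/*
 * Editorial note, a worked example of the cycle-bit arithmetic above (sizes
 * are assumed for illustration): suppose SCU_MAX_COMPLETION_QUEUE_ENTRIES is
 * 512.  While get_index is 0..510, ((get_index + 1) & 512) is 0 and the XOR
 * leaves get_cycle untouched.  At get_index == 511 the expression becomes
 * 512, and the shift places that single bit exactly on SMU_CQGR_CYCLE_BIT,
 * so the XOR toggles the expected cycle at the same moment get_index wraps
 * to 0.  An entry is consumable only while its hardware-written cycle bit
 * (bit 31 of the entry, see COMPLETION_QUEUE_CYCLE_BIT) matches this
 * expected cycle, which is why entries are pre-seeded with 0x80000000 at
 * queue initialization.
 */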

static void sci_controller_error_handler(struct isci_host *ihost)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&ihost->smu_registers->interrupt_status);

	if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
	    sci_controller_completion_queue_has_entries(ihost)) {

		sci_controller_process_completions(ihost);
		writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
	} else {
		dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
			interrupt_status);

		sci_change_state(&ihost->sm, SCIC_FAILED);

		return;
	}

	/* If we don't process any completions I am not sure that we want to do this.
	 * We are in the middle of a hardware fault and should probably be reset.
	 */
	writel(0, &ihost->smu_registers->interrupt_mask);
}

irqreturn_t isci_intx_isr(int vec, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct isci_host *ihost = data;

	if (sci_controller_isr(ihost)) {
		writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
		tasklet_schedule(&ihost->completion_tasklet);
		ret = IRQ_HANDLED;
	} else if (sci_controller_error_isr(ihost)) {
		spin_lock(&ihost->scic_lock);
		sci_controller_error_handler(ihost);
		spin_unlock(&ihost->scic_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}

irqreturn_t isci_error_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (sci_controller_error_isr(ihost))
		sci_controller_error_handler(ihost);

	return IRQ_HANDLED;
}

/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @isci_host: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 *
 */
static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	if (completion_status != SCI_SUCCESS)
		dev_info(&ihost->pdev->dev,
			 "controller start timed out, continuing...\n");
	isci_host_change_state(ihost, isci_ready);
	clear_bit(IHOST_START_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;

	if (test_bit(IHOST_START_PENDING, &ihost->flags))
		return 0;

	/* todo: use sas_flush_discovery once it is upstream */
	scsi_flush_work(shost);

	dev_dbg(&ihost->pdev->dev,
		"%s: ihost->status = %d, time = %ld\n",
		__func__, isci_host_get_state(ihost), time);

	return 1;

}

/**
 * sci_controller_get_suggested_start_timeout() - This method returns the
 *    suggested sci_controller_start() timeout amount.  The user is free to
 *    use any timeout value, but this method provides the suggested minimum
 *    start timeout value.  The returned value is based upon empirical
 *    information determined as a result of interoperability testing.
 * @controller: the handle to the controller object for which to return the
 *    suggested start timeout.
 *
 * This method returns the number of milliseconds for the suggested start
 * operation timeout.
 */
static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
{
	/* Validate the user supplied parameters. */
	if (!ihost)
		return 0;

	/*
	 * The suggested minimum timeout value for a controller start operation:
	 *
	 *     Signature FIS Timeout
	 *   + Phy Start Timeout
	 *   + Number of Phy Spin Up Intervals
	 *   ---------------------------------
	 *   Number of milliseconds for the controller start operation.
	 *
	 * NOTE: The number of phy spin up intervals will be equivalent
	 *       to the number of phys divided by the number of phys allowed
	 *       per interval - 1 (once OEM parameters are supported).
	 *       Currently we assume only 1 phy per interval. */

	return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
		+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
		+ ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
}
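
/*
 * Editorial note: with the constants defined earlier in this file
 * (SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT == 100 ms,
 * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL == 500 ms) and assuming
 * SCI_MAX_PHYS == 4 for this controller, the sum above works out to
 * SCIC_SDS_SIGNATURE_FIS_TIMEOUT + 100 + (3 * 500), i.e. the signature FIS
 * timeout plus 1600 ms.
 */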

static void sci_controller_enable_interrupts(struct isci_host *ihost)
{
	BUG_ON(ihost->smu_registers == NULL);
	writel(0, &ihost->smu_registers->interrupt_mask);
}

void sci_controller_disable_interrupts(struct isci_host *ihost)
{
	BUG_ON(ihost->smu_registers == NULL);
	writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
}

static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
{
	u32 port_task_scheduler_value;

	port_task_scheduler_value =
		readl(&ihost->scu_registers->peg0.ptsg.control);
	port_task_scheduler_value |=
		(SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
		 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
	writel(port_task_scheduler_value,
	       &ihost->scu_registers->peg0.ptsg.control);
}

static void sci_controller_assign_task_entries(struct isci_host *ihost)
{
	u32 task_assignment;

	/*
	 * Assign all the TCs to function 0
	 * TODO: Do we actually need to read this register to write it back?
	 */

	task_assignment =
		readl(&ihost->smu_registers->task_context_assignment[0]);

	task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
		(SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) |
		(SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));

	writel(task_assignment,
	       &ihost->smu_registers->task_context_assignment[0]);

}

static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
{
	u32 index;
	u32 completion_queue_control_value;
	u32 completion_queue_get_value;
	u32 completion_queue_put_value;

	ihost->completion_queue_get = 0;

	completion_queue_control_value =
		(SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
		 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));

	writel(completion_queue_control_value,
	       &ihost->smu_registers->completion_queue_control);


	/* Set the completion queue get pointer and enable the queue */
	completion_queue_get_value = (
		(SMU_CQGR_GEN_VAL(POINTER, 0))
		| (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
		| (SMU_CQGR_GEN_BIT(ENABLE))
		| (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
		);

	writel(completion_queue_get_value,
	       &ihost->smu_registers->completion_queue_get);

	/* Set the completion queue put pointer */
	completion_queue_put_value = (
		(SMU_CQPR_GEN_VAL(POINTER, 0))
		| (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
		);

	writel(completion_queue_put_value,
	       &ihost->smu_registers->completion_queue_put);

	/* Initialize the cycle bit of the completion queue entries */
	for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
		/*
		 * If get.cycle_bit != completion_queue.cycle_bit
		 * it's not a valid completion queue entry,
		 * so at system start all entries are invalid */
		ihost->completion_queue[index] = 0x80000000;
	}
}

static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
{
	u32 frame_queue_control_value;
	u32 frame_queue_get_value;
	u32 frame_queue_put_value;

	/* Write the queue size */
	frame_queue_control_value =
		SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);

	writel(frame_queue_control_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_queue_control);

	/* Setup the get pointer for the unsolicited frame queue */
	frame_queue_get_value = (
		SCU_UFQGP_GEN_VAL(POINTER, 0)
		| SCU_UFQGP_GEN_BIT(ENABLE_BIT)
		);

	writel(frame_queue_get_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
	/* Setup the put pointer for the unsolicited frame queue */
	frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
	writel(frame_queue_put_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
}

static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
{
	if (ihost->sm.current_state_id == SCIC_STARTING) {
		/*
		 * We move into the ready state, because some of the phys/ports
		 * may be up and operational.
		 */
		sci_change_state(&ihost->sm, SCIC_READY);

		isci_host_start_complete(ihost, status);
	}
}

static bool is_phy_starting(struct isci_phy *iphy)
{
	enum sci_phy_states state;

	state = iphy->sm.current_state_id;
	switch (state) {
	case SCI_PHY_STARTING:
	case SCI_PHY_SUB_INITIAL:
	case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_IAF_UF:
	case SCI_PHY_SUB_AWAIT_SAS_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
	case SCI_PHY_SUB_FINAL:
		return true;
	default:
		return false;
	}
}

/**
 * sci_controller_start_next_phy - start phy
 * @scic: controller
 *
 * If all the phys have been started, then attempt to transition the
 * controller to the READY state and inform the user
 * (sci_cb_controller_start_complete()).
 */
static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
{
	struct sci_oem_params *oem = &ihost->oem_parameters;
	struct isci_phy *iphy;
	enum sci_status status;

	status = SCI_SUCCESS;

	if (ihost->phy_startup_timer_pending)
		return status;

	if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
		bool is_controller_start_complete = true;
		u32 state;
		u8 index;

		for (index = 0; index < SCI_MAX_PHYS; index++) {
			iphy = &ihost->phys[index];
			state = iphy->sm.current_state_id;

			if (!phy_get_non_dummy_port(iphy))
				continue;

			/* The controller start operation is complete iff:
			 * - all links have been given an opportunity to start
			 * - have no indication of a connected device
			 * - have an indication of a connected device and it has
			 *   finished the link training process.
			 */
			if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
			    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
			    (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
				is_controller_start_complete = false;
				break;
			}
		}

		/*
		 * The controller has successfully finished the start process.
		 * Inform the SCI Core user and transition to the READY state. */
		if (is_controller_start_complete == true) {
			sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
			sci_del_timer(&ihost->phy_timer);
			ihost->phy_startup_timer_pending = false;
		}
	} else {
		iphy = &ihost->phys[ihost->next_phy_to_start];

		if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
			if (phy_get_non_dummy_port(iphy) == NULL) {
				ihost->next_phy_to_start++;

				/* Caution: recursion ahead, be forewarned.
				 *
				 * The PHY was never added to a PORT in MPC mode,
				 * so start the next phy in sequence.  This phy
				 * will never go link up and will not draw power;
				 * the OEM parameters either configured the phy
				 * incorrectly for the PORT or it was never
				 * assigned to a PORT.
				 */
				return sci_controller_start_next_phy(ihost);
			}
		}

		status = sci_phy_start(iphy);

		if (status == SCI_SUCCESS) {
			sci_mod_timer(&ihost->phy_timer,
				      SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
			ihost->phy_startup_timer_pending = true;
		} else {
			dev_warn(&ihost->pdev->dev,
				 "%s: Controller start operation failed "
				 "to start phy %d because of status "
				 "%d.\n",
				 __func__,
				 ihost->phys[ihost->next_phy_to_start].phy_index,
				 status);
		}

		ihost->next_phy_to_start++;
	}

	return status;
}

static void phy_startup_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
	unsigned long flags;
	enum sci_status status;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	ihost->phy_startup_timer_pending = false;

	do {
		status = sci_controller_start_next_phy(ihost);
	} while (status != SCI_SUCCESS);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

static u16 isci_tci_active(struct isci_host *ihost)
{
	return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}
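
/*
 * Editorial note: CIRC_CNT(head, tail, size) from <linux/circ_buf.h> is
 * (head - tail) & (size - 1) for a power-of-2 size, i.e. the number of
 * occupied slots in the ring with wrap-around handled.  For example,
 * assuming SCI_MAX_IO_REQUESTS == 256, head == 5 and tail == 250 yield
 * (5 - 250) & 255 == 11 entries currently in the pool.
 */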
981
982static enum sci_status sci_controller_start(struct isci_host *ihost,
983 u32 timeout)
984{
985 enum sci_status result;
986 u16 index;
987
988 if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
989 dev_warn(&ihost->pdev->dev,
990 "SCIC Controller start operation requested in "
991 "invalid state\n");
992 return SCI_FAILURE_INVALID_STATE;
993 }
994
995 /* Build the TCi free pool */
996 BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
997 ihost->tci_head = 0;
998 ihost->tci_tail = 0;
999 for (index = 0; index < ihost->task_context_entries; index++)
1000 isci_tci_free(ihost, index);
1001
1002 /* Build the RNi free pool */
1003 sci_remote_node_table_initialize(&ihost->available_remote_nodes,
1004 ihost->remote_node_entries);
1005
1006 /*
1007 * Before anything else lets make sure we will not be
1008 * interrupted by the hardware.
1009 */
1010 sci_controller_disable_interrupts(ihost);
1011
1012 /* Enable the port task scheduler */
1013 sci_controller_enable_port_task_scheduler(ihost);
1014
1015 /* Assign all the task entries to ihost physical function */
1016 sci_controller_assign_task_entries(ihost);
1017
1018 /* Now initialize the completion queue */
1019 sci_controller_initialize_completion_queue(ihost);
1020
1021 /* Initialize the unsolicited frame queue for use */
1022 sci_controller_initialize_unsolicited_frame_queue(ihost);
1023
1024 /* Start all of the ports on this controller */
1025 for (index = 0; index < ihost->logical_port_entries; index++) {
1026 struct isci_port *iport = &ihost->ports[index];
1027
1028 result = sci_port_start(iport);
1029 if (result)
1030 return result;
1031 }
1032
1033 sci_controller_start_next_phy(ihost);
1034
1035 sci_mod_timer(&ihost->timer, timeout);
1036
1037 sci_change_state(&ihost->sm, SCIC_STARTING);
1038
1039 return SCI_SUCCESS;
1040}
1041
1042void isci_host_scan_start(struct Scsi_Host *shost)
1043{
1044 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
1045 unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
1046
1047 set_bit(IHOST_START_PENDING, &ihost->flags);
1048
1049 spin_lock_irq(&ihost->scic_lock);
1050 sci_controller_start(ihost, tmo);
1051 sci_controller_enable_interrupts(ihost);
1052 spin_unlock_irq(&ihost->scic_lock);
1053}
1054
1055static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
1056{
1057 isci_host_change_state(ihost, isci_stopped);
1058 sci_controller_disable_interrupts(ihost);
1059 clear_bit(IHOST_STOP_PENDING, &ihost->flags);
1060 wake_up(&ihost->eventq);
1061}
1062
1063static void sci_controller_completion_handler(struct isci_host *ihost)
1064{
1065 /* Empty out the completion queue */
1066 if (sci_controller_completion_queue_has_entries(ihost))
1067 sci_controller_process_completions(ihost);
1068
1069 /* Clear the interrupt and enable all interrupts again */
1070 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
1071 /* Could we write the value of SMU_ISR_COMPLETION? */
1072 writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
1073 writel(0, &ihost->smu_registers->interrupt_mask);
1074}
1075
1076/**
1077 * isci_host_completion_routine() - This function is the delayed service
1078 * routine that calls the sci core library's completion handler. It's
1079 * scheduled as a tasklet from the interrupt service routine when interrupts
1080 * in use, or set as the timeout function in polled mode.
1081 * @data: This parameter specifies the ISCI host object
1082 *
1083 */
1084static void isci_host_completion_routine(unsigned long data)
1085{
1086 struct isci_host *ihost = (struct isci_host *)data;
1087 struct list_head completed_request_list;
1088 struct list_head errored_request_list;
1089 struct list_head *current_position;
1090 struct list_head *next_position;
1091 struct isci_request *request;
1092 struct isci_request *next_request;
1093 struct sas_task *task;
1094
1095 INIT_LIST_HEAD(&completed_request_list);
1096 INIT_LIST_HEAD(&errored_request_list);
1097
1098 spin_lock_irq(&ihost->scic_lock);
1099
1100 sci_controller_completion_handler(ihost);
1101
1102 /* Take the lists of completed I/Os from the host. */
1103
1104 list_splice_init(&ihost->requests_to_complete,
1105 &completed_request_list);
1106
1107 /* Take the list of errored I/Os from the host. */
1108 list_splice_init(&ihost->requests_to_errorback,
1109 &errored_request_list);
1110
1111 spin_unlock_irq(&ihost->scic_lock);
1112
1113 /* Process any completions in the lists. */
1114 list_for_each_safe(current_position, next_position,
1115 &completed_request_list) {
1116
1117 request = list_entry(current_position, struct isci_request,
1118 completed_node);
1119 task = isci_request_access_task(request);
1120
1121 /* Normal notification (task_done) */
1122 dev_dbg(&ihost->pdev->dev,
1123 "%s: Normal - request/task = %p/%p\n",
1124 __func__,
1125 request,
1126 task);
1127
1128 /* Return the task to libsas */
1129 if (task != NULL) {
1130
1131 task->lldd_task = NULL;
1132 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1133
1134 /* If the task is already in the abort path,
1135 * the task_done callback cannot be called.
1136 */
1137 task->task_done(task);
1138 }
1139 }
1140
1141 spin_lock_irq(&ihost->scic_lock);
1142 isci_free_tag(ihost, request->io_tag);
1143 spin_unlock_irq(&ihost->scic_lock);
1144 }
1145 list_for_each_entry_safe(request, next_request, &errored_request_list,
1146 completed_node) {
1147
1148 task = isci_request_access_task(request);
1149
1150 /* Use sas_task_abort */
1151 dev_warn(&ihost->pdev->dev,
1152 "%s: Error - request/task = %p/%p\n",
1153 __func__,
1154 request,
1155 task);
1156
1157 if (task != NULL) {
1158
1159 /* Put the task into the abort path if it's not there
1160 * already.
1161 */
1162 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
1163 sas_task_abort(task);
1164
1165 } else {
1166 /* This is a case where the request has completed with a
1167 * status such that it needed further target servicing,
1168 * but the sas_task reference has already been removed
1169 * from the request. Since it was errored, it was not
1170 * being aborted, so there is nothing to do except free
1171 * it.
1172 */
1173
1174 spin_lock_irq(&ihost->scic_lock);
1175 /* Remove the request from the remote device's list
1176 * of pending requests.
1177 */
1178 list_del_init(&request->dev_node);
1179 isci_free_tag(ihost, request->io_tag);
1180 spin_unlock_irq(&ihost->scic_lock);
1181 }
1182 }
1183
1184}
1185
1186/**
1187 * sci_controller_stop() - This method will stop an individual controller
1188 * object.This method will invoke the associated user callback upon
1189 * completion. The completion callback is called when the following
1190 * conditions are met: -# the method return status is SCI_SUCCESS. -# the
1191 * controller has been quiesced. This method will ensure that all IO
1192 * requests are quiesced, phys are stopped, and all additional operation by
1193 * the hardware is halted.
1194 * @controller: the handle to the controller object to stop.
1195 * @timeout: This parameter specifies the number of milliseconds in which the
1196 * stop operation should complete.
1197 *
1198 * The controller must be in the STARTED or STOPPED state. Indicate if the
1199 * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
1200 * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
1201 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
1202 * controller is not either in the STARTED or STOPPED states.
1203 */
1204static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
1205{
1206 if (ihost->sm.current_state_id != SCIC_READY) {
1207 dev_warn(&ihost->pdev->dev,
1208 "SCIC Controller stop operation requested in "
1209 "invalid state\n");
1210 return SCI_FAILURE_INVALID_STATE;
1211 }
1212
1213 sci_mod_timer(&ihost->timer, timeout);
1214 sci_change_state(&ihost->sm, SCIC_STOPPING);
1215 return SCI_SUCCESS;
1216}
1217
1218/**
1219 * sci_controller_reset() - This method will reset the supplied core
1220 * controller regardless of the state of said controller. This operation is
1221 * considered destructive. In other words, all current operations are wiped
1222 * out. No IO completions for outstanding devices occur. Outstanding IO
1223 * requests are not aborted or completed at the actual remote device.
1224 * @controller: the handle to the controller object to reset.
1225 *
1226 * Indicate if the controller reset method succeeded or failed in some way.
1227 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
1228 * the controller reset operation is unable to complete.
1229 */
1230static enum sci_status sci_controller_reset(struct isci_host *ihost)
1231{
1232 switch (ihost->sm.current_state_id) {
1233 case SCIC_RESET:
1234 case SCIC_READY:
1235 case SCIC_STOPPED:
1236 case SCIC_FAILED:
1237 /*
1238 * The reset operation is not a graceful cleanup, just
1239 * perform the state transition.
1240 */
1241 sci_change_state(&ihost->sm, SCIC_RESETTING);
1242 return SCI_SUCCESS;
1243 default:
1244 dev_warn(&ihost->pdev->dev,
1245 "SCIC Controller reset operation requested in "
1246 "invalid state\n");
1247 return SCI_FAILURE_INVALID_STATE;
1248 }
1249}
1250
1251void isci_host_deinit(struct isci_host *ihost)
1252{
1253 int i;
1254
1255 isci_host_change_state(ihost, isci_stopping);
1256 for (i = 0; i < SCI_MAX_PORTS; i++) {
1257 struct isci_port *iport = &ihost->ports[i];
1258 struct isci_remote_device *idev, *d;
1259
1260 list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
1261 if (test_bit(IDEV_ALLOCATED, &idev->flags))
1262 isci_remote_device_stop(ihost, idev);
1263 }
1264 }
1265
1266 set_bit(IHOST_STOP_PENDING, &ihost->flags);
1267
1268 spin_lock_irq(&ihost->scic_lock);
1269 sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
1270 spin_unlock_irq(&ihost->scic_lock);
1271
1272 wait_for_stop(ihost);
1273 sci_controller_reset(ihost);
1274
1275 /* Cancel any/all outstanding port timers */
1276 for (i = 0; i < ihost->logical_port_entries; i++) {
1277 struct isci_port *iport = &ihost->ports[i];
1278 del_timer_sync(&iport->timer.timer);
1279 }
1280
1281 /* Cancel any/all outstanding phy timers */
1282 for (i = 0; i < SCI_MAX_PHYS; i++) {
1283 struct isci_phy *iphy = &ihost->phys[i];
1284 del_timer_sync(&iphy->sata_timer.timer);
1285 }
1286
1287 del_timer_sync(&ihost->port_agent.timer.timer);
1288
1289 del_timer_sync(&ihost->power_control.timer.timer);
1290
1291 del_timer_sync(&ihost->timer.timer);
1292
1293 del_timer_sync(&ihost->phy_timer.timer);
1294}
1295
1296static void __iomem *scu_base(struct isci_host *isci_host)
1297{
1298 struct pci_dev *pdev = isci_host->pdev;
1299 int id = isci_host->id;
1300
1301 return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
1302}
1303
1304static void __iomem *smu_base(struct isci_host *isci_host)
1305{
1306 struct pci_dev *pdev = isci_host->pdev;
1307 int id = isci_host->id;
1308
1309 return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
1310}
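
The two helpers above carve per-controller register windows out of BARs shared by
both controllers of the PCI function, offsetting by the controller id. A
stand-alone model of the arithmetic follows; the window size is an illustrative
assumption, not the driver's constant:

	#include <stdio.h>

	#define EX_SCU_BAR_SIZE (4UL * 1024 * 1024)	/* assumed window size */

	int main(void)
	{
		for (int id = 0; id < 2; id++)
			printf("controller %d: SCU window at BAR offset 0x%lx\n",
			       id, EX_SCU_BAR_SIZE * id);
		return 0;
	}
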
1311
1312static void isci_user_parameters_get(struct sci_user_parameters *u)
1313{
1314 int i;
1315
1316 for (i = 0; i < SCI_MAX_PHYS; i++) {
1317 struct sci_phy_user_params *u_phy = &u->phys[i];
1318
1319 u_phy->max_speed_generation = phy_gen;
1320
1321 /* we are not exporting these for now */
1322 u_phy->align_insertion_frequency = 0x7f;
1323 u_phy->in_connection_align_insertion_frequency = 0xff;
1324 u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
1325 }
1326
1327 u->stp_inactivity_timeout = stp_inactive_to;
1328 u->ssp_inactivity_timeout = ssp_inactive_to;
1329 u->stp_max_occupancy_timeout = stp_max_occ_to;
1330 u->ssp_max_occupancy_timeout = ssp_max_occ_to;
1331 u->no_outbound_task_timeout = no_outbound_task_to;
1332 u->max_number_concurrent_device_spin_up = max_concurr_spinup;
1333}
1334
1335static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
1336{
1337 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1338
1339 sci_change_state(&ihost->sm, SCIC_RESET);
1340}
1341
1342static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
1343{
1344 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1345
1346 sci_del_timer(&ihost->timer);
1347}
1348
1349#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
1350#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
1351#define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
1352#define INTERRUPT_COALESCE_NUMBER_MAX 256
1353#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
1354#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28
1355
1356/**
1357 * sci_controller_set_interrupt_coalescence() - This method allows the user to
1358 * configure the interrupt coalescence.
1359 * @ihost: the handle to the controller object whose interrupt coalesce
1360 * register is overridden.
1361 * @coalesce_number: Used to control the number of entries in the Completion
1362 * Queue before an interrupt is generated. If the number of entries exceeds
1363 * this number, an interrupt will be generated. The valid range of the input
1364 * is [0, 256]. A setting of 0 results in coalescing being disabled.
1365 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
1366 * input is [0, 2700000]. A setting of 0 is allowed and results in no
1367 * interrupt coalescing timeout.
1368 *
1369 * Indicate if the user successfully set the interrupt coalesce parameters.
1370 * SCI_SUCCESS The user successfully updated the interrupt coalescence.
1371 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
1372 */
1373static enum sci_status
1374sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
1375 u32 coalesce_number,
1376 u32 coalesce_timeout)
1377{
1378 u8 timeout_encode = 0;
1379 u32 min = 0;
1380 u32 max = 0;
1381
1382 /* Check if the input parameters fall in the range. */
1383 if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
1384 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1385
1386 /*
1387 * Defined encoding for interrupt coalescing timeout:
1388 * Value Min Max Units
1389 * ----- --- --- -----
1390 * 0 - - Disabled
1391 * 1 13.3 20.0 ns
1392 * 2 26.7 40.0
1393 * 3 53.3 80.0
1394 * 4 106.7 160.0
1395 * 5 213.3 320.0
1396 * 6 426.7 640.0
1397 * 7 853.3 1280.0
1398 * 8 1.7 2.6 us
1399 * 9 3.4 5.1
1400 * 10 6.8 10.2
1401 * 11 13.7 20.5
1402 * 12 27.3 41.0
1403 * 13 54.6 81.9
1404 * 14 109.2 163.8
1405 * 15 218.5 327.7
1406 * 16 436.9 655.4
1407 * 17 873.8 1310.7
1408 * 18 1.7 2.6 ms
1409 * 19 3.5 5.2
1410 * 20 7.0 10.5
1411 * 21 14.0 21.0
1412 * 22 28.0 41.9
1413 * 23 55.9 83.9
1414 * 24 111.8 167.8
1415 * 25 223.7 335.5
1416 * 26 447.4 671.1
1417 * 27 894.8 1342.2
1418 * 28 1.8 2.7 s
1419 * Others Undefined */
1420
1421 /*
1422 * Use the table above to decide the encode of interrupt coalescing timeout
1423 * value for register writing. */
1424 if (coalesce_timeout == 0)
1425 timeout_encode = 0;
1426	else {
1427 /* make the timeout value in unit of (10 ns). */
1428 coalesce_timeout = coalesce_timeout * 100;
1429 min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
1430 max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
1431
1432 /* get the encode of timeout for register writing. */
1433 for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
1434 timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
1435 timeout_encode++) {
1436 if (min <= coalesce_timeout && max > coalesce_timeout)
1437 break;
1438 else if (coalesce_timeout >= max && coalesce_timeout < min * 2
1439 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
1440 if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
1441 break;
1442			else {
1443 timeout_encode++;
1444 break;
1445 }
1446 } else {
1447 max = max * 2;
1448 min = min * 2;
1449 }
1450 }
1451
1452 if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
1453 /* the value is out of range. */
1454 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1455 }
1456
1457 writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
1458 SMU_ICC_GEN_VAL(TIMER, timeout_encode),
1459 &ihost->smu_registers->interrupt_coalesce_control);
1460
1461
1462 ihost->interrupt_coalesce_number = (u16)coalesce_number;
1463 ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
1464
1465 return SCI_SUCCESS;
1466}
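
The encode loop above walks the geometric table one row at a time, doubling the
candidate [min, max) window until the requested timeout falls inside it (or lands
between two windows, in which case the nearer bucket wins). The following
stand-alone sketch models that search with the same constants so it can be
compiled and sanity-checked in user space; it is an illustration of the
algorithm, not driver code:

	#include <stdio.h>

	/* Mirrors the search in sci_controller_set_interrupt_coalescence();
	 * returns 0 for "disabled", -1 when the timeout cannot be encoded. */
	static int encode_coalesce_timeout(unsigned long us)
	{
		unsigned long t = us * 100;	/* microseconds -> 10 ns units */
		unsigned long min = 853 / 10;	/* base range lower bound */
		unsigned long max = 1280 / 10;	/* base range upper bound */
		int enc;

		if (us == 0)
			return 0;		/* coalescing disabled */

		for (enc = 7; enc <= 28; enc++) {
			if (min <= t && t < max)
				break;
			if (t >= max && t < min * 2 && t <= 2700000UL * 100) {
				if ((t - max) >= (min * 2 - t))
					enc++;	/* round to the nearer bucket */
				break;
			}
			min *= 2;
			max *= 2;
		}
		return enc > 28 ? -1 : enc;
	}

	int main(void)
	{
		/* the READY-state default of 250 us lands in bucket 15 */
		printf("250 us -> encode %d\n", encode_coalesce_timeout(250));
		return 0;
	}
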
1467
1468
1469static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
1470{
1471 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1472
1473 /* set the default interrupt coalescence number and timeout value. */
1474 sci_controller_set_interrupt_coalescence(ihost, 0x10, 250);
1475}
1476
1477static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
1478{
1479 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1480
1481 /* disable interrupt coalescence. */
1482 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1483}
1484
1485static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
1486{
1487 u32 index;
1488 enum sci_status status;
1489 enum sci_status phy_status;
1490
1491 status = SCI_SUCCESS;
1492
1493 for (index = 0; index < SCI_MAX_PHYS; index++) {
1494 phy_status = sci_phy_stop(&ihost->phys[index]);
1495
1496 if (phy_status != SCI_SUCCESS &&
1497 phy_status != SCI_FAILURE_INVALID_STATE) {
1498 status = SCI_FAILURE;
1499
1500 dev_warn(&ihost->pdev->dev,
1501 "%s: Controller stop operation failed to stop "
1502 "phy %d because of status %d.\n",
1503 __func__,
1504 ihost->phys[index].phy_index, phy_status);
1505 }
1506 }
1507
1508 return status;
1509}
1510
1511static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
1512{
1513 u32 index;
1514 enum sci_status port_status;
1515 enum sci_status status = SCI_SUCCESS;
1516
1517 for (index = 0; index < ihost->logical_port_entries; index++) {
1518 struct isci_port *iport = &ihost->ports[index];
1519
1520 port_status = sci_port_stop(iport);
1521
1522 if ((port_status != SCI_SUCCESS) &&
1523 (port_status != SCI_FAILURE_INVALID_STATE)) {
1524 status = SCI_FAILURE;
1525
1526 dev_warn(&ihost->pdev->dev,
1527 "%s: Controller stop operation failed to "
1528 "stop port %d because of status %d.\n",
1529 __func__,
1530 iport->logical_port_index,
1531 port_status);
1532 }
1533 }
1534
1535 return status;
1536}
1537
1538static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
1539{
1540 u32 index;
1541 enum sci_status status;
1542 enum sci_status device_status;
1543
1544 status = SCI_SUCCESS;
1545
1546 for (index = 0; index < ihost->remote_node_entries; index++) {
1547 if (ihost->device_table[index] != NULL) {
1548			/* @todo What timeout value do we want to provide to this request? */
1549 device_status = sci_remote_device_stop(ihost->device_table[index], 0);
1550
1551 if ((device_status != SCI_SUCCESS) &&
1552 (device_status != SCI_FAILURE_INVALID_STATE)) {
1553 dev_warn(&ihost->pdev->dev,
1554 "%s: Controller stop operation failed "
1555 "to stop device 0x%p because of "
1556 "status %d.\n",
1557 __func__,
1558 ihost->device_table[index], device_status);
1559 }
1560 }
1561 }
1562
1563 return status;
1564}
1565
1566static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
1567{
1568 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1569
1570 /* Stop all of the components for this controller */
1571 sci_controller_stop_phys(ihost);
1572 sci_controller_stop_ports(ihost);
1573 sci_controller_stop_devices(ihost);
1574}
1575
1576static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
1577{
1578 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1579
1580 sci_del_timer(&ihost->timer);
1581}
1582
1583static void sci_controller_reset_hardware(struct isci_host *ihost)
1584{
1585	/* Disable interrupts so we don't take any spurious interrupts */
1586 sci_controller_disable_interrupts(ihost);
1587
1588 /* Reset the SCU */
1589 writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
1590
1591	/* Delay for 1ms before clearing the CQP and UFQPR. */
1592 udelay(1000);
1593
1594 /* The write to the CQGR clears the CQP */
1595 writel(0x00000000, &ihost->smu_registers->completion_queue_get);
1596
1597 /* The write to the UFQGP clears the UFQPR */
1598 writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
1599}
1600
1601static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
1602{
1603 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1604
1605 sci_controller_reset_hardware(ihost);
1606 sci_change_state(&ihost->sm, SCIC_RESET);
1607}
1608
1609static const struct sci_base_state sci_controller_state_table[] = {
1610 [SCIC_INITIAL] = {
1611 .enter_state = sci_controller_initial_state_enter,
1612 },
1613 [SCIC_RESET] = {},
1614 [SCIC_INITIALIZING] = {},
1615 [SCIC_INITIALIZED] = {},
1616 [SCIC_STARTING] = {
1617 .exit_state = sci_controller_starting_state_exit,
1618 },
1619 [SCIC_READY] = {
1620 .enter_state = sci_controller_ready_state_enter,
1621 .exit_state = sci_controller_ready_state_exit,
1622 },
1623 [SCIC_RESETTING] = {
1624 .enter_state = sci_controller_resetting_state_enter,
1625 },
1626 [SCIC_STOPPING] = {
1627 .enter_state = sci_controller_stopping_state_enter,
1628 .exit_state = sci_controller_stopping_state_exit,
1629 },
1630 [SCIC_STOPPED] = {},
1631 [SCIC_FAILED] = {}
1632};
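
The table above follows the driver's state-machine convention: each state may
supply optional enter/exit hooks for sci_change_state() to run. A toy
user-space model of that convention, where the ordering (outgoing state's exit
hook before the incoming state's enter hook) is an assumption based on how the
hooks pair up in this file:

	#include <stdio.h>

	enum state { ST_READY, ST_STOPPING, ST_MAX };

	struct state_desc {
		void (*enter)(void);
		void (*exit)(void);
	};

	static void ready_enter(void)    { printf("enable coalescing\n"); }
	static void ready_exit(void)     { printf("disable coalescing\n"); }
	static void stopping_enter(void) { printf("stop phys/ports/devices\n"); }

	static const struct state_desc table[ST_MAX] = {
		[ST_READY]    = { .enter = ready_enter, .exit = ready_exit },
		[ST_STOPPING] = { .enter = stopping_enter },
	};

	static enum state cur = ST_READY;

	static void change_state(enum state next)
	{
		if (table[cur].exit)
			table[cur].exit();	/* leave the old state */
		cur = next;
		if (table[cur].enter)
			table[cur].enter();	/* enter the new state */
	}

	int main(void)
	{
		change_state(ST_STOPPING);	/* exit READY, enter STOPPING */
		return 0;
	}
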
1633
1634static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
1635{
1636 /* these defaults are overridden by the platform / firmware */
1637 u16 index;
1638
1639 /* Default to APC mode. */
1640 ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
1641
1642	/* Default to 1 concurrent device spin-up. */
1643 ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
1644
1645 /* Default to no SSC operation. */
1646 ihost->oem_parameters.controller.do_enable_ssc = false;
1647
1648 /* Initialize all of the port parameter information to narrow ports. */
1649 for (index = 0; index < SCI_MAX_PORTS; index++) {
1650 ihost->oem_parameters.ports[index].phy_mask = 0;
1651 }
1652
1653 /* Initialize all of the phy parameter information. */
1654 for (index = 0; index < SCI_MAX_PHYS; index++) {
1655 /* Default to 6G (i.e. Gen 3) for now. */
1656 ihost->user_parameters.phys[index].max_speed_generation = 3;
1657
1658 /* the frequencies cannot be 0 */
1659 ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
1660 ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
1661 ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
1662
1663 /*
1664		 * Previous Vitesse-based expanders had an arbitration issue that
1665		 * is worked around by having the upper 32-bits of the SAS address
1666		 * be a value greater than the Vitesse company identifier.
1667 * Hence, usage of 0x5FCFFFFF. */
1668 ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
1669 ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
1670 }
1671
1672 ihost->user_parameters.stp_inactivity_timeout = 5;
1673 ihost->user_parameters.ssp_inactivity_timeout = 5;
1674 ihost->user_parameters.stp_max_occupancy_timeout = 5;
1675 ihost->user_parameters.ssp_max_occupancy_timeout = 20;
1676 ihost->user_parameters.no_outbound_task_timeout = 20;
1677}
1678
1679static void controller_timeout(unsigned long data)
1680{
1681 struct sci_timer *tmr = (struct sci_timer *)data;
1682 struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
1683 struct sci_base_state_machine *sm = &ihost->sm;
1684 unsigned long flags;
1685
1686 spin_lock_irqsave(&ihost->scic_lock, flags);
1687
1688 if (tmr->cancel)
1689 goto done;
1690
1691 if (sm->current_state_id == SCIC_STARTING)
1692 sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
1693 else if (sm->current_state_id == SCIC_STOPPING) {
1694 sci_change_state(sm, SCIC_FAILED);
1695 isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
1696	} else	/* @todo Now what do we want to do in this case? */
1697 dev_err(&ihost->pdev->dev,
1698 "%s: Controller timer fired when controller was not "
1699 "in a state being timed.\n",
1700 __func__);
1701
1702done:
1703 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1704}
1705
1706static enum sci_status sci_controller_construct(struct isci_host *ihost,
1707 void __iomem *scu_base,
1708 void __iomem *smu_base)
1709{
1710 u8 i;
1711
1712 sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
1713
1714 ihost->scu_registers = scu_base;
1715 ihost->smu_registers = smu_base;
1716
1717 sci_port_configuration_agent_construct(&ihost->port_agent);
1718
1719 /* Construct the ports for this controller */
1720 for (i = 0; i < SCI_MAX_PORTS; i++)
1721 sci_port_construct(&ihost->ports[i], i, ihost);
1722 sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
1723
1724 /* Construct the phys for this controller */
1725 for (i = 0; i < SCI_MAX_PHYS; i++) {
1726 /* Add all the PHYs to the dummy port */
1727 sci_phy_construct(&ihost->phys[i],
1728 &ihost->ports[SCI_MAX_PORTS], i);
1729 }
1730
1731 ihost->invalid_phy_mask = 0;
1732
1733 sci_init_timer(&ihost->timer, controller_timeout);
1734
1735 /* Initialize the User and OEM parameters to default values. */
1736 sci_controller_set_default_config_parameters(ihost);
1737
1738 return sci_controller_reset(ihost);
1739}
1740
1741int sci_oem_parameters_validate(struct sci_oem_params *oem)
1742{
1743 int i;
1744
1745 for (i = 0; i < SCI_MAX_PORTS; i++)
1746 if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
1747 return -EINVAL;
1748
1749 for (i = 0; i < SCI_MAX_PHYS; i++)
1750 if (oem->phys[i].sas_address.high == 0 &&
1751 oem->phys[i].sas_address.low == 0)
1752 return -EINVAL;
1753
1754 if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
1755 for (i = 0; i < SCI_MAX_PHYS; i++)
1756 if (oem->ports[i].phy_mask != 0)
1757 return -EINVAL;
1758 } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
1759 u8 phy_mask = 0;
1760
1761 for (i = 0; i < SCI_MAX_PHYS; i++)
1762 phy_mask |= oem->ports[i].phy_mask;
1763
1764 if (phy_mask == 0)
1765 return -EINVAL;
1766 } else
1767 return -EINVAL;
1768
1769 if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
1770 return -EINVAL;
1771
1772 return 0;
1773}
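
For illustration, the port-mode rules checked above (APC mode forbids any
per-port phy_mask; manual mode requires at least one phy set across the masks)
can be modeled stand-alone. The constants and helper names below are
illustrative only:

	#include <stdio.h>

	#define EX_MAX_PORTS 4

	/* APC mode: every port's phy_mask must be zero */
	static int masks_valid_for_apc(const unsigned char mask[EX_MAX_PORTS])
	{
		for (int i = 0; i < EX_MAX_PORTS; i++)
			if (mask[i] != 0)
				return 0;
		return 1;
	}

	/* manual mode: at least one phy must appear in some mask */
	static int masks_valid_for_manual(const unsigned char mask[EX_MAX_PORTS])
	{
		unsigned char any = 0;

		for (int i = 0; i < EX_MAX_PORTS; i++)
			any |= mask[i];
		return any != 0;
	}

	int main(void)
	{
		unsigned char apc[EX_MAX_PORTS] = { 0, 0, 0, 0 };
		unsigned char manual[EX_MAX_PORTS] = { 0x3, 0xc, 0, 0 };

		printf("apc config: %s\n", masks_valid_for_apc(apc) ? "ok" : "bad");
		printf("manual config: %s\n",
		       masks_valid_for_manual(manual) ? "ok" : "bad");
		return 0;
	}
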
1774
1775static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
1776{
1777 u32 state = ihost->sm.current_state_id;
1778
1779 if (state == SCIC_RESET ||
1780 state == SCIC_INITIALIZING ||
1781 state == SCIC_INITIALIZED) {
1782
1783 if (sci_oem_parameters_validate(&ihost->oem_parameters))
1784 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1785
1786 return SCI_SUCCESS;
1787 }
1788
1789 return SCI_FAILURE_INVALID_STATE;
1790}
1791
1792static void power_control_timeout(unsigned long data)
1793{
1794 struct sci_timer *tmr = (struct sci_timer *)data;
1795 struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
1796 struct isci_phy *iphy;
1797 unsigned long flags;
1798 u8 i;
1799
1800 spin_lock_irqsave(&ihost->scic_lock, flags);
1801
1802 if (tmr->cancel)
1803 goto done;
1804
1805 ihost->power_control.phys_granted_power = 0;
1806
1807 if (ihost->power_control.phys_waiting == 0) {
1808 ihost->power_control.timer_started = false;
1809 goto done;
1810 }
1811
1812 for (i = 0; i < SCI_MAX_PHYS; i++) {
1813
1814 if (ihost->power_control.phys_waiting == 0)
1815 break;
1816
1817 iphy = ihost->power_control.requesters[i];
1818 if (iphy == NULL)
1819 continue;
1820
1821 if (ihost->power_control.phys_granted_power >=
1822 ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
1823 break;
1824
1825 ihost->power_control.requesters[i] = NULL;
1826 ihost->power_control.phys_waiting--;
1827 ihost->power_control.phys_granted_power++;
1828 sci_phy_consume_power_handler(iphy);
1829 }
1830
1831 /*
1832	 * It doesn't matter if the power list is empty; we need to start the
1833 * timer in case another phy becomes ready.
1834 */
1835 sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1836 ihost->power_control.timer_started = true;
1837
1838done:
1839 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1840}
1841
1842void sci_controller_power_control_queue_insert(struct isci_host *ihost,
1843 struct isci_phy *iphy)
1844{
1845 BUG_ON(iphy == NULL);
1846
1847 if (ihost->power_control.phys_granted_power <
1848 ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
1849 ihost->power_control.phys_granted_power++;
1850 sci_phy_consume_power_handler(iphy);
1851
1852 /*
1853		 * stop and start the power_control timer. When the timer fires, the
1854		 * phys_granted_power count will be reset to 0
1855 */
1856 if (ihost->power_control.timer_started)
1857 sci_del_timer(&ihost->power_control.timer);
1858
1859 sci_mod_timer(&ihost->power_control.timer,
1860 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1861 ihost->power_control.timer_started = true;
1862
1863 } else {
1864 /* Add the phy in the waiting list */
1865 ihost->power_control.requesters[iphy->phy_index] = iphy;
1866 ihost->power_control.phys_waiting++;
1867 }
1868}
1869
1870void sci_controller_power_control_queue_remove(struct isci_host *ihost,
1871 struct isci_phy *iphy)
1872{
1873 BUG_ON(iphy == NULL);
1874
1875 if (ihost->power_control.requesters[iphy->phy_index])
1876 ihost->power_control.phys_waiting--;
1877
1878 ihost->power_control.requesters[iphy->phy_index] = NULL;
1879}
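
Taken together, power_control_timeout() and the two queue helpers above
implement a simple spin-up throttle: up to max_concurrent_dev_spin_up phys are
granted power immediately, later requesters wait in the phy-indexed
requesters[] array, and each timer interval re-zeroes the granted count and
releases another batch. A minimal stand-alone model of that behavior; names
and sizes here are illustrative:

	#include <stdio.h>

	#define EX_MAX_PHYS 4

	struct throttle {
		int granted, waiting, max_concurrent;
		int queued[EX_MAX_PHYS];	/* phy-indexed, like requesters[] */
	};

	static void request_power(struct throttle *t, int phy)
	{
		if (t->granted < t->max_concurrent) {
			t->granted++;
			printf("phy %d: spin up now\n", phy);
		} else {
			t->queued[phy] = 1;
			t->waiting++;
			printf("phy %d: queued\n", phy);
		}
	}

	static void interval_timer_fired(struct throttle *t)
	{
		t->granted = 0;			/* new interval, new budget */
		for (int i = 0; i < EX_MAX_PHYS; i++) {
			if (t->waiting == 0 || t->granted >= t->max_concurrent)
				break;
			if (!t->queued[i])
				continue;
			t->queued[i] = 0;
			t->waiting--;
			t->granted++;
			printf("phy %d: spin up (deferred one interval)\n", i);
		}
	}

	int main(void)
	{
		struct throttle t = { .max_concurrent = 1 };

		for (int i = 0; i < EX_MAX_PHYS; i++)
			request_power(&t, i);	/* phy 0 granted, 1-3 queued */
		interval_timer_fired(&t);	/* phy 1 released this interval */
		return 0;
	}
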
1880
1881#define AFE_REGISTER_WRITE_DELAY 10
1882
1883/* Initialize the AFE for this phy index. We need to read the AFE setup from
1884 * the OEM parameters
1885 */
1886static void sci_controller_afe_initialization(struct isci_host *ihost)
1887{
1888 const struct sci_oem_params *oem = &ihost->oem_parameters;
1889 struct pci_dev *pdev = ihost->pdev;
1890 u32 afe_status;
1891 u32 phy_id;
1892
1893 /* Clear DFX Status registers */
1894 writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
1895 udelay(AFE_REGISTER_WRITE_DELAY);
1896
1897 if (is_b0(pdev)) {
1898 /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
1899 * Timer, PM Stagger Timer */
1900 writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
1901 udelay(AFE_REGISTER_WRITE_DELAY);
1902 }
1903
1904 /* Configure bias currents to normal */
1905 if (is_a2(pdev))
1906 writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
1907 else if (is_b0(pdev) || is_c0(pdev))
1908 writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
1909
1910 udelay(AFE_REGISTER_WRITE_DELAY);
1911
1912 /* Enable PLL */
1913 if (is_b0(pdev) || is_c0(pdev))
1914 writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
1915 else
1916 writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
1917
1918 udelay(AFE_REGISTER_WRITE_DELAY);
1919
1920 /* Wait for the PLL to lock */
1921 do {
1922 afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
1923 udelay(AFE_REGISTER_WRITE_DELAY);
1924 } while ((afe_status & 0x00001000) == 0);
1925
1926 if (is_a2(pdev)) {
1927 /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
1928 writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
1929 udelay(AFE_REGISTER_WRITE_DELAY);
1930 }
1931
1932 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
1933 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
1934
1935 if (is_b0(pdev)) {
1936 /* Configure transmitter SSC parameters */
1937 writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
1938 udelay(AFE_REGISTER_WRITE_DELAY);
1939 } else if (is_c0(pdev)) {
1940 /* Configure transmitter SSC parameters */
1941 writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
1942 udelay(AFE_REGISTER_WRITE_DELAY);
1943
1944 /*
1945			 * All defaults, except the Receive Word Alignment/Comma Detect
1946 * Enable....(0xe800) */
1947 writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
1948 udelay(AFE_REGISTER_WRITE_DELAY);
1949 } else {
1950 /*
1951			 * All defaults, except the Receive Word Alignment/Comma Detect
1952 * Enable....(0xe800) */
1953 writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
1954 udelay(AFE_REGISTER_WRITE_DELAY);
1955
1956 writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
1957 udelay(AFE_REGISTER_WRITE_DELAY);
1958 }
1959
1960 /*
1961 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
1962 * & increase TX int & ext bias 20%....(0xe85c) */
1963 if (is_a2(pdev))
1964 writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
1965 else if (is_b0(pdev)) {
1966 /* Power down TX and RX (PWRDNTX and PWRDNRX) */
1967 writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
1968 udelay(AFE_REGISTER_WRITE_DELAY);
1969
1970 /*
1971 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
1972 * & increase TX int & ext bias 20%....(0xe85c) */
1973 writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
1974 } else {
1975 writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
1976 udelay(AFE_REGISTER_WRITE_DELAY);
1977
1978 /*
1979 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
1980 * & increase TX int & ext bias 20%....(0xe85c) */
1981 writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
1982 }
1983 udelay(AFE_REGISTER_WRITE_DELAY);
1984
1985 if (is_a2(pdev)) {
1986 /* Enable TX equalization (0xe824) */
1987 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
1988 udelay(AFE_REGISTER_WRITE_DELAY);
1989 }
1990
1991 /*
1992 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
1993 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
1994 writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
1995 udelay(AFE_REGISTER_WRITE_DELAY);
1996
1997 /* Leave DFE/FFE on */
1998 if (is_a2(pdev))
1999 writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
2000 else if (is_b0(pdev)) {
2001 writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
2002 udelay(AFE_REGISTER_WRITE_DELAY);
2003 /* Enable TX equalization (0xe824) */
2004 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
2005 } else {
2006 writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
2007 udelay(AFE_REGISTER_WRITE_DELAY);
2008
2009 writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
2010 udelay(AFE_REGISTER_WRITE_DELAY);
2011
2012 /* Enable TX equalization (0xe824) */
2013 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
2014 }
2015
2016 udelay(AFE_REGISTER_WRITE_DELAY);
2017
2018 writel(oem_phy->afe_tx_amp_control0,
2019 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
2020 udelay(AFE_REGISTER_WRITE_DELAY);
2021
2022 writel(oem_phy->afe_tx_amp_control1,
2023 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
2024 udelay(AFE_REGISTER_WRITE_DELAY);
2025
2026 writel(oem_phy->afe_tx_amp_control2,
2027 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
2028 udelay(AFE_REGISTER_WRITE_DELAY);
2029
2030 writel(oem_phy->afe_tx_amp_control3,
2031 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
2032 udelay(AFE_REGISTER_WRITE_DELAY);
2033 }
2034
2035 /* Transfer control to the PEs */
2036 writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
2037 udelay(AFE_REGISTER_WRITE_DELAY);
2038}
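
One caveat in the sequence above is the unbounded do/while poll for PLL lock
(bit 12 of afe_common_block_status). The sketch below shows the same wait with
an explicit retry bound added; the retry count is an illustrative assumption,
not a validated hardware limit, and the helper name is hypothetical:

	/* bounded variant of the PLL-lock wait in the function above */
	static int example_wait_pll_lock(struct isci_host *ihost)
	{
		u32 afe_status;
		int retries;

		for (retries = 0; retries < 1000; retries++) {
			afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
			if (afe_status & 0x00001000)	/* PLL locked */
				return 0;
			udelay(AFE_REGISTER_WRITE_DELAY);
		}
		return -ETIMEDOUT;
	}
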
2039
2040static void sci_controller_initialize_power_control(struct isci_host *ihost)
2041{
2042 sci_init_timer(&ihost->power_control.timer, power_control_timeout);
2043
2044 memset(ihost->power_control.requesters, 0,
2045 sizeof(ihost->power_control.requesters));
2046
2047 ihost->power_control.phys_waiting = 0;
2048 ihost->power_control.phys_granted_power = 0;
2049}
2050
2051static enum sci_status sci_controller_initialize(struct isci_host *ihost)
2052{
2053 struct sci_base_state_machine *sm = &ihost->sm;
2054 enum sci_status result = SCI_FAILURE;
2055 unsigned long i, state, val;
2056
2057 if (ihost->sm.current_state_id != SCIC_RESET) {
2058 dev_warn(&ihost->pdev->dev,
2059 "SCIC Controller initialize operation requested "
2060 "in invalid state\n");
2061 return SCI_FAILURE_INVALID_STATE;
2062 }
2063
2064 sci_change_state(sm, SCIC_INITIALIZING);
2065
2066 sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
2067
2068 ihost->next_phy_to_start = 0;
2069 ihost->phy_startup_timer_pending = false;
2070
2071 sci_controller_initialize_power_control(ihost);
2072
2073 /*
2074 * There is nothing to do here for B0 since we do not have to
2075 * program the AFE registers.
2076	 * @todo The AFE settings are supposed to be correct for the B0 but
2077	 * presently they seem to be wrong. */
2078 sci_controller_afe_initialization(ihost);
2079
2080
2081 /* Take the hardware out of reset */
2082 writel(0, &ihost->smu_registers->soft_reset_control);
2083
2084 /*
2085	 * @todo Provide a meaningful error code for hardware failure, e.g.
2086 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
2087 for (i = 100; i >= 1; i--) {
2088 u32 status;
2089
2090 /* Loop until the hardware reports success */
2091 udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
2092 status = readl(&ihost->smu_registers->control_status);
2093
2094 if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
2095 break;
2096 }
2097 if (i == 0)
2098 goto out;
2099
2100 /*
2101	 * Determine the actual device capacities that the
2102 * hardware will support */
2103 val = readl(&ihost->smu_registers->device_context_capacity);
2104
2105 /* Record the smaller of the two capacity values */
2106 ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
2107 ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
2108 ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
2109
2110 /*
2111 * Make all PEs that are unassigned match up with the
2112 * logical ports
2113 */
2114 for (i = 0; i < ihost->logical_port_entries; i++) {
2115 struct scu_port_task_scheduler_group_registers __iomem
2116 *ptsg = &ihost->scu_registers->peg0.ptsg;
2117
2118 writel(i, &ptsg->protocol_engine[i]);
2119 }
2120
2121 /* Initialize hardware PCI Relaxed ordering in DMA engines */
2122 val = readl(&ihost->scu_registers->sdma.pdma_configuration);
2123 val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2124 writel(val, &ihost->scu_registers->sdma.pdma_configuration);
2125
2126 val = readl(&ihost->scu_registers->sdma.cdma_configuration);
2127 val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2128 writel(val, &ihost->scu_registers->sdma.cdma_configuration);
2129
2130 /*
2131 * Initialize the PHYs before the PORTs because the PHY registers
2132 * are accessed during the port initialization.
2133 */
2134 for (i = 0; i < SCI_MAX_PHYS; i++) {
2135 result = sci_phy_initialize(&ihost->phys[i],
2136 &ihost->scu_registers->peg0.pe[i].tl,
2137 &ihost->scu_registers->peg0.pe[i].ll);
2138 if (result != SCI_SUCCESS)
2139 goto out;
2140 }
2141
2142 for (i = 0; i < ihost->logical_port_entries; i++) {
2143 struct isci_port *iport = &ihost->ports[i];
2144
2145 iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
2146 iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
2147 iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
2148 }
2149
2150 result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
2151
2152 out:
2153 /* Advance the controller state machine */
2154 if (result == SCI_SUCCESS)
2155 state = SCIC_INITIALIZED;
2156 else
2157 state = SCIC_FAILED;
2158 sci_change_state(sm, state);
2159
2160 return result;
2161}
2162
2163static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
2164 struct sci_user_parameters *sci_parms)
2165{
2166 u32 state = ihost->sm.current_state_id;
2167
2168 if (state == SCIC_RESET ||
2169 state == SCIC_INITIALIZING ||
2170 state == SCIC_INITIALIZED) {
2171 u16 index;
2172
2173 /*
2174 * Validate the user parameters. If they are not legal, then
2175 * return a failure.
2176 */
2177 for (index = 0; index < SCI_MAX_PHYS; index++) {
2178 struct sci_phy_user_params *user_phy;
2179
2180 user_phy = &sci_parms->phys[index];
2181
2182 if (!((user_phy->max_speed_generation <=
2183 SCIC_SDS_PARM_MAX_SPEED) &&
2184 (user_phy->max_speed_generation >
2185 SCIC_SDS_PARM_NO_SPEED)))
2186 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2187
2188 if (user_phy->in_connection_align_insertion_frequency <
2189 3)
2190 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2191
2192 if ((user_phy->in_connection_align_insertion_frequency <
2193 3) ||
2194 (user_phy->align_insertion_frequency == 0) ||
2195 (user_phy->
2196 notify_enable_spin_up_insertion_frequency ==
2197 0))
2198 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2199 }
2200
2201 if ((sci_parms->stp_inactivity_timeout == 0) ||
2202 (sci_parms->ssp_inactivity_timeout == 0) ||
2203 (sci_parms->stp_max_occupancy_timeout == 0) ||
2204 (sci_parms->ssp_max_occupancy_timeout == 0) ||
2205 (sci_parms->no_outbound_task_timeout == 0))
2206 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2207
2208 memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
2209
2210 return SCI_SUCCESS;
2211 }
2212
2213 return SCI_FAILURE_INVALID_STATE;
2214}
2215
2216static int sci_controller_mem_init(struct isci_host *ihost)
2217{
2218 struct device *dev = &ihost->pdev->dev;
2219 dma_addr_t dma;
2220 size_t size;
2221 int err;
2222
2223 size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
2224 ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
2225 if (!ihost->completion_queue)
2226 return -ENOMEM;
2227
2228 writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
2229 writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
2230
2231 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
2232 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
2233 GFP_KERNEL);
2234 if (!ihost->remote_node_context_table)
2235 return -ENOMEM;
2236
2237 writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
2238 writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
2239
2240	size = ihost->task_context_entries * sizeof(struct scu_task_context);
2241 ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
2242 if (!ihost->task_context_table)
2243 return -ENOMEM;
2244
2245 ihost->task_context_dma = dma;
2246 writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
2247 writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
2248
2249 err = sci_unsolicited_frame_control_construct(ihost);
2250 if (err)
2251 return err;
2252
2253 /*
2254 * Inform the silicon as to the location of the UF headers and
2255 * address table.
2256 */
2257 writel(lower_32_bits(ihost->uf_control.headers.physical_address),
2258 &ihost->scu_registers->sdma.uf_header_base_address_lower);
2259 writel(upper_32_bits(ihost->uf_control.headers.physical_address),
2260 &ihost->scu_registers->sdma.uf_header_base_address_upper);
2261
2262 writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
2263 &ihost->scu_registers->sdma.uf_address_table_lower);
2264 writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
2265 &ihost->scu_registers->sdma.uf_address_table_upper);
2266
2267 return 0;
2268}
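
Each table base programmed above is split across a lower/upper register pair.
A stand-alone model of that split; the address is an arbitrary example:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t dma = 0x0000001fdeadbeefULL;	/* example bus address */
		uint32_t lo = (uint32_t)dma;		/* lower_32_bits() */
		uint32_t hi = (uint32_t)(dma >> 32);	/* upper_32_bits() */

		printf("program base: lower=%#x upper=%#x\n", lo, hi);
		return 0;
	}
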
2269
2270int isci_host_init(struct isci_host *ihost)
2271{
2272 int err = 0, i;
2273 enum sci_status status;
2274 struct sci_user_parameters sci_user_params;
2275 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
2276
2277 spin_lock_init(&ihost->state_lock);
2278 spin_lock_init(&ihost->scic_lock);
2279 init_waitqueue_head(&ihost->eventq);
2280
2281 isci_host_change_state(ihost, isci_starting);
2282
2283 status = sci_controller_construct(ihost, scu_base(ihost),
2284 smu_base(ihost));
2285
2286 if (status != SCI_SUCCESS) {
2287 dev_err(&ihost->pdev->dev,
2288 "%s: sci_controller_construct failed - status = %x\n",
2289 __func__,
2290 status);
2291 return -ENODEV;
2292 }
2293
2294 ihost->sas_ha.dev = &ihost->pdev->dev;
2295 ihost->sas_ha.lldd_ha = ihost;
2296
2297 /*
2298 * grab initial values stored in the controller object for OEM and USER
2299 * parameters
2300 */
2301 isci_user_parameters_get(&sci_user_params);
2302 status = sci_user_parameters_set(ihost, &sci_user_params);
2303 if (status != SCI_SUCCESS) {
2304 dev_warn(&ihost->pdev->dev,
2305 "%s: sci_user_parameters_set failed\n",
2306 __func__);
2307 return -ENODEV;
2308 }
2309
2310 /* grab any OEM parameters specified in orom */
2311 if (pci_info->orom) {
2312 status = isci_parse_oem_parameters(&ihost->oem_parameters,
2313 pci_info->orom,
2314 ihost->id);
2315 if (status != SCI_SUCCESS) {
2316 dev_warn(&ihost->pdev->dev,
2317 "parsing firmware oem parameters failed\n");
2318 return -EINVAL;
2319 }
2320 }
2321
2322 status = sci_oem_parameters_set(ihost);
2323 if (status != SCI_SUCCESS) {
2324 dev_warn(&ihost->pdev->dev,
2325 "%s: sci_oem_parameters_set failed\n",
2326 __func__);
2327 return -ENODEV;
2328 }
2329
2330 tasklet_init(&ihost->completion_tasklet,
2331 isci_host_completion_routine, (unsigned long)ihost);
2332
2333 INIT_LIST_HEAD(&ihost->requests_to_complete);
2334 INIT_LIST_HEAD(&ihost->requests_to_errorback);
2335
2336 spin_lock_irq(&ihost->scic_lock);
2337 status = sci_controller_initialize(ihost);
2338 spin_unlock_irq(&ihost->scic_lock);
2339 if (status != SCI_SUCCESS) {
2340 dev_warn(&ihost->pdev->dev,
2341 "%s: sci_controller_initialize failed -"
2342 " status = 0x%x\n",
2343 __func__, status);
2344 return -ENODEV;
2345 }
2346
2347 err = sci_controller_mem_init(ihost);
2348 if (err)
2349 return err;
2350
2351 for (i = 0; i < SCI_MAX_PORTS; i++)
2352 isci_port_init(&ihost->ports[i], ihost, i);
2353
2354 for (i = 0; i < SCI_MAX_PHYS; i++)
2355 isci_phy_init(&ihost->phys[i], ihost, i);
2356
2357 for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
2358 struct isci_remote_device *idev = &ihost->devices[i];
2359
2360 INIT_LIST_HEAD(&idev->reqs_in_process);
2361 INIT_LIST_HEAD(&idev->node);
2362 }
2363
2364 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
2365 struct isci_request *ireq;
2366 dma_addr_t dma;
2367
2368 ireq = dmam_alloc_coherent(&ihost->pdev->dev,
2369 sizeof(struct isci_request), &dma,
2370 GFP_KERNEL);
2371 if (!ireq)
2372 return -ENOMEM;
2373
2374 ireq->tc = &ihost->task_context_table[i];
2375 ireq->owning_controller = ihost;
2376 spin_lock_init(&ireq->state_lock);
2377 ireq->request_daddr = dma;
2378 ireq->isci_host = ihost;
2379 ihost->reqs[i] = ireq;
2380 }
2381
2382 return 0;
2383}
2384
2385void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
2386 struct isci_phy *iphy)
2387{
2388 switch (ihost->sm.current_state_id) {
2389 case SCIC_STARTING:
2390 sci_del_timer(&ihost->phy_timer);
2391 ihost->phy_startup_timer_pending = false;
2392 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2393 iport, iphy);
2394 sci_controller_start_next_phy(ihost);
2395 break;
2396 case SCIC_READY:
2397 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2398 iport, iphy);
2399 break;
2400 default:
2401 dev_dbg(&ihost->pdev->dev,
2402 "%s: SCIC Controller linkup event from phy %d in "
2403 "unexpected state %d\n", __func__, iphy->phy_index,
2404 ihost->sm.current_state_id);
2405 }
2406}
2407
2408void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
2409 struct isci_phy *iphy)
2410{
2411 switch (ihost->sm.current_state_id) {
2412 case SCIC_STARTING:
2413 case SCIC_READY:
2414 ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
2415 iport, iphy);
2416 break;
2417 default:
2418 dev_dbg(&ihost->pdev->dev,
2419 "%s: SCIC Controller linkdown event from phy %d in "
2420 "unexpected state %d\n",
2421 __func__,
2422 iphy->phy_index,
2423 ihost->sm.current_state_id);
2424 }
2425}
2426
2427static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
2428{
2429 u32 index;
2430
2431 for (index = 0; index < ihost->remote_node_entries; index++) {
2432 if ((ihost->device_table[index] != NULL) &&
2433 (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
2434 return true;
2435 }
2436
2437 return false;
2438}
2439
2440void sci_controller_remote_device_stopped(struct isci_host *ihost,
2441 struct isci_remote_device *idev)
2442{
2443 if (ihost->sm.current_state_id != SCIC_STOPPING) {
2444 dev_dbg(&ihost->pdev->dev,
2445 "SCIC Controller 0x%p remote device stopped event "
2446 "from device 0x%p in unexpected state %d\n",
2447 ihost, idev,
2448 ihost->sm.current_state_id);
2449 return;
2450 }
2451
2452 if (!sci_controller_has_remote_devices_stopping(ihost))
2453 sci_change_state(&ihost->sm, SCIC_STOPPED);
2454}
2455
2456void sci_controller_post_request(struct isci_host *ihost, u32 request)
2457{
2458 dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
2459 __func__, ihost->id, request);
2460
2461 writel(request, &ihost->smu_registers->post_context_port);
2462}
2463
2464struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
2465{
2466 u16 task_index;
2467 u16 task_sequence;
2468
2469 task_index = ISCI_TAG_TCI(io_tag);
2470
2471 if (task_index < ihost->task_context_entries) {
2472 struct isci_request *ireq = ihost->reqs[task_index];
2473
2474 if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
2475 task_sequence = ISCI_TAG_SEQ(io_tag);
2476
2477 if (task_sequence == ihost->io_request_sequence[task_index])
2478 return ireq;
2479 }
2480 }
2481
2482 return NULL;
2483}
2484
2485/**
2486 * This method allocates a remote node index and reserves the remote node
2487 * context space for use. This method can fail if there are no more remote
2488 * node indexes available.
2489 * @ihost: This is the controller object which contains the set of
2490 *    free remote node ids
2491 * @idev: This is the device object which is requesting a remote node
2492 *    id
2493 * @node_id: This is the remote node id that is assigned to the device if one
2494 *    is available
2495 *
2496 * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no remote
2497 * node indexes available.
2498 */
2499enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
2500 struct isci_remote_device *idev,
2501 u16 *node_id)
2502{
2503 u16 node_index;
2504 u32 remote_node_count = sci_remote_device_node_count(idev);
2505
2506 node_index = sci_remote_node_table_allocate_remote_node(
2507 &ihost->available_remote_nodes, remote_node_count
2508 );
2509
2510 if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
2511 ihost->device_table[node_index] = idev;
2512
2513 *node_id = node_index;
2514
2515 return SCI_SUCCESS;
2516 }
2517
2518 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
2519}
2520
2521void sci_controller_free_remote_node_context(struct isci_host *ihost,
2522 struct isci_remote_device *idev,
2523 u16 node_id)
2524{
2525 u32 remote_node_count = sci_remote_device_node_count(idev);
2526
2527 if (ihost->device_table[node_id] == idev) {
2528 ihost->device_table[node_id] = NULL;
2529
2530 sci_remote_node_table_release_remote_node_index(
2531 &ihost->available_remote_nodes, remote_node_count, node_id
2532 );
2533 }
2534}
2535
2536void sci_controller_copy_sata_response(void *response_buffer,
2537 void *frame_header,
2538 void *frame_buffer)
2539{
2540 /* XXX type safety? */
2541 memcpy(response_buffer, frame_header, sizeof(u32));
2542
2543 memcpy(response_buffer + sizeof(u32),
2544 frame_buffer,
2545 sizeof(struct dev_to_host_fis) - sizeof(u32));
2546}
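
The copy above reassembles a SATA device-to-host FIS whose first dword is
delivered in the unsolicited frame header and whose remainder sits in the
frame buffer. A stand-alone model, assuming the standard 20-byte register FIS
size for struct dev_to_host_fis:

	#include <stdio.h>
	#include <string.h>

	#define FIS_SIZE 20	/* assumed sizeof(struct dev_to_host_fis) */

	int main(void)
	{
		unsigned char header[4] = { 0x34, 0, 0x50, 0 };	/* FIS type 0x34 */
		unsigned char buffer[FIS_SIZE - 4] = { 0 };	/* rest of the FIS */
		unsigned char fis[FIS_SIZE];

		memcpy(fis, header, 4);			 /* first dword from header */
		memcpy(fis + 4, buffer, sizeof(buffer)); /* rest from frame buffer */
		printf("reassembled FIS type %#x\n", fis[0]);
		return 0;
	}
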
2547
2548void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
2549{
2550 if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
2551 writel(ihost->uf_control.get,
2552 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
2553}
2554
2555void isci_tci_free(struct isci_host *ihost, u16 tci)
2556{
2557 u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
2558
2559 ihost->tci_pool[tail] = tci;
2560 ihost->tci_tail = tail + 1;
2561}
2562
2563static u16 isci_tci_alloc(struct isci_host *ihost)
2564{
2565 u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
2566 u16 tci = ihost->tci_pool[head];
2567
2568 ihost->tci_head = head + 1;
2569 return tci;
2570}
2571
2572static u16 isci_tci_space(struct isci_host *ihost)
2573{
2574 return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
2575}
2576
2577u16 isci_alloc_tag(struct isci_host *ihost)
2578{
2579 if (isci_tci_space(ihost)) {
2580 u16 tci = isci_tci_alloc(ihost);
2581 u8 seq = ihost->io_request_sequence[tci];
2582
2583 return ISCI_TAG(seq, tci);
2584 }
2585
2586 return SCI_CONTROLLER_INVALID_IO_TAG;
2587}
2588
2589enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
2590{
2591 u16 tci = ISCI_TAG_TCI(io_tag);
2592 u16 seq = ISCI_TAG_SEQ(io_tag);
2593
2594 /* prevent tail from passing head */
2595 if (isci_tci_active(ihost) == 0)
2596 return SCI_FAILURE_INVALID_IO_TAG;
2597
2598 if (seq == ihost->io_request_sequence[tci]) {
2599 ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
2600
2601 isci_tci_free(ihost, tci);
2602
2603 return SCI_SUCCESS;
2604 }
2605 return SCI_FAILURE_INVALID_IO_TAG;
2606}
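
The tag helpers above pair a circular pool of task context indices (tci) with
a per-tci generation number (seq): isci_free_tag() bumps the generation, so a
stale tag held past completion no longer matches and is rejected. A
stand-alone model of that scheme, assuming the 12-bit tci / 4-bit seq split
used by the ISCI_TAG helpers in host.h:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define EX_MAX_REQS 256			/* SCI_MAX_IO_REQUESTS */
	#define EX_MAX_SEQ  16			/* assumed 4-bit sequence space */

	static uint8_t seqno[EX_MAX_REQS];	/* io_request_sequence[] analogue */

	static uint16_t tag_make(uint8_t seq, uint16_t tci)
	{
		return (uint16_t)(seq << 12 | tci);	/* assumed ISCI_TAG layout */
	}

	static uint16_t tag_tci(uint16_t tag) { return tag & (EX_MAX_REQS - 1); }
	static uint8_t  tag_seq(uint16_t tag) { return (tag >> 12) & (EX_MAX_SEQ - 1); }

	int main(void)
	{
		uint16_t tci = 5;
		uint16_t tag = tag_make(seqno[tci], tci);

		assert(tag_seq(tag) == seqno[tag_tci(tag)]);	/* tag is live */

		/* freeing bumps the generation, as isci_free_tag() does above */
		seqno[tci] = (seqno[tci] + 1) & (EX_MAX_SEQ - 1);

		assert(tag_seq(tag) != seqno[tag_tci(tag)]);	/* tag is now stale */
		printf("stale tag %#x would be rejected\n", tag);
		return 0;
	}
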
2607
2608enum sci_status sci_controller_start_io(struct isci_host *ihost,
2609 struct isci_remote_device *idev,
2610 struct isci_request *ireq)
2611{
2612 enum sci_status status;
2613
2614 if (ihost->sm.current_state_id != SCIC_READY) {
2615 dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
2616 return SCI_FAILURE_INVALID_STATE;
2617 }
2618
2619 status = sci_remote_device_start_io(ihost, idev, ireq);
2620 if (status != SCI_SUCCESS)
2621 return status;
2622
2623 set_bit(IREQ_ACTIVE, &ireq->flags);
2624 sci_controller_post_request(ihost, ireq->post_context);
2625 return SCI_SUCCESS;
2626}
2627
2628enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
2629 struct isci_remote_device *idev,
2630 struct isci_request *ireq)
2631{
2632 /* terminate an ongoing (i.e. started) core IO request. This does not
2633 * abort the IO request at the target, but rather removes the IO
2634 * request from the host controller.
2635 */
2636 enum sci_status status;
2637
2638 if (ihost->sm.current_state_id != SCIC_READY) {
2639 dev_warn(&ihost->pdev->dev,
2640 "invalid state to terminate request\n");
2641 return SCI_FAILURE_INVALID_STATE;
2642 }
2643
2644 status = sci_io_request_terminate(ireq);
2645 if (status != SCI_SUCCESS)
2646 return status;
2647
2648 /*
2649	 * Utilize the original post context command and OR in the POST_TC_ABORT
2650 * request sub-type.
2651 */
2652 sci_controller_post_request(ihost,
2653 ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
2654 return SCI_SUCCESS;
2655}
2656
2657/**
2658 * sci_controller_complete_io() - This method will perform core specific
2659 * completion operations for an IO request. After this method is invoked,
2660 * the user should consider the IO request as invalid until it is properly
2661 * reused (i.e. re-constructed).
2662 * @ihost: The handle to the controller object for which to complete the
2663 * IO request.
2664 * @idev: The handle to the remote device object for which to complete
2665 * the IO request.
2666 * @ireq: the handle to the io request object to complete.
2667 */
2668enum sci_status sci_controller_complete_io(struct isci_host *ihost,
2669 struct isci_remote_device *idev,
2670 struct isci_request *ireq)
2671{
2672 enum sci_status status;
2673 u16 index;
2674
2675 switch (ihost->sm.current_state_id) {
2676 case SCIC_STOPPING:
2677 /* XXX: Implement this function */
2678 return SCI_FAILURE;
2679 case SCIC_READY:
2680 status = sci_remote_device_complete_io(ihost, idev, ireq);
2681 if (status != SCI_SUCCESS)
2682 return status;
2683
2684 index = ISCI_TAG_TCI(ireq->io_tag);
2685 clear_bit(IREQ_ACTIVE, &ireq->flags);
2686 return SCI_SUCCESS;
2687 default:
2688 dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
2689 return SCI_FAILURE_INVALID_STATE;
2690 }
2691
2692}
2693
2694enum sci_status sci_controller_continue_io(struct isci_request *ireq)
2695{
2696 struct isci_host *ihost = ireq->owning_controller;
2697
2698 if (ihost->sm.current_state_id != SCIC_READY) {
2699 dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
2700 return SCI_FAILURE_INVALID_STATE;
2701 }
2702
2703 set_bit(IREQ_ACTIVE, &ireq->flags);
2704 sci_controller_post_request(ihost, ireq->post_context);
2705 return SCI_SUCCESS;
2706}
2707
2708/**
2709 * sci_controller_start_task() - This method is called by the SCIC user to
2710 * send/start a framework task management request.
2711 * @ihost: the handle to the controller object for which to start the task
2712 *    management request.
2713 * @idev: the handle to the remote device object for which to start
2714 *    the task management request.
2715 * @ireq: the handle to the task request object to start.
2716 */
2717enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
2718 struct isci_remote_device *idev,
2719 struct isci_request *ireq)
2720{
2721 enum sci_status status;
2722
2723 if (ihost->sm.current_state_id != SCIC_READY) {
2724 dev_warn(&ihost->pdev->dev,
2725 "%s: SCIC Controller starting task from invalid "
2726 "state\n",
2727 __func__);
2728 return SCI_TASK_FAILURE_INVALID_STATE;
2729 }
2730
2731 status = sci_remote_device_start_task(ihost, idev, ireq);
2732 switch (status) {
2733 case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
2734 set_bit(IREQ_ACTIVE, &ireq->flags);
2735
2736 /*
2737		 * We will let the framework know this task request started
2738		 * successfully, although the core is still working on starting
2739		 * the request (it will post the TC when the RNC is resumed).
2740 */
2741 return SCI_SUCCESS;
2742 case SCI_SUCCESS:
2743 set_bit(IREQ_ACTIVE, &ireq->flags);
2744 sci_controller_post_request(ihost, ireq->post_context);
2745 break;
2746 default:
2747 break;
2748 }
2749
2750 return status;
2751}
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
new file mode 100644
index 000000000000..062101a39f79
--- /dev/null
+++ b/drivers/scsi/isci/host.h
@@ -0,0 +1,542 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55#ifndef _SCI_HOST_H_
56#define _SCI_HOST_H_
57
58#include "remote_device.h"
59#include "phy.h"
60#include "isci.h"
61#include "remote_node_table.h"
62#include "registers.h"
63#include "unsolicited_frame_control.h"
64#include "probe_roms.h"
65
66struct isci_request;
67struct scu_task_context;
68
69
70/**
71 * struct sci_power_control -
72 *
73 * This structure defines the fields for managing power control for direct
74 * attached disk devices.
75 */
76struct sci_power_control {
77 /**
78 * This field is set when the power control timer is running and cleared when
79 * it is not.
80 */
81 bool timer_started;
82
83 /**
 84	 * Timer to control when the direct attached disks can consume power.
85 */
86 struct sci_timer timer;
87
88 /**
89 * This field is used to keep track of how many phys are put into the
90 * requesters field.
91 */
92 u8 phys_waiting;
93
94 /**
95 * This field is used to keep track of how many phys have been granted to consume power
96 */
97 u8 phys_granted_power;
98
99 /**
 100	 * This field is an array of phys that we are waiting on. The phys are
 101	 * directly mapped into requesters via struct sci_phy.phy_index
102 */
103 struct isci_phy *requesters[SCI_MAX_PHYS];
104
105};
106
107struct sci_port_configuration_agent;
108typedef void (*port_config_fn)(struct isci_host *,
109 struct sci_port_configuration_agent *,
110 struct isci_port *, struct isci_phy *);
111
112struct sci_port_configuration_agent {
113 u16 phy_configured_mask;
114 u16 phy_ready_mask;
115 struct {
116 u8 min_index;
117 u8 max_index;
118 } phy_valid_port_range[SCI_MAX_PHYS];
119 bool timer_pending;
120 port_config_fn link_up_handler;
121 port_config_fn link_down_handler;
122 struct sci_timer timer;
123};
124
125/**
126 * isci_host - primary host/controller object
127 * @timer: timeout start/stop operations
128 * @device_table: rni (hw remote node index) to remote device lookup table
129 * @available_remote_nodes: rni allocator
130 * @power_control: manage device spin up
131 * @io_request_sequence: generation number for tci's (task contexts)
132 * @task_context_table: hw task context table
133 * @remote_node_context_table: hw remote node context table
134 * @completion_queue: hw-producer driver-consumer communication ring
135 * @completion_queue_get: tracks the driver 'head' of the ring to notify hw
136 * @logical_port_entries: min({driver|silicon}-supported-port-count)
137 * @remote_node_entries: min({driver|silicon}-supported-node-count)
138 * @task_context_entries: min({driver|silicon}-supported-task-count)
139 * @phy_timer: phy startup timer
140 * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for
141 * the phy index is set so further notifications are not
142 * made. Once the phy reports link up and is made part of a
143 * port then this bit is cleared.
 144 *
145 */
146struct isci_host {
147 struct sci_base_state_machine sm;
148 /* XXX can we time this externally */
149 struct sci_timer timer;
150 /* XXX drop reference module params directly */
151 struct sci_user_parameters user_parameters;
152 /* XXX no need to be a union */
153 struct sci_oem_params oem_parameters;
154 struct sci_port_configuration_agent port_agent;
155 struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];
156 struct sci_remote_node_table available_remote_nodes;
157 struct sci_power_control power_control;
158 u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
159 struct scu_task_context *task_context_table;
160 dma_addr_t task_context_dma;
161 union scu_remote_node_context *remote_node_context_table;
162 u32 *completion_queue;
163 u32 completion_queue_get;
164 u32 logical_port_entries;
165 u32 remote_node_entries;
166 u32 task_context_entries;
167 struct sci_unsolicited_frame_control uf_control;
168
169 /* phy startup */
170 struct sci_timer phy_timer;
171 /* XXX kill */
172 bool phy_startup_timer_pending;
173 u32 next_phy_to_start;
174 /* XXX convert to unsigned long and use bitops */
175 u8 invalid_phy_mask;
176
177 /* TODO attempt dynamic interrupt coalescing scheme */
178 u16 interrupt_coalesce_number;
179 u32 interrupt_coalesce_timeout;
180 struct smu_registers __iomem *smu_registers;
181 struct scu_registers __iomem *scu_registers;
182
183 u16 tci_head;
184 u16 tci_tail;
185 u16 tci_pool[SCI_MAX_IO_REQUESTS];
186
187 int id; /* unique within a given pci device */
188 struct isci_phy phys[SCI_MAX_PHYS];
189 struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
190 struct sas_ha_struct sas_ha;
191
192 spinlock_t state_lock;
193 struct pci_dev *pdev;
194 enum isci_status status;
195 #define IHOST_START_PENDING 0
196 #define IHOST_STOP_PENDING 1
197 unsigned long flags;
198 wait_queue_head_t eventq;
199 struct Scsi_Host *shost;
200 struct tasklet_struct completion_tasklet;
201 struct list_head requests_to_complete;
202 struct list_head requests_to_errorback;
203 spinlock_t scic_lock;
204 struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
205 struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
206};
207
208/**
209 * enum sci_controller_states - This enumeration depicts all the states
210 * for the common controller state machine.
211 */
212enum sci_controller_states {
213 /**
214 * Simply the initial state for the base controller state machine.
215 */
216 SCIC_INITIAL = 0,
217
218 /**
219 * This state indicates that the controller is reset. The memory for
 220	 * the controller is in its initial state, but the controller requires
221 * initialization.
222 * This state is entered from the INITIAL state.
223 * This state is entered from the RESETTING state.
224 */
225 SCIC_RESET,
226
227 /**
228 * This state is typically an action state that indicates the controller
229 * is in the process of initialization. In this state no new IO operations
230 * are permitted.
231 * This state is entered from the RESET state.
232 */
233 SCIC_INITIALIZING,
234
235 /**
236 * This state indicates that the controller has been successfully
237 * initialized. In this state no new IO operations are permitted.
238 * This state is entered from the INITIALIZING state.
239 */
240 SCIC_INITIALIZED,
241
242 /**
243 * This state indicates the the controller is in the process of becoming
244 * ready (i.e. starting). In this state no new IO operations are permitted.
245 * This state is entered from the INITIALIZED state.
246 */
247 SCIC_STARTING,
248
249 /**
250 * This state indicates the controller is now ready. Thus, the user
251 * is able to perform IO operations on the controller.
252 * This state is entered from the STARTING state.
253 */
254 SCIC_READY,
255
256 /**
257 * This state is typically an action state that indicates the controller
258 * is in the process of resetting. Thus, the user is unable to perform
259 * IO operations on the controller. A reset is considered destructive in
260 * most cases.
261 * This state is entered from the READY state.
262 * This state is entered from the FAILED state.
263 * This state is entered from the STOPPED state.
264 */
265 SCIC_RESETTING,
266
267 /**
268 * This state indicates that the controller is in the process of stopping.
269 * In this state no new IO operations are permitted, but existing IO
270 * operations are allowed to complete.
271 * This state is entered from the READY state.
272 */
273 SCIC_STOPPING,
274
275 /**
276 * This state indicates that the controller has successfully been stopped.
277 * In this state no new IO operations are permitted.
278 * This state is entered from the STOPPING state.
279 */
280 SCIC_STOPPED,
281
282 /**
283 * This state indicates that the controller could not successfully be
284 * initialized. In this state no new IO operations are permitted.
285 * This state is entered from the INITIALIZING state.
286 * This state is entered from the STARTING state.
287 * This state is entered from the STOPPING state.
288 * This state is entered from the RESETTING state.
289 */
290 SCIC_FAILED,
291};
292
293/**
294 * struct isci_pci_info - This class represents the pci function containing the
295 * controllers. Depending on PCI SKU, there could be up to 2 controllers in
296 * the PCI function.
297 */
298#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
299
300struct isci_pci_info {
301 struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
302 struct isci_host *hosts[SCI_MAX_CONTROLLERS];
303 struct isci_orom *orom;
304};
305
306static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
307{
308 return pci_get_drvdata(pdev);
309}
310
311#define for_each_isci_host(id, ihost, pdev) \
312 for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
313 id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
314 ihost = to_pci_info(pdev)->hosts[++id])
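Note that the loop stops at the first NULL entry in hosts[], so on a single-controller part only hosts[0] is visited; isci_pci_probe() and isci_pci_remove() in init.c below rely on this to visit exactly as many controllers as were allocated.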
315
316static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
317{
318 return isci_host->status;
319}
320
321static inline void isci_host_change_state(struct isci_host *isci_host,
322 enum isci_status status)
323{
324 unsigned long flags;
325
326 dev_dbg(&isci_host->pdev->dev,
327 "%s: isci_host = %p, state = 0x%x",
328 __func__,
329 isci_host,
330 status);
331 spin_lock_irqsave(&isci_host->state_lock, flags);
332 isci_host->status = status;
333 spin_unlock_irqrestore(&isci_host->state_lock, flags);
334
335}
336
337static inline void wait_for_start(struct isci_host *ihost)
338{
339 wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
340}
341
342static inline void wait_for_stop(struct isci_host *ihost)
343{
344 wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
345}
346
347static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
348{
349 wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
350}
351
352static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
353{
354 wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
355}
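These wrappers simply sleep on the host event queue until the corresponding pending bit clears. A minimal sketch of the completion side under the same convention (the helper name here is hypothetical; the real completion paths live in host.c):

	/* hypothetical sketch: the completion side of wait_for_start() */
	static inline void example_start_complete(struct isci_host *ihost)
	{
		clear_bit(IHOST_START_PENDING, &ihost->flags);
		wake_up(&ihost->eventq);	/* releases waiters in wait_for_start() */
	}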
356
357static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
358{
359 return dev->port->ha->lldd_ha;
360}
361
362/* we always use protocol engine group zero */
363#define ISCI_PEG 0
364
365/* see sci_controller_io_tag_allocate|free for how seq and tci are built */
366#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
367
368/* these are returned by the hardware, so sanitize them */
369#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
370#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
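For illustration, with SCI_MAX_SEQ of 16 and SCI_MAX_IO_REQUESTS of 256 (see isci.h below), the sequence number occupies the top four bits of the tag and the task context index the low byte; the values in this sketch are hypothetical:

	u16 tag = ISCI_TAG(0x3, 0x2a);	/* (0x3 << 12) | 0x2a == 0x302a */
	u8 seq = ISCI_TAG_SEQ(tag);	/* (0x302a >> 12) & 0xf == 0x3 */
	u16 tci = ISCI_TAG_TCI(tag);	/* 0x302a & 0xff == 0x2a */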
371
372/* expander attached sata devices require 3 rnc slots */
373static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
374{
375 struct domain_device *dev = idev->domain_dev;
376
377 if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
378 !idev->is_direct_attached)
379 return SCU_STP_REMOTE_NODE_COUNT;
380 return SCU_SSP_REMOTE_NODE_COUNT;
381}
382
383/**
384 * sci_controller_clear_invalid_phy() -
385 *
386 * This macro will clear the bit in the invalid phy mask for this controller
387 * object. This is used to control messages reported for invalid link up
388 * notifications.
389 */
390#define sci_controller_clear_invalid_phy(controller, phy) \
391 ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
392
393static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
394{
395
396 if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
397 return NULL;
398
399 return &iphy->isci_port->isci_host->pdev->dev;
400}
401
402static inline struct device *sciport_to_dev(struct isci_port *iport)
403{
404
405 if (!iport || !iport->isci_host)
406 return NULL;
407
408 return &iport->isci_host->pdev->dev;
409}
410
411static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
412{
413 if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
414 return NULL;
415
416 return &idev->isci_port->isci_host->pdev->dev;
417}
418
419static inline bool is_a2(struct pci_dev *pdev)
420{
421 if (pdev->revision < 4)
422 return true;
423 return false;
424}
425
426static inline bool is_b0(struct pci_dev *pdev)
427{
428 if (pdev->revision == 4)
429 return true;
430 return false;
431}
432
433static inline bool is_c0(struct pci_dev *pdev)
434{
435 if (pdev->revision >= 5)
436 return true;
437 return false;
438}
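Taken together, these three predicates partition the revision space: revisions below 4 are A2 silicon, revision 4 is B0, and revision 5 or greater is C0, so exactly one of them holds for any given part.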
439
440void sci_controller_post_request(struct isci_host *ihost,
441 u32 request);
442void sci_controller_release_frame(struct isci_host *ihost,
443 u32 frame_index);
444void sci_controller_copy_sata_response(void *response_buffer,
445 void *frame_header,
446 void *frame_buffer);
447enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
448 struct isci_remote_device *idev,
449 u16 *node_id);
450void sci_controller_free_remote_node_context(
451 struct isci_host *ihost,
452 struct isci_remote_device *idev,
453 u16 node_id);
454
455struct isci_request *sci_request_by_tag(struct isci_host *ihost,
456 u16 io_tag);
457
458void sci_controller_power_control_queue_insert(
459 struct isci_host *ihost,
460 struct isci_phy *iphy);
461
462void sci_controller_power_control_queue_remove(
463 struct isci_host *ihost,
464 struct isci_phy *iphy);
465
466void sci_controller_link_up(
467 struct isci_host *ihost,
468 struct isci_port *iport,
469 struct isci_phy *iphy);
470
471void sci_controller_link_down(
472 struct isci_host *ihost,
473 struct isci_port *iport,
474 struct isci_phy *iphy);
475
476void sci_controller_remote_device_stopped(
477 struct isci_host *ihost,
478 struct isci_remote_device *idev);
479
480void sci_controller_copy_task_context(
481 struct isci_host *ihost,
482 struct isci_request *ireq);
483
484void sci_controller_register_setup(struct isci_host *ihost);
485
486enum sci_status sci_controller_continue_io(struct isci_request *ireq);
487int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
488void isci_host_scan_start(struct Scsi_Host *);
489u16 isci_alloc_tag(struct isci_host *ihost);
490enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
491void isci_tci_free(struct isci_host *ihost, u16 tci);
492
493int isci_host_init(struct isci_host *);
494
495void isci_host_init_controller_names(
496 struct isci_host *isci_host,
497 unsigned int controller_idx);
498
499void isci_host_deinit(
500 struct isci_host *);
501
502void isci_host_port_link_up(
503 struct isci_host *,
504 struct isci_port *,
505 struct isci_phy *);
506int isci_host_dev_found(struct domain_device *);
507
508void isci_host_remote_device_start_complete(
509 struct isci_host *,
510 struct isci_remote_device *,
511 enum sci_status);
512
513void sci_controller_disable_interrupts(
514 struct isci_host *ihost);
515
516enum sci_status sci_controller_start_io(
517 struct isci_host *ihost,
518 struct isci_remote_device *idev,
519 struct isci_request *ireq);
520
521enum sci_task_status sci_controller_start_task(
522 struct isci_host *ihost,
523 struct isci_remote_device *idev,
524 struct isci_request *ireq);
525
526enum sci_status sci_controller_terminate_request(
527 struct isci_host *ihost,
528 struct isci_remote_device *idev,
529 struct isci_request *ireq);
530
531enum sci_status sci_controller_complete_io(
532 struct isci_host *ihost,
533 struct isci_remote_device *idev,
534 struct isci_request *ireq);
535
536void sci_port_configuration_agent_construct(
537 struct sci_port_configuration_agent *port_agent);
538
539enum sci_status sci_port_configuration_agent_initialize(
540 struct isci_host *ihost,
541 struct sci_port_configuration_agent *port_agent);
542#endif
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
new file mode 100644
index 000000000000..61e0d09e2b57
--- /dev/null
+++ b/drivers/scsi/isci/init.c
@@ -0,0 +1,565 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include <linux/kernel.h>
57#include <linux/init.h>
58#include <linux/module.h>
59#include <linux/firmware.h>
60#include <linux/efi.h>
61#include <asm/string.h>
62#include "isci.h"
63#include "task.h"
64#include "probe_roms.h"
65
66static struct scsi_transport_template *isci_transport_template;
67
68static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
69 { PCI_VDEVICE(INTEL, 0x1D61),},
70 { PCI_VDEVICE(INTEL, 0x1D63),},
71 { PCI_VDEVICE(INTEL, 0x1D65),},
72 { PCI_VDEVICE(INTEL, 0x1D67),},
73 { PCI_VDEVICE(INTEL, 0x1D69),},
74 { PCI_VDEVICE(INTEL, 0x1D6B),},
75 { PCI_VDEVICE(INTEL, 0x1D60),},
76 { PCI_VDEVICE(INTEL, 0x1D62),},
77 { PCI_VDEVICE(INTEL, 0x1D64),},
78 { PCI_VDEVICE(INTEL, 0x1D66),},
79 { PCI_VDEVICE(INTEL, 0x1D68),},
80 { PCI_VDEVICE(INTEL, 0x1D6A),},
81 {}
82};
83
84MODULE_DEVICE_TABLE(pci, isci_id_table);
85
86/* linux isci specific settings */
87
88unsigned char no_outbound_task_to = 20;
89module_param(no_outbound_task_to, byte, 0);
90MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
91
92u16 ssp_max_occ_to = 20;
93module_param(ssp_max_occ_to, ushort, 0);
94MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");
95
96u16 stp_max_occ_to = 5;
97module_param(stp_max_occ_to, ushort, 0);
98MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");
99
100u16 ssp_inactive_to = 5;
101module_param(ssp_inactive_to, ushort, 0);
102MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");
103
104u16 stp_inactive_to = 5;
105module_param(stp_inactive_to, ushort, 0);
106MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
107
108unsigned char phy_gen = 3;
109module_param(phy_gen, byte, 0);
110MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
111
112unsigned char max_concurr_spinup = 1;
113module_param(max_concurr_spinup, byte, 0);
114MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
115
116static struct scsi_host_template isci_sht = {
117
118 .module = THIS_MODULE,
119 .name = DRV_NAME,
120 .proc_name = DRV_NAME,
121 .queuecommand = sas_queuecommand,
122 .target_alloc = sas_target_alloc,
123 .slave_configure = sas_slave_configure,
124 .slave_destroy = sas_slave_destroy,
125 .scan_finished = isci_host_scan_finished,
126 .scan_start = isci_host_scan_start,
127 .change_queue_depth = sas_change_queue_depth,
128 .change_queue_type = sas_change_queue_type,
129 .bios_param = sas_bios_param,
130 .can_queue = ISCI_CAN_QUEUE_VAL,
131 .cmd_per_lun = 1,
132 .this_id = -1,
133 .sg_tablesize = SG_ALL,
134 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
135 .use_clustering = ENABLE_CLUSTERING,
136 .eh_device_reset_handler = sas_eh_device_reset_handler,
137 .eh_bus_reset_handler = isci_bus_reset_handler,
138 .slave_alloc = sas_slave_alloc,
139 .target_destroy = sas_target_destroy,
140 .ioctl = sas_ioctl,
141};
142
143static struct sas_domain_function_template isci_transport_ops = {
144
145 /* The class calls these to notify the LLDD of an event. */
146 .lldd_port_formed = isci_port_formed,
147 .lldd_port_deformed = isci_port_deformed,
148
149 /* The class calls these when a device is found or gone. */
150 .lldd_dev_found = isci_remote_device_found,
151 .lldd_dev_gone = isci_remote_device_gone,
152
153 .lldd_execute_task = isci_task_execute_task,
154 /* Task Management Functions. Must be called from process context. */
155 .lldd_abort_task = isci_task_abort_task,
156 .lldd_abort_task_set = isci_task_abort_task_set,
157 .lldd_clear_aca = isci_task_clear_aca,
158 .lldd_clear_task_set = isci_task_clear_task_set,
159 .lldd_I_T_nexus_reset = isci_task_I_T_nexus_reset,
160 .lldd_lu_reset = isci_task_lu_reset,
161 .lldd_query_task = isci_task_query_task,
162
163 /* Port and Adapter management */
164 .lldd_clear_nexus_port = isci_task_clear_nexus_port,
165 .lldd_clear_nexus_ha = isci_task_clear_nexus_ha,
166
167 /* Phy management */
168 .lldd_control_phy = isci_phy_control,
169};
170
171
172/******************************************************************************
173* P R O T E C T E D M E T H O D S
174******************************************************************************/
175
176
177
178/**
179 * isci_register_sas_ha() - This method initializes various lldd
180 * specific members of the sas_ha struct and calls the libsas
181 * sas_register_ha() function.
182 * @isci_host: This parameter specifies the lldd specific wrapper for the
183 * libsas sas_ha struct.
184 *
185 * This method returns an error code indicating success or failure. The
186 * caller should check for a possible memory-allocation error return;
187 * otherwise, zero indicates success.
188 */
189static int isci_register_sas_ha(struct isci_host *isci_host)
190{
191 int i;
192 struct sas_ha_struct *sas_ha = &(isci_host->sas_ha);
193 struct asd_sas_phy **sas_phys;
194 struct asd_sas_port **sas_ports;
195
196 sas_phys = devm_kzalloc(&isci_host->pdev->dev,
197 SCI_MAX_PHYS * sizeof(void *),
198 GFP_KERNEL);
199 if (!sas_phys)
200 return -ENOMEM;
201
202 sas_ports = devm_kzalloc(&isci_host->pdev->dev,
203 SCI_MAX_PORTS * sizeof(void *),
204 GFP_KERNEL);
205 if (!sas_ports)
206 return -ENOMEM;
207
208 /*----------------- Libsas Initialization Stuff----------------------
209 * Set various fields in the sas_ha struct:
210 */
211
212 sas_ha->sas_ha_name = DRV_NAME;
213 sas_ha->lldd_module = THIS_MODULE;
214 sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0];
215
216 /* set the array of phy and port structs. */
217 for (i = 0; i < SCI_MAX_PHYS; i++) {
218 sas_phys[i] = &isci_host->phys[i].sas_phy;
219 sas_ports[i] = &isci_host->ports[i].sas_port;
220 }
221
222 sas_ha->sas_phy = sas_phys;
223 sas_ha->sas_port = sas_ports;
224 sas_ha->num_phys = SCI_MAX_PHYS;
225
226 sas_ha->lldd_queue_size = ISCI_CAN_QUEUE_VAL;
227 sas_ha->lldd_max_execute_num = 1;
228 sas_ha->strict_wide_ports = 1;
229
230 sas_register_ha(sas_ha);
231
232 return 0;
233}
234
235static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
236{
237 struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
238 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
239 struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
240
241 return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
242}
243
244static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
245
246static void isci_unregister(struct isci_host *isci_host)
247{
248 struct Scsi_Host *shost;
249
250 if (!isci_host)
251 return;
252
253 shost = isci_host->shost;
254 device_remove_file(&shost->shost_dev, &dev_attr_isci_id);
255
256 sas_unregister_ha(&isci_host->sas_ha);
257
258 sas_remove_host(isci_host->shost);
259 scsi_remove_host(isci_host->shost);
260 scsi_host_put(isci_host->shost);
261}
262
263static int __devinit isci_pci_init(struct pci_dev *pdev)
264{
265 int err, bar_num, bar_mask = 0;
266 void __iomem * const *iomap;
267
268 err = pcim_enable_device(pdev);
269 if (err) {
270 dev_err(&pdev->dev,
271 "failed enable PCI device %s!\n",
272 pci_name(pdev));
273 return err;
274 }
275
276 for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
277 bar_mask |= 1 << (bar_num * 2);
278
279 err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
280 if (err)
281 return err;
282
283 iomap = pcim_iomap_table(pdev);
284 if (!iomap)
285 return -ENOMEM;
286
287 pci_set_master(pdev);
288
289 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
290 if (err) {
291 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
292 if (err)
293 return err;
294 }
295
296 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
297 if (err) {
298 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
299 if (err)
300 return err;
301 }
302
303 return 0;
304}
305
306static int num_controllers(struct pci_dev *pdev)
307{
308 /* bar size alone can tell us if we are running with a dual controller
309 * part, no need to trust revision ids that might be under broken firmware
310 * control
311 */
312 resource_size_t scu_bar_size = pci_resource_len(pdev, SCI_SCU_BAR*2);
313 resource_size_t smu_bar_size = pci_resource_len(pdev, SCI_SMU_BAR*2);
314
315 if (scu_bar_size >= SCI_SCU_BAR_SIZE*SCI_MAX_CONTROLLERS &&
316 smu_bar_size >= SCI_SMU_BAR_SIZE*SCI_MAX_CONTROLLERS)
317 return SCI_MAX_CONTROLLERS;
318 else
319 return 1;
320}
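As a concrete check, using the sizes from isci.h below (a 16 KiB SMU BAR and a 4 MiB SCU BAR per controller), a dual-controller part must expose at least 32 KiB and 8 MiB respectively; anything smaller is treated as a single-controller part.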
321
322static int isci_setup_interrupts(struct pci_dev *pdev)
323{
324 int err, i, num_msix;
325 struct isci_host *ihost;
326 struct isci_pci_info *pci_info = to_pci_info(pdev);
327
328 /*
329 * Determine the number of vectors associated with this
330 * PCI function.
331 */
332 num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
333
334 for (i = 0; i < num_msix; i++)
335 pci_info->msix_entries[i].entry = i;
336
337 err = pci_enable_msix(pdev, pci_info->msix_entries, num_msix);
338 if (err)
339 goto intx;
340
341 for (i = 0; i < num_msix; i++) {
342 int id = i / SCI_NUM_MSI_X_INT;
343 struct msix_entry *msix = &pci_info->msix_entries[i];
344 irq_handler_t isr;
345
346 ihost = pci_info->hosts[id];
347 /* odd numbered vectors are error interrupts */
348 if (i & 1)
349 isr = isci_error_isr;
350 else
351 isr = isci_msix_isr;
352
353 err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
354 DRV_NAME"-msix", ihost);
355 if (!err)
356 continue;
357
358 dev_info(&pdev->dev, "msix setup failed falling back to intx\n");
359 while (i--) {
360 id = i / SCI_NUM_MSI_X_INT;
361 ihost = pci_info->hosts[id];
362 msix = &pci_info->msix_entries[i];
363 devm_free_irq(&pdev->dev, msix->vector, ihost);
364 }
365 pci_disable_msix(pdev);
366 goto intx;
367 }
368 return 0;
369
370 intx:
371 for_each_isci_host(i, ihost, pdev) {
372 err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
373 IRQF_SHARED, DRV_NAME"-intx", ihost);
374 if (err)
375 break;
376 }
377 return err;
378}
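With SCI_NUM_MSI_X_INT of 2, the vector layout is: even vectors (0, 2) carry normal completions for controllers 0 and 1, and odd vectors (1, 3) carry their error interrupts. If any MSI-X request fails, the function unwinds every vector already requested, disables MSI-X, and falls back to a single shared legacy INTx line serving all hosts.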
379
380static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
381{
382 struct isci_host *isci_host;
383 struct Scsi_Host *shost;
384 int err;
385
386 isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL);
387 if (!isci_host)
388 return NULL;
389
390 isci_host->pdev = pdev;
391 isci_host->id = id;
392
393 shost = scsi_host_alloc(&isci_sht, sizeof(void *));
394 if (!shost)
395 return NULL;
396 isci_host->shost = shost;
397
398 err = isci_host_init(isci_host);
399 if (err)
400 goto err_shost;
401
402 SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha;
403 isci_host->sas_ha.core.shost = shost;
404 shost->transportt = isci_transport_template;
405
406 shost->max_id = ~0;
407 shost->max_lun = ~0;
408 shost->max_cmd_len = MAX_COMMAND_SIZE;
409
410 err = scsi_add_host(shost, &pdev->dev);
411 if (err)
412 goto err_shost;
413
414 err = isci_register_sas_ha(isci_host);
415 if (err)
416 goto err_shost_remove;
417
418 err = device_create_file(&shost->shost_dev, &dev_attr_isci_id);
419 if (err)
420 goto err_unregister_ha;
421
422 return isci_host;
423
424 err_unregister_ha:
425 sas_unregister_ha(&(isci_host->sas_ha));
426 err_shost_remove:
427 scsi_remove_host(shost);
428 err_shost:
429 scsi_host_put(shost);
430
431 return NULL;
432}
433
434static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
435{
436 struct isci_pci_info *pci_info;
437 int err, i;
438 struct isci_host *isci_host;
439 const struct firmware *fw = NULL;
440 struct isci_orom *orom = NULL;
441 char *source = "(platform)";
442
443 dev_info(&pdev->dev, "driver configured for rev: %d silicon\n",
444 pdev->revision);
445
446 pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
447 if (!pci_info)
448 return -ENOMEM;
449 pci_set_drvdata(pdev, pci_info);
450
451 if (efi_enabled)
452 orom = isci_get_efi_var(pdev);
453
454 if (!orom)
455 orom = isci_request_oprom(pdev);
456
457 for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
458 if (sci_oem_parameters_validate(&orom->ctrl[i])) {
459 dev_warn(&pdev->dev,
460 "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
461 devm_kfree(&pdev->dev, orom);
462 orom = NULL;
463 break;
464 }
465 }
466
467 if (!orom) {
468 source = "(firmware)";
469 orom = isci_request_firmware(pdev, fw);
470 if (!orom) {
471 /* TODO convert this to WARN_TAINT_ONCE once the
472 * orom/efi parameter support is widely available
473 */
474 dev_warn(&pdev->dev,
475 "Loading user firmware failed, using default "
476 "values\n");
477 dev_warn(&pdev->dev,
478 "Default OEM configuration being used: 4 "
479 "narrow ports, and default SAS Addresses\n");
480 }
481 }
482
483 if (orom)
484 dev_info(&pdev->dev,
485 "OEM SAS parameters (version: %u.%u) loaded %s\n",
486 (orom->hdr.version & 0xf0) >> 4,
487 (orom->hdr.version & 0xf), source);
488
489 pci_info->orom = orom;
490
491 err = isci_pci_init(pdev);
492 if (err)
493 return err;
494
495 for (i = 0; i < num_controllers(pdev); i++) {
496 struct isci_host *h = isci_host_alloc(pdev, i);
497
498 if (!h) {
499 err = -ENOMEM;
500 goto err_host_alloc;
501 }
502 pci_info->hosts[i] = h;
503 }
504
505 err = isci_setup_interrupts(pdev);
506 if (err)
507 goto err_host_alloc;
508
509 for_each_isci_host(i, isci_host, pdev)
510 scsi_scan_host(isci_host->shost);
511
512 return 0;
513
514 err_host_alloc:
515 for_each_isci_host(i, isci_host, pdev)
516 isci_unregister(isci_host);
517 return err;
518}
519
520static void __devexit isci_pci_remove(struct pci_dev *pdev)
521{
522 struct isci_host *ihost;
523 int i;
524
525 for_each_isci_host(i, ihost, pdev) {
526 isci_unregister(ihost);
527 isci_host_deinit(ihost);
528 sci_controller_disable_interrupts(ihost);
529 }
530}
531
532static struct pci_driver isci_pci_driver = {
533 .name = DRV_NAME,
534 .id_table = isci_id_table,
535 .probe = isci_pci_probe,
536 .remove = __devexit_p(isci_pci_remove),
537};
538
539static __init int isci_init(void)
540{
541 int err;
542
543 pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME);
544
545 isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
546 if (!isci_transport_template)
547 return -ENOMEM;
548
549 err = pci_register_driver(&isci_pci_driver);
550 if (err)
551 sas_release_transport(isci_transport_template);
552
553 return err;
554}
555
556static __exit void isci_exit(void)
557{
558 pci_unregister_driver(&isci_pci_driver);
559 sas_release_transport(isci_transport_template);
560}
561
562MODULE_LICENSE("Dual BSD/GPL");
563MODULE_FIRMWARE(ISCI_FW_NAME);
564module_init(isci_init);
565module_exit(isci_exit);
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
new file mode 100644
index 000000000000..d1de63312e7f
--- /dev/null
+++ b/drivers/scsi/isci/isci.h
@@ -0,0 +1,538 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef __ISCI_H__
57#define __ISCI_H__
58
59#include <linux/interrupt.h>
60#include <linux/types.h>
61
62#define DRV_NAME "isci"
63#define SCI_PCI_BAR_COUNT 2
64#define SCI_NUM_MSI_X_INT 2
65#define SCI_SMU_BAR 0
66#define SCI_SMU_BAR_SIZE (16*1024)
67#define SCI_SCU_BAR 1
68#define SCI_SCU_BAR_SIZE (4*1024*1024)
69#define SCI_IO_SPACE_BAR0 2
70#define SCI_IO_SPACE_BAR1 3
71#define ISCI_CAN_QUEUE_VAL 250 /* < SCI_MAX_IO_REQUESTS ? */
72#define SCIC_CONTROLLER_STOP_TIMEOUT 5000
73
74#define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF
75
76#define SCI_MAX_PHYS (4UL)
77#define SCI_MAX_PORTS SCI_MAX_PHYS
78#define SCI_MAX_SMP_PHYS (384) /* not silicon constrained */
79#define SCI_MAX_REMOTE_DEVICES (256UL)
80#define SCI_MAX_IO_REQUESTS (256UL)
81#define SCI_MAX_SEQ (16)
82#define SCI_MAX_MSIX_MESSAGES (2)
83#define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 /* not silicon constrained */
84#define SCI_MAX_CONTROLLERS 2
85#define SCI_MAX_DOMAINS SCI_MAX_PORTS
86
87#define SCU_MAX_CRITICAL_NOTIFICATIONS (384)
88#define SCU_MAX_EVENTS_SHIFT (7)
89#define SCU_MAX_EVENTS (1 << SCU_MAX_EVENTS_SHIFT)
90#define SCU_MAX_UNSOLICITED_FRAMES (128)
91#define SCU_MAX_COMPLETION_QUEUE_SCRATCH (128)
92#define SCU_MAX_COMPLETION_QUEUE_ENTRIES (SCU_MAX_CRITICAL_NOTIFICATIONS \
93 + SCU_MAX_EVENTS \
94 + SCU_MAX_UNSOLICITED_FRAMES \
95 + SCI_MAX_IO_REQUESTS \
96 + SCU_MAX_COMPLETION_QUEUE_SCRATCH)
97#define SCU_MAX_COMPLETION_QUEUE_SHIFT (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
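With the values above, the queue size works out to 384 + 128 + 128 + 256 + 128 = 1024 entries, a power of two, so SCU_MAX_COMPLETION_QUEUE_SHIFT evaluates to 10 and the BUILD_BUG_ON_NOT_POWER_OF_2() check in check_sizes() below holds.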
98
99#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
100#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024)
101#define SCU_INVALID_FRAME_INDEX (0xFFFF)
102
103#define SCU_IO_REQUEST_MAX_SGE_SIZE (0x00FFFFFF)
104#define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH (0x00FFFFFF)
105
106static inline void check_sizes(void)
107{
108 BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_EVENTS);
109 BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES <= 8);
110 BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_UNSOLICITED_FRAMES);
111 BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_COMPLETION_QUEUE_ENTRIES);
112 BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES > SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES);
113 BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_IO_REQUESTS);
114 BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_SEQ);
115}
116
117/**
118 * enum sci_status - This is the general return status enumeration for non-IO,
119 * non-task management related SCI interface methods.
120 *
121 *
122 */
123enum sci_status {
124 /**
125 * This member indicates successful completion.
126 */
127 SCI_SUCCESS = 0,
128
129 /**
130 * This value indicates that the calling method completed successfully,
131 * but that the IO may have completed before having its start method
132 * invoked. This occurs during SAT translation for requests that do
133 * not require an IO to the target or for any other requests that may
134 * be completed without having to submit IO.
135 */
136 SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
137
138 /**
139 * This value indicates that the SCU hardware returned an early response
140 * because the io request specified more data than is returned by the
141 * target device (mode pages, inquiry data, etc.). The completion routine
142 * will handle this case to get the actual number of bytes transferred.
143 */
144 SCI_SUCCESS_IO_DONE_EARLY,
145
146 /**
147 * This member indicates that the object for which a state change is
148 * being requested is already in said state.
149 */
150 SCI_WARNING_ALREADY_IN_STATE,
151
152 /**
153 * This member indicates the interrupt coalescence timer may cause SAS
154 * specification compliance issues (i.e. SMP target mode response
155 * frames must be returned within 1.9 milliseconds).
156 */
157 SCI_WARNING_TIMER_CONFLICT,
158
159 /**
160 * This field indicates a sequence of actions is not yet complete. Mostly,
161 * this status is used when multiple ATA commands are needed in a SATI translation.
162 */
163 SCI_WARNING_SEQUENCE_INCOMPLETE,
164
165 /**
166 * This member indicates that there was a general failure.
167 */
168 SCI_FAILURE,
169
170 /**
171 * This member indicates that the SCI implementation is unable to complete
172 * an operation due to a critical flaw that prevents any further operation
173 * (i.e. an invalid pointer).
174 */
175 SCI_FATAL_ERROR,
176
177 /**
178 * This member indicates the calling function failed, because the state
179 * of the controller is in a state that prevents successful completion.
180 */
181 SCI_FAILURE_INVALID_STATE,
182
183 /**
184 * This member indicates the calling function failed, because there is
185 * insufficient resources/memory to complete the request.
186 */
187 SCI_FAILURE_INSUFFICIENT_RESOURCES,
188
189 /**
190 * This member indicates the calling function failed, because the
191 * controller object required for the operation can't be located.
192 */
193 SCI_FAILURE_CONTROLLER_NOT_FOUND,
194
195 /**
196 * This member indicates the calling function failed, because the
197 * discovered controller type is not supported by the library.
198 */
199 SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE,
200
201 /**
202 * This member indicates the calling function failed, because the
203 * requested initialization data version isn't supported.
204 */
205 SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION,
206
207 /**
208 * This member indicates the calling function failed, because the
209 * requested configuration of SAS Phys into SAS Ports is not supported.
210 */
211 SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION,
212
213 /**
214 * This member indicates the calling function failed, because the
215 * requested protocol is not supported by the remote device, port,
216 * or controller.
217 */
218 SCI_FAILURE_UNSUPPORTED_PROTOCOL,
219
220 /**
221 * This member indicates the calling function failed, because the
222 * requested information type is not supported by the SCI implementation.
223 */
224 SCI_FAILURE_UNSUPPORTED_INFORMATION_TYPE,
225
226 /**
227 * This member indicates the calling function failed, because the
228 * device already exists.
229 */
230 SCI_FAILURE_DEVICE_EXISTS,
231
232 /**
233 * This member indicates the calling function failed, because adding
234 * a phy to the object is not possible.
235 */
236 SCI_FAILURE_ADDING_PHY_UNSUPPORTED,
237
238 /**
239 * This member indicates the calling function failed, because the
240 * requested information type is not supported by the SCI implementation.
241 */
242 SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD,
243
244 /**
245 * This member indicates the calling function failed, because the SCI
246 * implementation does not support the supplied time limit.
247 */
248 SCI_FAILURE_UNSUPPORTED_TIME_LIMIT,
249
250 /**
251 * This member indicates the calling method failed, because the SCI
252 * implementation does not contain the specified Phy.
253 */
254 SCI_FAILURE_INVALID_PHY,
255
256 /**
257 * This member indicates the calling method failed, because the SCI
258 * implementation does not contain the specified Port.
259 */
260 SCI_FAILURE_INVALID_PORT,
261
262 /**
263 * This member indicates the calling method was partly successful.
264 * The port was reset, but not all phys in the port are operational.
265 */
266 SCI_FAILURE_RESET_PORT_PARTIAL_SUCCESS,
267
268 /**
269 * This member indicates that the calling method failed.
270 * The port reset did not complete because none of the phys are operational.
271 */
272 SCI_FAILURE_RESET_PORT_FAILURE,
273
274 /**
275 * This member indicates the calling method failed, because the SCI
276 * implementation does not contain the specified remote device.
277 */
278 SCI_FAILURE_INVALID_REMOTE_DEVICE,
279
280 /**
281 * This member indicates the calling method failed, because the remote
282 * device is in a bad state and requires a reset.
283 */
284 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
285
286 /**
287 * This member indicates the calling method failed, because the SCI
288 * implementation does not contain or support the specified IO tag.
289 */
290 SCI_FAILURE_INVALID_IO_TAG,
291
292 /**
293 * This member indicates that the operation failed and the user should
294 * check the response data associated with the IO.
295 */
296 SCI_FAILURE_IO_RESPONSE_VALID,
297
298 /**
299 * This member indicates that the operation failed, the failure is
300 * controller implementation specific, and the response data associated
301 * with the request is not valid. You can query for the controller
302 * specific error information via sci_controller_get_request_status()
303 */
304 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
305
306 /**
307 * This member indicates that the operation failed because the
308 * user requested this IO to be terminated.
309 */
310 SCI_FAILURE_IO_TERMINATED,
311
312 /**
313 * This member indicates that the operation failed and the associated
314 * request requires a SCSI abort task to be sent to the target.
315 */
316 SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
317
318 /**
319 * This member indicates that the operation failed because the supplied
320 * device could not be located.
321 */
322 SCI_FAILURE_DEVICE_NOT_FOUND,
323
324 /**
325 * This member indicates that the operation failed because the
326 * objects association is required and is not correctly set.
327 */
328 SCI_FAILURE_INVALID_ASSOCIATION,
329
330 /**
331 * This member indicates that the operation failed, because a timeout
332 * occurred.
333 */
334 SCI_FAILURE_TIMEOUT,
335
336 /**
337 * This member indicates that the operation failed, because the user
338 * specified a value that is either invalid or not supported.
339 */
340 SCI_FAILURE_INVALID_PARAMETER_VALUE,
341
342 /**
343 * This value indicates that the operation failed, because the number
344 * of messages (MSI-X) is not supported.
345 */
346 SCI_FAILURE_UNSUPPORTED_MESSAGE_COUNT,
347
348 /**
349 * This value indicates that the method failed due to a lack of
350 * available NCQ tags.
351 */
352 SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
353
354 /**
355 * This value indicates that a protocol violation has occurred on the
356 * link.
357 */
358 SCI_FAILURE_PROTOCOL_VIOLATION,
359
360 /**
361 * This value indicates a failure condition that retry may help to clear.
362 */
363 SCI_FAILURE_RETRY_REQUIRED,
364
365 /**
366 * This field indicates that the retry limit was reached while attempting a retry.
367 */
368 SCI_FAILURE_RETRY_LIMIT_REACHED,
369
370 /**
371 * This member indicates the calling method was partly successful.
372 * Mostly, this status is used when a LUN_RESET issued to an expander attached
373 * STP device in READY NCQ substate needs to have RNC suspended/resumed
374 * before posting TC.
375 */
376 SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS,
377
378 /**
379 * This field indicates an illegal phy connection based on the routing
380 * attributes of the two expander phys attached to each other.
381 */
382 SCI_FAILURE_ILLEGAL_ROUTING_ATTRIBUTE_CONFIGURATION,
383
384 /**
385 * This field indicates a CONFIG ROUTE INFO command has a response with function result
386 * INDEX DOES NOT EXIST, which usually means the max route index was exceeded.
387 */
388 SCI_FAILURE_EXCEED_MAX_ROUTE_INDEX,
389
390 /**
391 * This value indicates that an unsupported PCI device ID has been
392 * specified. This indicates that attempts to invoke
393 * sci_library_allocate_controller() will fail.
394 */
395 SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID
396
397};
398
399/**
400 * enum sci_io_status - This enumeration depicts all of the possible IO
401 * completion status values. Each value in this enumeration maps directly
402 * to a value in the enum sci_status enumeration. Please refer to that
403 * enumeration for detailed comments concerning what the status represents.
404 *
405 * Add the API to retrieve the SCU status from the core. Check to see that the
406 * following statuses are properly handled: - SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL
407 * - SCI_IO_FAILURE_INVALID_IO_TAG
408 */
409enum sci_io_status {
410 SCI_IO_SUCCESS = SCI_SUCCESS,
411 SCI_IO_FAILURE = SCI_FAILURE,
412 SCI_IO_SUCCESS_COMPLETE_BEFORE_START = SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
413 SCI_IO_SUCCESS_IO_DONE_EARLY = SCI_SUCCESS_IO_DONE_EARLY,
414 SCI_IO_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE,
415 SCI_IO_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES,
416 SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
417 SCI_IO_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID,
418 SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
419 SCI_IO_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED,
420 SCI_IO_FAILURE_REQUIRES_SCSI_ABORT = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
421 SCI_IO_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
422 SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE = SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
423 SCI_IO_FAILURE_PROTOCOL_VIOLATION = SCI_FAILURE_PROTOCOL_VIOLATION,
424
425 SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
426
427 SCI_IO_FAILURE_RETRY_REQUIRED = SCI_FAILURE_RETRY_REQUIRED,
428 SCI_IO_FAILURE_RETRY_LIMIT_REACHED = SCI_FAILURE_RETRY_LIMIT_REACHED,
429 SCI_IO_FAILURE_INVALID_REMOTE_DEVICE = SCI_FAILURE_INVALID_REMOTE_DEVICE
430};
431
432/**
433 * enum sci_task_status - This enumeration depicts all of the possible task
434 * completion status values. Each value in this enumeration maps directly
435 * to a value in the enum sci_status enumeration. Please refer to that
436 * enumeration for detailed comments concerning what the status represents.
437 *
438 * Check to see that the following statuses are properly handled:
439 */
440enum sci_task_status {
441 SCI_TASK_SUCCESS = SCI_SUCCESS,
442 SCI_TASK_FAILURE = SCI_FAILURE,
443 SCI_TASK_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE,
444 SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES,
445 SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
446 SCI_TASK_FAILURE_INVALID_TAG = SCI_FAILURE_INVALID_IO_TAG,
447 SCI_TASK_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID,
448 SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
449 SCI_TASK_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED,
450 SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
451
452 SCI_TASK_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
453 SCI_TASK_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS = SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS
454
455};
456
457/**
458 * sci_swab32_cpy - convert between scsi and scu-hardware byte format
459 * @dest: receive the 4-byte endian swapped version of src
460 * @src: word aligned source buffer
461 *
462 * scu hardware handles SSP/SMP control, response, and unidentified
463 * frames in "big endian dword" order. Regardless of host endian this
464 * is always a swab32()-per-dword conversion of the standard definition,
465 * i.e. single byte fields swapped and multi-byte fields in little-
466 * endian
467 */
468static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt)
469{
470 u32 *dest = _dest, *src = _src;
471
472 while (--word_cnt >= 0)
473 dest[word_cnt] = swab32(src[word_cnt]);
474}
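A worked example of the conversion, assuming a one-dword buffer with a hypothetical value:

	u32 src[1] = { 0x11223344 };
	u32 dst[1];

	sci_swab32_cpy(dst, src, 1);	/* dst[0] == swab32(0x11223344) == 0x44332211 */

Since each dword is swapped independently and the loop runs from the highest index down, passing the same buffer as both dest and src performs a valid in-place conversion.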
475
476extern unsigned char no_outbound_task_to;
477extern u16 ssp_max_occ_to;
478extern u16 stp_max_occ_to;
479extern u16 ssp_inactive_to;
480extern u16 stp_inactive_to;
481extern unsigned char phy_gen;
482extern unsigned char max_concurr_spinup;
483
484irqreturn_t isci_msix_isr(int vec, void *data);
485irqreturn_t isci_intx_isr(int vec, void *data);
486irqreturn_t isci_error_isr(int vec, void *data);
487
488/*
489 * Each timer is associated with a cancellation flag that is set when
490 * del_timer() is called and checked in the timer callback function. This
491 * is needed since del_timer_sync() cannot be called with sci_lock held.
492 * For deinit however, del_timer_sync() is used without holding the lock.
493 */
494struct sci_timer {
495 struct timer_list timer;
496 bool cancel;
497};
498
499static inline
500void sci_init_timer(struct sci_timer *tmr, void (*fn)(unsigned long))
501{
502 tmr->timer.function = fn;
503 tmr->timer.data = (unsigned long) tmr;
504 tmr->cancel = 0;
505 init_timer(&tmr->timer);
506}
507
508static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
509{
510 tmr->cancel = 0;
511 mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
512}
513
514static inline void sci_del_timer(struct sci_timer *tmr)
515{
516 tmr->cancel = 1;
517 del_timer(&tmr->timer);
518}
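A timer callback built on these helpers is expected to take the relevant lock, test tmr->cancel, and bail out if it is set; phy_sata_timeout() in phy.c below is a representative example of this shape.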
519
520struct sci_base_state_machine {
521 const struct sci_base_state *state_table;
522 u32 initial_state_id;
523 u32 current_state_id;
524 u32 previous_state_id;
525};
526
527typedef void (*sci_state_transition_t)(struct sci_base_state_machine *sm);
528
529struct sci_base_state {
530 sci_state_transition_t enter_state; /* Called on state entry */
531 sci_state_transition_t exit_state; /* Called on state exit */
532};
533
534extern void sci_init_sm(struct sci_base_state_machine *sm,
535 const struct sci_base_state *state_table,
536 u32 initial_state);
537extern void sci_change_state(struct sci_base_state_machine *sm, u32 next_state);
538#endif /* __ISCI_H__ */
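To illustrate how the state machine pieces fit together, here is a minimal hypothetical sketch; the state ids and the handler are invented for illustration only:

	static void example_enter(struct sci_base_state_machine *sm)
	{
		/* invoked by sci_change_state() on entry to EXAMPLE_A */
	}

	enum { EXAMPLE_A, EXAMPLE_B };

	static const struct sci_base_state example_state_table[] = {
		[EXAMPLE_A] = { .enter_state = example_enter },
		[EXAMPLE_B] = { /* no enter/exit handlers */ },
	};

	/* sci_init_sm(&sm, example_state_table, EXAMPLE_A) sets the initial
	 * state; sci_change_state(&sm, EXAMPLE_B) then runs EXAMPLE_A's exit
	 * handler (none here) followed by EXAMPLE_B's enter handler.
	 */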
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
new file mode 100644
index 000000000000..79313a7a2356
--- /dev/null
+++ b/drivers/scsi/isci/phy.c
@@ -0,0 +1,1312 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include "isci.h"
57#include "host.h"
58#include "phy.h"
59#include "scu_event_codes.h"
60#include "probe_roms.h"
61
62/* Maximum arbitration wait time in micro-seconds */
63#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700)
64
65enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
66{
67 return iphy->max_negotiated_speed;
68}
69
70static enum sci_status
71sci_phy_transport_layer_initialization(struct isci_phy *iphy,
72 struct scu_transport_layer_registers __iomem *reg)
73{
74 u32 tl_control;
75
76 iphy->transport_layer_registers = reg;
77
78 writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX,
79 &iphy->transport_layer_registers->stp_rni);
80
81 /*
82 * Hardware team recommends that we enable the STP prefetch for all
83 * transports
84 */
85 tl_control = readl(&iphy->transport_layer_registers->control);
86 tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH);
87 writel(tl_control, &iphy->transport_layer_registers->control);
88
89 return SCI_SUCCESS;
90}
91
92static enum sci_status
93sci_phy_link_layer_initialization(struct isci_phy *iphy,
94 struct scu_link_layer_registers __iomem *reg)
95{
96 struct isci_host *ihost = iphy->owning_port->owning_controller;
97 int phy_idx = iphy->phy_index;
98 struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
99 struct sci_phy_oem_params *phy_oem =
100 &ihost->oem_parameters.phys[phy_idx];
101 u32 phy_configuration;
102 struct sci_phy_cap phy_cap;
103 u32 parity_check = 0;
104 u32 parity_count = 0;
105 u32 llctl, link_rate;
106 u32 clksm_value = 0;
107
108 iphy->link_layer_registers = reg;
109
110 /* Set our IDENTIFY frame data */
111 #define SCI_END_DEVICE 0x01
112
113 writel(SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) |
114 SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) |
115 SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
116 SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
117 SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
118 &iphy->link_layer_registers->transmit_identification);
119
120 /* Write the device SAS Address */
121 writel(0xFEDCBA98,
122 &iphy->link_layer_registers->sas_device_name_high);
123 writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
124
125 /* Write the source SAS Address */
126 writel(phy_oem->sas_address.high,
127 &iphy->link_layer_registers->source_sas_address_high);
128 writel(phy_oem->sas_address.low,
129 &iphy->link_layer_registers->source_sas_address_low);
130
131 /* Clear and Set the PHY Identifier */
132 writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
133 writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
134 &iphy->link_layer_registers->identify_frame_phy_id);
135
136 /* Change the initial state of the phy configuration register */
137 phy_configuration =
138 readl(&iphy->link_layer_registers->phy_configuration);
139
140 /* Hold OOB state machine in reset */
141 phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
142 writel(phy_configuration,
143 &iphy->link_layer_registers->phy_configuration);
144
145 /* Configure the SNW capabilities */
146 phy_cap.all = 0;
147 phy_cap.start = 1;
148 phy_cap.gen3_no_ssc = 1;
149 phy_cap.gen2_no_ssc = 1;
150 phy_cap.gen1_no_ssc = 1;
151 if (ihost->oem_parameters.controller.do_enable_ssc == true) {
152 phy_cap.gen3_ssc = 1;
153 phy_cap.gen2_ssc = 1;
154 phy_cap.gen1_ssc = 1;
155 }
156
157 /*
158 * The SAS specification indicates that the phy_capabilities that
159 * are transmitted shall have an even parity. Calculate the parity. */
160 parity_check = phy_cap.all;
161 while (parity_check != 0) {
162 if (parity_check & 0x1)
163 parity_count++;
164 parity_check >>= 1;
165 }
166
167 /*
168 * If parity indicates there are an odd number of bits set, then
169 * set the parity bit to 1 in the phy capabilities. */
170 if ((parity_count % 2) != 0)
171 phy_cap.parity = 1;
172
173 writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
174
175 /* Set the enable spinup period but disable the ability to send
176 * notify enable spinup
177 */
178 writel(SCU_ENSPINUP_GEN_VAL(COUNT,
179 phy_user->notify_enable_spin_up_insertion_frequency),
180 &iphy->link_layer_registers->notify_enable_spinup_control);
181
182 /* Write the ALIGN insertion frequency for the connected phy and
183 * independent of the connected state
184 */
185 clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(CONNECTED,
186 phy_user->in_connection_align_insertion_frequency);
187
188 clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
189 phy_user->align_insertion_frequency);
190
191 writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
192
193 /* @todo Provide a way to write this register correctly */
194 writel(0x02108421,
195 &iphy->link_layer_registers->afe_lookup_table_control);
196
197 llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
198 (u8)ihost->user_parameters.no_outbound_task_timeout);
199
200 switch (phy_user->max_speed_generation) {
201 case SCIC_SDS_PARM_GEN3_SPEED:
202 link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3;
203 break;
204 case SCIC_SDS_PARM_GEN2_SPEED:
205 link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2;
206 break;
207 default:
208 link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1;
209 break;
210 }
211 llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
212 writel(llctl, &iphy->link_layer_registers->link_layer_control);
213
214 if (is_a2(ihost->pdev)) {
215 /* Program the max ARB time for the PHY to 700us so we inter-operate with
216 * the PMC expander which shuts down PHYs if the expander PHY generates too
217 * many breaks. This time value will guarantee that the initiator PHY will
218 * generate the break.
219 */
220 writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
221 &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
222 }
223
224 /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
225 writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
226
227 /* We can exit the initial state to the stopped state */
228 sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
229
230 return SCI_SUCCESS;
231}
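As a worked example of the parity rule above: with SSC disabled, four capability bits are set (start plus the three no-SSC rates), which is already even, so the parity bit stays clear; with SSC enabled, three more bits are set for seven in total, so the parity bit is set to bring the transmitted count to an even eight.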
232
233static void phy_sata_timeout(unsigned long data)
234{
235 struct sci_timer *tmr = (struct sci_timer *)data;
236 struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer);
237 struct isci_host *ihost = iphy->owning_port->owning_controller;
238 unsigned long flags;
239
240 spin_lock_irqsave(&ihost->scic_lock, flags);
241
242 if (tmr->cancel)
243 goto done;
244
245 dev_dbg(sciphy_to_dev(iphy),
246 "%s: SCIC SDS Phy 0x%p did not receive signature fis before "
247 "timeout.\n",
248 __func__,
249 iphy);
250
251 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
252done:
253 spin_unlock_irqrestore(&ihost->scic_lock, flags);
254}
255
256/**
257 * This method returns the port currently containing this phy. If the phy is
258 * currently contained by the dummy port, then the phy is considered to not
259 * be part of a port.
260 * @iphy: This parameter specifies the phy for which to retrieve the
261 * containing port.
262 *
263 * This method returns a handle to the port that contains the supplied
264 * phy. NULL is returned if the phy is not part of a real port (i.e. it
265 * is contained in the dummy port); any other value is a pointer to the
266 * port containing the phy.
267 */
268struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
269{
270 struct isci_port *iport = iphy->owning_port;
271
272 if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT)
273 return NULL;
274
275 return iphy->owning_port;
276}
277
278/**
279 * This method will assign a port to the phy object.
280 * @iphy: This parameter specifies the phy for which to assign a port
281 * object.
282 *
283 *
284 */
285void sci_phy_set_port(
286 struct isci_phy *iphy,
287 struct isci_port *iport)
288{
289 iphy->owning_port = iport;
290
291 if (iphy->bcn_received_while_port_unassigned) {
292 iphy->bcn_received_while_port_unassigned = false;
293 sci_port_broadcast_change_received(iphy->owning_port, iphy);
294 }
295}
296
297enum sci_status sci_phy_initialize(struct isci_phy *iphy,
298 struct scu_transport_layer_registers __iomem *tl,
299 struct scu_link_layer_registers __iomem *ll)
300{
301	/* Perform the initialization of the TL hardware */
302 sci_phy_transport_layer_initialization(iphy, tl);
303
304	/* Perform the initialization of the PE hardware */
305 sci_phy_link_layer_initialization(iphy, ll);
306
307 /* There is nothing that needs to be done in this state just
308 * transition to the stopped state
309 */
310 sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
311
312 return SCI_SUCCESS;
313}
314
315/**
316 * sci_phy_setup_transport() - assign the direct attached device ID for
317 *    this phy.
318 * @iphy: The phy for which the direct attached device id is to
319 *    be assigned.
320 * @device_id: The direct attached device ID to assign to the phy.
321 *    This will either be the RNi for the device or an invalid RNi if there
322 *    is no current device assigned to the phy.
323 */
324void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id)
325{
326 u32 tl_control;
327
328 writel(device_id, &iphy->transport_layer_registers->stp_rni);
329
330 /*
331 * The read should guarantee that the first write gets posted
332 * before the next write
333 */
334 tl_control = readl(&iphy->transport_layer_registers->control);
335 tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE);
336 writel(tl_control, &iphy->transport_layer_registers->control);
337}
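
The readl() sandwiched between the two writel() calls above is the standard posted-write flush: PCI may buffer MMIO writes, and a read from the same device forces the first write to reach the hardware before the second is issued. A generic sketch of the idiom, against a hypothetical register block and offsets:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch only; 'regs' and the offsets are hypothetical. */
static void ordered_register_update(void __iomem *regs)
{
	u32 ctl;

	writel(0x1, regs + 0x00);	/* first write may be posted */
	ctl = readl(regs + 0x04);	/* read-back flushes posted writes */
	ctl |= 0x80;
	writel(ctl, regs + 0x04);	/* guaranteed to land after the first */
}
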
338
339static void sci_phy_suspend(struct isci_phy *iphy)
340{
341 u32 scu_sas_pcfg_value;
342
343 scu_sas_pcfg_value =
344 readl(&iphy->link_layer_registers->phy_configuration);
345 scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
346 writel(scu_sas_pcfg_value,
347 &iphy->link_layer_registers->phy_configuration);
348
349 sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
350}
351
352void sci_phy_resume(struct isci_phy *iphy)
353{
354 u32 scu_sas_pcfg_value;
355
356 scu_sas_pcfg_value =
357 readl(&iphy->link_layer_registers->phy_configuration);
358 scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
359 writel(scu_sas_pcfg_value,
360 &iphy->link_layer_registers->phy_configuration);
361}
362
363void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
364{
365 sas->high = readl(&iphy->link_layer_registers->source_sas_address_high);
366 sas->low = readl(&iphy->link_layer_registers->source_sas_address_low);
367}
368
369void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
370{
371 struct sas_identify_frame *iaf;
372
373 iaf = &iphy->frame_rcvd.iaf;
374 memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE);
375}
376
377void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto)
378{
379 proto->all = readl(&iphy->link_layer_registers->transmit_identification);
380}
381
382enum sci_status sci_phy_start(struct isci_phy *iphy)
383{
384 enum sci_phy_states state = iphy->sm.current_state_id;
385
386 if (state != SCI_PHY_STOPPED) {
387 dev_dbg(sciphy_to_dev(iphy),
388 "%s: in wrong state: %d\n", __func__, state);
389 return SCI_FAILURE_INVALID_STATE;
390 }
391
392 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
393 return SCI_SUCCESS;
394}
395
396enum sci_status sci_phy_stop(struct isci_phy *iphy)
397{
398 enum sci_phy_states state = iphy->sm.current_state_id;
399
400 switch (state) {
401 case SCI_PHY_SUB_INITIAL:
402 case SCI_PHY_SUB_AWAIT_OSSP_EN:
403 case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
404 case SCI_PHY_SUB_AWAIT_SAS_POWER:
405 case SCI_PHY_SUB_AWAIT_SATA_POWER:
406 case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
407 case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
408 case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
409 case SCI_PHY_SUB_FINAL:
410 case SCI_PHY_READY:
411 break;
412 default:
413 dev_dbg(sciphy_to_dev(iphy),
414 "%s: in wrong state: %d\n", __func__, state);
415 return SCI_FAILURE_INVALID_STATE;
416 }
417
418 sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
419 return SCI_SUCCESS;
420}
421
422enum sci_status sci_phy_reset(struct isci_phy *iphy)
423{
424 enum sci_phy_states state = iphy->sm.current_state_id;
425
426 if (state != SCI_PHY_READY) {
427 dev_dbg(sciphy_to_dev(iphy),
428 "%s: in wrong state: %d\n", __func__, state);
429 return SCI_FAILURE_INVALID_STATE;
430 }
431
432 sci_change_state(&iphy->sm, SCI_PHY_RESETTING);
433 return SCI_SUCCESS;
434}
435
436enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
437{
438 enum sci_phy_states state = iphy->sm.current_state_id;
439
440 switch (state) {
441 case SCI_PHY_SUB_AWAIT_SAS_POWER: {
442 u32 enable_spinup;
443
444 enable_spinup = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
445 enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE);
446 writel(enable_spinup, &iphy->link_layer_registers->notify_enable_spinup_control);
447
448 /* Change state to the final state this substate machine has run to completion */
449 sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
450
451 return SCI_SUCCESS;
452 }
453 case SCI_PHY_SUB_AWAIT_SATA_POWER: {
454 u32 scu_sas_pcfg_value;
455
456 /* Release the spinup hold state and reset the OOB state machine */
457 scu_sas_pcfg_value =
458 readl(&iphy->link_layer_registers->phy_configuration);
459 scu_sas_pcfg_value &=
460 ~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
461 scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
462 writel(scu_sas_pcfg_value,
463 &iphy->link_layer_registers->phy_configuration);
464
465 /* Now restart the OOB operation */
466 scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
467 scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
468 writel(scu_sas_pcfg_value,
469 &iphy->link_layer_registers->phy_configuration);
470
471 /* Change state to the final state this substate machine has run to completion */
472 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN);
473
474 return SCI_SUCCESS;
475 }
476 default:
477 dev_dbg(sciphy_to_dev(iphy),
478 "%s: in wrong state: %d\n", __func__, state);
479 return SCI_FAILURE_INVALID_STATE;
480 }
481}
482
483static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
484{
485 /* continue the link training for the phy as if it were a SAS PHY
486 * instead of a SATA PHY. This is done because the completion queue had a SAS
487 * PHY DETECTED event when the state machine was expecting a SATA PHY event.
488 */
489 u32 phy_control;
490
491 phy_control = readl(&iphy->link_layer_registers->phy_configuration);
492 phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD);
493 writel(phy_control,
494 &iphy->link_layer_registers->phy_configuration);
495
496 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
497
498 iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
499}
500
501static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
502{
503	/* Continue the link training for the phy as if it were a SATA PHY
504	 * instead of a SAS PHY. This is done because the completion queue had a SATA
505	 * SPINUP HOLD event when the state machine was expecting a SAS PHY event.
506	 */
507 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
508
509 iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
510}
511
512/**
513 * sci_phy_complete_link_training - perform processing common to
514 * all protocols upon completion of link training.
515 * @iphy: This parameter specifies the phy object for which link training
516 * has completed.
517 * @max_link_rate: This parameter specifies the maximum link rate to be
518 * associated with this phy.
519 * @next_state: This parameter specifies the next state for the phy's starting
520 * sub-state machine.
521 *
522 */
523static void sci_phy_complete_link_training(struct isci_phy *iphy,
524 enum sas_linkrate max_link_rate,
525 u32 next_state)
526{
527 iphy->max_negotiated_speed = max_link_rate;
528
529 sci_change_state(&iphy->sm, next_state);
530}
531
532enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
533{
534 enum sci_phy_states state = iphy->sm.current_state_id;
535
536 switch (state) {
537 case SCI_PHY_SUB_AWAIT_OSSP_EN:
538 switch (scu_get_event_code(event_code)) {
539 case SCU_EVENT_SAS_PHY_DETECTED:
540 sci_phy_start_sas_link_training(iphy);
541 iphy->is_in_link_training = true;
542 break;
543 case SCU_EVENT_SATA_SPINUP_HOLD:
544 sci_phy_start_sata_link_training(iphy);
545 iphy->is_in_link_training = true;
546 break;
547 default:
548 dev_dbg(sciphy_to_dev(iphy),
549 "%s: PHY starting substate machine received "
550 "unexpected event_code %x\n",
551 __func__,
552 event_code);
553 return SCI_FAILURE;
554 }
555 return SCI_SUCCESS;
556 case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
557 switch (scu_get_event_code(event_code)) {
558 case SCU_EVENT_SAS_PHY_DETECTED:
559 /*
560 * Why is this being reported again by the controller?
561 * We would re-enter this state so just stay here */
562 break;
563 case SCU_EVENT_SAS_15:
564 case SCU_EVENT_SAS_15_SSC:
565 sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
566 SCI_PHY_SUB_AWAIT_IAF_UF);
567 break;
568 case SCU_EVENT_SAS_30:
569 case SCU_EVENT_SAS_30_SSC:
570 sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
571 SCI_PHY_SUB_AWAIT_IAF_UF);
572 break;
573 case SCU_EVENT_SAS_60:
574 case SCU_EVENT_SAS_60_SSC:
575 sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
576 SCI_PHY_SUB_AWAIT_IAF_UF);
577 break;
578 case SCU_EVENT_SATA_SPINUP_HOLD:
579 /*
580 * We were doing SAS PHY link training and received a SATA PHY event
581 * continue OOB/SN as if this were a SATA PHY */
582 sci_phy_start_sata_link_training(iphy);
583 break;
584 case SCU_EVENT_LINK_FAILURE:
585 /* Link failure change state back to the starting state */
586 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
587 break;
588 default:
589 dev_warn(sciphy_to_dev(iphy),
590 "%s: PHY starting substate machine received "
591 "unexpected event_code %x\n",
592 __func__, event_code);
593
594			return SCI_FAILURE;
595
596 }
597 return SCI_SUCCESS;
598 case SCI_PHY_SUB_AWAIT_IAF_UF:
599 switch (scu_get_event_code(event_code)) {
600 case SCU_EVENT_SAS_PHY_DETECTED:
601 /* Backup the state machine */
602 sci_phy_start_sas_link_training(iphy);
603 break;
604 case SCU_EVENT_SATA_SPINUP_HOLD:
605 /* We were doing SAS PHY link training and received a
606 * SATA PHY event continue OOB/SN as if this were a
607 * SATA PHY
608 */
609 sci_phy_start_sata_link_training(iphy);
610 break;
611 case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
612 case SCU_EVENT_LINK_FAILURE:
613 case SCU_EVENT_HARD_RESET_RECEIVED:
614 /* Start the oob/sn state machine over again */
615 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
616 break;
617 default:
618 dev_warn(sciphy_to_dev(iphy),
619 "%s: PHY starting substate machine received "
620 "unexpected event_code %x\n",
621 __func__, event_code);
622 return SCI_FAILURE;
623 }
624 return SCI_SUCCESS;
625 case SCI_PHY_SUB_AWAIT_SAS_POWER:
626 switch (scu_get_event_code(event_code)) {
627 case SCU_EVENT_LINK_FAILURE:
628 /* Link failure change state back to the starting state */
629 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
630 break;
631 default:
632 dev_warn(sciphy_to_dev(iphy),
633 "%s: PHY starting substate machine received unexpected "
634 "event_code %x\n",
635 __func__,
636 event_code);
637 return SCI_FAILURE;
638 }
639 return SCI_SUCCESS;
640 case SCI_PHY_SUB_AWAIT_SATA_POWER:
641 switch (scu_get_event_code(event_code)) {
642 case SCU_EVENT_LINK_FAILURE:
643 /* Link failure change state back to the starting state */
644 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
645 break;
646 case SCU_EVENT_SATA_SPINUP_HOLD:
647 /* These events are received every 10ms and are
648 * expected while in this state
649 */
650 break;
651
652 case SCU_EVENT_SAS_PHY_DETECTED:
653			/* There has been a change in the phy type before OOB/SN
654			 * for the SATA device finished. Start down the SAS link
655			 * training path. */
656 sci_phy_start_sas_link_training(iphy);
657 break;
658
659 default:
660 dev_warn(sciphy_to_dev(iphy),
661 "%s: PHY starting substate machine received "
662 "unexpected event_code %x\n",
663 __func__, event_code);
664
665 return SCI_FAILURE;
666 }
667 return SCI_SUCCESS;
668 case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
669 switch (scu_get_event_code(event_code)) {
670 case SCU_EVENT_LINK_FAILURE:
671 /* Link failure change state back to the starting state */
672 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
673 break;
674 case SCU_EVENT_SATA_SPINUP_HOLD:
675			/* These events might be received since we don't know how
676			 * many may be in the completion queue while waiting for power.
677			 */
678 break;
679 case SCU_EVENT_SATA_PHY_DETECTED:
680 iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
681
682 /* We have received the SATA PHY notification change state */
683 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
684 break;
685 case SCU_EVENT_SAS_PHY_DETECTED:
686			/* There has been a change in the phy type before OOB/SN
687			 * for the SATA device finished. Start down the SAS link
688			 * training path. */
689 sci_phy_start_sas_link_training(iphy);
690 break;
691 default:
692 dev_warn(sciphy_to_dev(iphy),
693 "%s: PHY starting substate machine received "
694 "unexpected event_code %x\n",
695 __func__,
696 event_code);
697
698			return SCI_FAILURE;
699 }
700 return SCI_SUCCESS;
701 case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
702 switch (scu_get_event_code(event_code)) {
703 case SCU_EVENT_SATA_PHY_DETECTED:
704 /*
705			 * The hardware reports multiple SATA PHY detected events;
706			 * ignore the extras */
707 break;
708 case SCU_EVENT_SATA_15:
709 case SCU_EVENT_SATA_15_SSC:
710 sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
711 SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
712 break;
713 case SCU_EVENT_SATA_30:
714 case SCU_EVENT_SATA_30_SSC:
715 sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
716 SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
717 break;
718 case SCU_EVENT_SATA_60:
719 case SCU_EVENT_SATA_60_SSC:
720 sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
721 SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
722 break;
723 case SCU_EVENT_LINK_FAILURE:
724 /* Link failure change state back to the starting state */
725 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
726 break;
727 case SCU_EVENT_SAS_PHY_DETECTED:
728 /*
729			 * There has been a change in the phy type before OOB/SN for the
730			 * SATA device finished. Start down the SAS link training path. */
731 sci_phy_start_sas_link_training(iphy);
732 break;
733 default:
734 dev_warn(sciphy_to_dev(iphy),
735 "%s: PHY starting substate machine received "
736 "unexpected event_code %x\n",
737 __func__, event_code);
738
739 return SCI_FAILURE;
740 }
741
742 return SCI_SUCCESS;
743 case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
744 switch (scu_get_event_code(event_code)) {
745 case SCU_EVENT_SATA_PHY_DETECTED:
746 /* Backup the state machine */
747 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
748 break;
749
750 case SCU_EVENT_LINK_FAILURE:
751 /* Link failure change state back to the starting state */
752 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
753 break;
754
755 default:
756 dev_warn(sciphy_to_dev(iphy),
757 "%s: PHY starting substate machine received "
758 "unexpected event_code %x\n",
759 __func__,
760 event_code);
761
762 return SCI_FAILURE;
763 }
764 return SCI_SUCCESS;
765 case SCI_PHY_READY:
766 switch (scu_get_event_code(event_code)) {
767 case SCU_EVENT_LINK_FAILURE:
768 /* Link failure change state back to the starting state */
769 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
770 break;
771 case SCU_EVENT_BROADCAST_CHANGE:
772 /* Broadcast change received. Notify the port. */
773 if (phy_get_non_dummy_port(iphy) != NULL)
774 sci_port_broadcast_change_received(iphy->owning_port, iphy);
775 else
776 iphy->bcn_received_while_port_unassigned = true;
777 break;
778 default:
779 dev_warn(sciphy_to_dev(iphy),
780				 "%s: SCIC PHY 0x%p ready state machine received "
781 "unexpected event_code %x\n",
782 __func__, iphy, event_code);
783 return SCI_FAILURE_INVALID_STATE;
784 }
785 return SCI_SUCCESS;
786 case SCI_PHY_RESETTING:
787 switch (scu_get_event_code(event_code)) {
788 case SCU_EVENT_HARD_RESET_TRANSMITTED:
789			/* Hard reset transmitted; change state back to the starting state */
790 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
791 break;
792 default:
793 dev_warn(sciphy_to_dev(iphy),
794 "%s: SCIC PHY 0x%p resetting state machine received "
795 "unexpected event_code %x\n",
796 __func__, iphy, event_code);
797
798			return SCI_FAILURE_INVALID_STATE;
799
800 }
801 return SCI_SUCCESS;
802 default:
803 dev_dbg(sciphy_to_dev(iphy),
804 "%s: in wrong state: %d\n", __func__, state);
805 return SCI_FAILURE_INVALID_STATE;
806 }
807}
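
sci_phy_event_handler() assumes the controller lock is held and that the caller has already pulled a raw event code from the completion queue. The real dispatch lives in the controller code (host.c, not shown in this hunk); a hypothetical call site would look roughly like this sketch:

/* Hypothetical caller sketch; the actual dispatch is in host.c. */
static void dispatch_phy_event(struct isci_host *ihost,
			       struct isci_phy *iphy, u32 event_code)
{
	unsigned long flags;
	enum sci_status status;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	status = sci_phy_event_handler(iphy, event_code);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status != SCI_SUCCESS)
		dev_warn(&ihost->pdev->dev,
			 "phy event %#x rejected: %d\n", event_code, status);
}
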
808
809enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
810{
811 enum sci_phy_states state = iphy->sm.current_state_id;
812 struct isci_host *ihost = iphy->owning_port->owning_controller;
813 enum sci_status result;
814 unsigned long flags;
815
816 switch (state) {
817 case SCI_PHY_SUB_AWAIT_IAF_UF: {
818 u32 *frame_words;
819 struct sas_identify_frame iaf;
820
821 result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
822 frame_index,
823 (void **)&frame_words);
824
825 if (result != SCI_SUCCESS)
826 return result;
827
828 sci_swab32_cpy(&iaf, frame_words, sizeof(iaf) / sizeof(u32));
829 if (iaf.frame_type == 0) {
830 u32 state;
831
832 spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
833 memcpy(&iphy->frame_rcvd.iaf, &iaf, sizeof(iaf));
834 spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
835 if (iaf.smp_tport) {
836 /* We got the IAF for an expander PHY go to the final
837 * state since there are no power requirements for
838 * expander phys.
839 */
840 state = SCI_PHY_SUB_FINAL;
841 } else {
842 /* We got the IAF we can now go to the await spinup
843 * semaphore state
844 */
845 state = SCI_PHY_SUB_AWAIT_SAS_POWER;
846 }
847 sci_change_state(&iphy->sm, state);
848 result = SCI_SUCCESS;
849 } else
850 dev_warn(sciphy_to_dev(iphy),
851 "%s: PHY starting substate machine received "
852 "unexpected frame id %x\n",
853 __func__, frame_index);
854
855 sci_controller_release_frame(ihost, frame_index);
856 return result;
857 }
858 case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: {
859 struct dev_to_host_fis *frame_header;
860 u32 *fis_frame_data;
861
862 result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
863 frame_index,
864 (void **)&frame_header);
865
866 if (result != SCI_SUCCESS)
867 return result;
868
869 if ((frame_header->fis_type == FIS_REGD2H) &&
870 !(frame_header->status & ATA_BUSY)) {
871 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
872 frame_index,
873 (void **)&fis_frame_data);
874
875 spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
876 sci_controller_copy_sata_response(&iphy->frame_rcvd.fis,
877 frame_header,
878 fis_frame_data);
879 spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
880
881 /* got IAF we can now go to the await spinup semaphore state */
882 sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
883
884 result = SCI_SUCCESS;
885 } else
886 dev_warn(sciphy_to_dev(iphy),
887 "%s: PHY starting substate machine received "
888 "unexpected frame id %x\n",
889 __func__, frame_index);
890
891		/* Regardless of the result we are done with this frame. */
892 sci_controller_release_frame(ihost, frame_index);
893
894 return result;
895 }
896 default:
897 dev_dbg(sciphy_to_dev(iphy),
898 "%s: in wrong state: %d\n", __func__, state);
899 return SCI_FAILURE_INVALID_STATE;
900 }
901
902}
903
904static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
905{
906 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
907
908	/* This is just a temporary state; go on to the awaiting-OSSP substate */
909 sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN);
910}
911
912static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm)
913{
914 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
915 struct isci_host *ihost = iphy->owning_port->owning_controller;
916
917 sci_controller_power_control_queue_insert(ihost, iphy);
918}
919
920static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm)
921{
922 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
923 struct isci_host *ihost = iphy->owning_port->owning_controller;
924
925 sci_controller_power_control_queue_remove(ihost, iphy);
926}
927
928static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm)
929{
930 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
931 struct isci_host *ihost = iphy->owning_port->owning_controller;
932
933 sci_controller_power_control_queue_insert(ihost, iphy);
934}
935
936static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm)
937{
938 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
939 struct isci_host *ihost = iphy->owning_port->owning_controller;
940
941 sci_controller_power_control_queue_remove(ihost, iphy);
942}
943
944static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm)
945{
946 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
947
948 sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
949}
950
951static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm)
952{
953 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
954
955 sci_del_timer(&iphy->sata_timer);
956}
957
958static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm)
959{
960 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
961
962 sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
963}
964
965static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm)
966{
967 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
968
969 sci_del_timer(&iphy->sata_timer);
970}
971
972static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm)
973{
974 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
975
976 if (sci_port_link_detected(iphy->owning_port, iphy)) {
977
978 /*
979 * Clear the PE suspend condition so we can actually
980 * receive SIG FIS
981 * The hardware will not respond to the XRDY until the PE
982 * suspend condition is cleared.
983 */
984 sci_phy_resume(iphy);
985
986 sci_mod_timer(&iphy->sata_timer,
987 SCIC_SDS_SIGNATURE_FIS_TIMEOUT);
988 } else
989 iphy->is_in_link_training = false;
990}
991
992static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm)
993{
994 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
995
996 sci_del_timer(&iphy->sata_timer);
997}
998
999static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm)
1000{
1001 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
1002
1003 /* State machine has run to completion so exit out and change
1004 * the base state machine to the ready state
1005 */
1006 sci_change_state(&iphy->sm, SCI_PHY_READY);
1007}
1008
1009/**
1010 * scu_link_layer_stop_protocol_engine() - stop the phy's protocol engine.
1011 * @iphy: This is the struct isci_phy object to stop.
1012 *
1013 * This method will stop the struct isci_phy object. It does not reset the
1014 * protocol engine; it just suspends it and places it in a state where it
1015 * will not cause the end device to power up.
1016 */
1017static void scu_link_layer_stop_protocol_engine(
1018 struct isci_phy *iphy)
1019{
1020 u32 scu_sas_pcfg_value;
1021 u32 enable_spinup_value;
1022
1023 /* Suspend the protocol engine and place it in a sata spinup hold state */
1024 scu_sas_pcfg_value =
1025 readl(&iphy->link_layer_registers->phy_configuration);
1026 scu_sas_pcfg_value |=
1027 (SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
1028 SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE) |
1029 SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD));
1030 writel(scu_sas_pcfg_value,
1031 &iphy->link_layer_registers->phy_configuration);
1032
1033 /* Disable the notify enable spinup primitives */
1034 enable_spinup_value = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
1035 enable_spinup_value &= ~SCU_ENSPINUP_GEN_BIT(ENABLE);
1036 writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
1037}
1038
1039/**
1040 * scu_link_layer_start_oob() - start the OOB/SN state machine.
1041 *
1042 * This method will start the OOB/SN state machine for this struct isci_phy object.
1043 */
1044static void scu_link_layer_start_oob(
1045 struct isci_phy *iphy)
1046{
1047 u32 scu_sas_pcfg_value;
1048
1049 scu_sas_pcfg_value =
1050 readl(&iphy->link_layer_registers->phy_configuration);
1051 scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
1052 scu_sas_pcfg_value &=
1053 ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
1054 SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
1055 writel(scu_sas_pcfg_value,
1056 &iphy->link_layer_registers->phy_configuration);
1057}
1058
1059/**
1060 * scu_link_layer_tx_hard_reset() - transmit a hard reset request on the phy.
1061 *
1062 * This method will transmit a hard reset request on the specified phy. The SCU
1063 * hardware requires that we reset the OOB state machine and set the hard reset
1064 * bit in the phy configuration register. We then must start OOB over with the
1065 * hard reset bit set.
1066 */
1067static void scu_link_layer_tx_hard_reset(
1068 struct isci_phy *iphy)
1069{
1070 u32 phy_configuration_value;
1071
1072 /*
1073 * SAS Phys must wait for the HARD_RESET_TX event notification to transition
1074 * to the starting state. */
1075 phy_configuration_value =
1076 readl(&iphy->link_layer_registers->phy_configuration);
1077 phy_configuration_value |=
1078 (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) |
1079 SCU_SAS_PCFG_GEN_BIT(OOB_RESET));
1080 writel(phy_configuration_value,
1081 &iphy->link_layer_registers->phy_configuration);
1082
1083 /* Now take the OOB state machine out of reset */
1084 phy_configuration_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
1085 phy_configuration_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
1086 writel(phy_configuration_value,
1087 &iphy->link_layer_registers->phy_configuration);
1088}
1089
1090static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
1091{
1092 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
1093 struct isci_port *iport = iphy->owning_port;
1094 struct isci_host *ihost = iport->owning_controller;
1095
1096 /*
1097 * @todo We need to get to the controller to place this PE in a
1098 * reset state
1099 */
1100 sci_del_timer(&iphy->sata_timer);
1101
1102 scu_link_layer_stop_protocol_engine(iphy);
1103
1104 if (iphy->sm.previous_state_id != SCI_PHY_INITIAL)
1105 sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
1106}
1107
1108static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
1109{
1110 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
1111 struct isci_port *iport = iphy->owning_port;
1112 struct isci_host *ihost = iport->owning_controller;
1113
1114 scu_link_layer_stop_protocol_engine(iphy);
1115 scu_link_layer_start_oob(iphy);
1116
1117 /* We don't know what kind of phy we are going to be just yet */
1118 iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
1119 iphy->bcn_received_while_port_unassigned = false;
1120
1121 if (iphy->sm.previous_state_id == SCI_PHY_READY)
1122 sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
1123
1124 sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL);
1125}
1126
1127static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm)
1128{
1129 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
1130 struct isci_port *iport = iphy->owning_port;
1131 struct isci_host *ihost = iport->owning_controller;
1132
1133 sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy);
1134}
1135
1136static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm)
1137{
1138 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
1139
1140 sci_phy_suspend(iphy);
1141}
1142
1143static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
1144{
1145 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
1146
1147 /* The phy is being reset, therefore deactivate it from the port. In
1148 * the resetting state we don't notify the user regarding link up and
1149 * link down notifications
1150 */
1151 sci_port_deactivate_phy(iphy->owning_port, iphy, false);
1152
1153 if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
1154 scu_link_layer_tx_hard_reset(iphy);
1155 } else {
1156 /* The SCU does not need to have a discrete reset state so
1157 * just go back to the starting state.
1158 */
1159 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
1160 }
1161}
1162
1163static const struct sci_base_state sci_phy_state_table[] = {
1164 [SCI_PHY_INITIAL] = { },
1165 [SCI_PHY_STOPPED] = {
1166 .enter_state = sci_phy_stopped_state_enter,
1167 },
1168 [SCI_PHY_STARTING] = {
1169 .enter_state = sci_phy_starting_state_enter,
1170 },
1171 [SCI_PHY_SUB_INITIAL] = {
1172 .enter_state = sci_phy_starting_initial_substate_enter,
1173 },
1174 [SCI_PHY_SUB_AWAIT_OSSP_EN] = { },
1175 [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { },
1176 [SCI_PHY_SUB_AWAIT_IAF_UF] = { },
1177 [SCI_PHY_SUB_AWAIT_SAS_POWER] = {
1178 .enter_state = sci_phy_starting_await_sas_power_substate_enter,
1179 .exit_state = sci_phy_starting_await_sas_power_substate_exit,
1180 },
1181 [SCI_PHY_SUB_AWAIT_SATA_POWER] = {
1182 .enter_state = sci_phy_starting_await_sata_power_substate_enter,
1183 .exit_state = sci_phy_starting_await_sata_power_substate_exit
1184 },
1185 [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = {
1186 .enter_state = sci_phy_starting_await_sata_phy_substate_enter,
1187 .exit_state = sci_phy_starting_await_sata_phy_substate_exit
1188 },
1189 [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = {
1190 .enter_state = sci_phy_starting_await_sata_speed_substate_enter,
1191 .exit_state = sci_phy_starting_await_sata_speed_substate_exit
1192 },
1193 [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = {
1194 .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter,
1195 .exit_state = sci_phy_starting_await_sig_fis_uf_substate_exit
1196 },
1197 [SCI_PHY_SUB_FINAL] = {
1198 .enter_state = sci_phy_starting_final_substate_enter,
1199 },
1200 [SCI_PHY_READY] = {
1201 .enter_state = sci_phy_ready_state_enter,
1202 .exit_state = sci_phy_ready_state_exit,
1203 },
1204 [SCI_PHY_RESETTING] = {
1205 .enter_state = sci_phy_resetting_state_enter,
1206 },
1207 [SCI_PHY_FINAL] = { },
1208};
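
sci_phy_state_table pairs each state with optional enter/exit hooks, which sci_change_state() evidently invokes as it leaves the old state and enters the new one. A self-contained sketch of that table-driven pattern, with hypothetical state and hook names:

#include <stdio.h>

enum state { ST_A, ST_B, ST_MAX };

struct state_ops {
	void (*enter)(void);
	void (*exit)(void);
};

static void a_exit(void)  { printf("exit A\n"); }
static void b_enter(void) { printf("enter B\n"); }

static const struct state_ops table[ST_MAX] = {
	[ST_A] = { .exit = a_exit },
	[ST_B] = { .enter = b_enter },
};

static enum state cur = ST_A;

static void change_state(enum state next)
{
	if (table[cur].exit)	/* run the old state's exit hook, if any */
		table[cur].exit();
	cur = next;
	if (table[cur].enter)	/* then the new state's enter hook */
		table[cur].enter();
}

int main(void)
{
	change_state(ST_B);	/* prints "exit A" then "enter B" */
	return 0;
}
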
1209
1210void sci_phy_construct(struct isci_phy *iphy,
1211 struct isci_port *iport, u8 phy_index)
1212{
1213 sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL);
1214
1215 /* Copy the rest of the input data to our locals */
1216 iphy->owning_port = iport;
1217 iphy->phy_index = phy_index;
1218 iphy->bcn_received_while_port_unassigned = false;
1219 iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
1220 iphy->link_layer_registers = NULL;
1221 iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
1222
1223 /* Create the SIGNATURE FIS Timeout timer for this phy */
1224 sci_init_timer(&iphy->sata_timer, phy_sata_timeout);
1225}
1226
1227void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
1228{
1229 struct sci_oem_params *oem = &ihost->oem_parameters;
1230 u64 sci_sas_addr;
1231 __be64 sas_addr;
1232
1233 sci_sas_addr = oem->phys[index].sas_address.high;
1234 sci_sas_addr <<= 32;
1235 sci_sas_addr |= oem->phys[index].sas_address.low;
1236 sas_addr = cpu_to_be64(sci_sas_addr);
1237 memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
1238
1239 iphy->isci_port = NULL;
1240 iphy->sas_phy.enabled = 0;
1241 iphy->sas_phy.id = index;
1242 iphy->sas_phy.sas_addr = &iphy->sas_addr[0];
1243 iphy->sas_phy.frame_rcvd = (u8 *)&iphy->frame_rcvd;
1244 iphy->sas_phy.ha = &ihost->sas_ha;
1245 iphy->sas_phy.lldd_phy = iphy;
1246 iphy->sas_phy.enabled = 1;
1247 iphy->sas_phy.class = SAS;
1248 iphy->sas_phy.iproto = SAS_PROTOCOL_ALL;
1249 iphy->sas_phy.tproto = 0;
1250 iphy->sas_phy.type = PHY_TYPE_PHYSICAL;
1251 iphy->sas_phy.role = PHY_ROLE_INITIATOR;
1252 iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED;
1253 iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN;
1254 memset(&iphy->frame_rcvd, 0, sizeof(iphy->frame_rcvd));
1255}
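
isci_phy_init() builds the 64-bit SAS address from the OEM high/low words and stores it big-endian via cpu_to_be64(), since libsas expects the address in wire (big-endian) byte order. The same packing in a standalone userspace sketch, with made-up example values:

#include <stdint.h>
#include <stdio.h>

/* Userspace sketch of the high/low -> big-endian SAS address packing. */
static void pack_sas_addr(uint32_t high, uint32_t low, uint8_t out[8])
{
	uint64_t addr = ((uint64_t)high << 32) | low;
	int i;

	for (i = 7; i >= 0; i--) {	/* store most-significant byte first */
		out[i] = addr & 0xff;
		addr >>= 8;
	}
}

int main(void)
{
	uint8_t buf[8];

	pack_sas_addr(0x5000c500, 0x12345678, buf);
	printf("%02x%02x%02x%02x%02x%02x%02x%02x\n",
	       buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
	return 0;	/* prints 5000c50012345678 */
}
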
1256
1257
1258/**
1259 * isci_phy_control() - This function is one of the SAS Domain Template
1260 * functions. This is a phy management function.
1261 * @phy: This parameter specifies the sphy being controlled.
1262 * @func: This parameter specifies the phy control function being invoked.
1263 * @buf: This parameter is specific to the phy function being invoked.
1264 *
1265 * status, zero indicates success.
1266 */
1267int isci_phy_control(struct asd_sas_phy *sas_phy,
1268 enum phy_func func,
1269 void *buf)
1270{
1271 int ret = 0;
1272 struct isci_phy *iphy = sas_phy->lldd_phy;
1273 struct isci_port *iport = iphy->isci_port;
1274 struct isci_host *ihost = sas_phy->ha->lldd_ha;
1275 unsigned long flags;
1276
1277 dev_dbg(&ihost->pdev->dev,
1278 "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n",
1279 __func__, sas_phy, func, buf, iphy, iport);
1280
1281 switch (func) {
1282 case PHY_FUNC_DISABLE:
1283 spin_lock_irqsave(&ihost->scic_lock, flags);
1284 sci_phy_stop(iphy);
1285 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1286 break;
1287
1288 case PHY_FUNC_LINK_RESET:
1289 spin_lock_irqsave(&ihost->scic_lock, flags);
1290 sci_phy_stop(iphy);
1291 sci_phy_start(iphy);
1292 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1293 break;
1294
1295 case PHY_FUNC_HARD_RESET:
1296 if (!iport)
1297 return -ENODEV;
1298
1299 /* Perform the port reset. */
1300 ret = isci_port_perform_hard_reset(ihost, iport, iphy);
1301
1302 break;
1303
1304 default:
1305 dev_dbg(&ihost->pdev->dev,
1306 "%s: phy %p; func %d NOT IMPLEMENTED!\n",
1307 __func__, sas_phy, func);
1308 ret = -ENOSYS;
1309 break;
1310 }
1311 return ret;
1312}
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
new file mode 100644
index 000000000000..67699c8e321c
--- /dev/null
+++ b/drivers/scsi/isci/phy.h
@@ -0,0 +1,504 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55#ifndef _ISCI_PHY_H_
56#define _ISCI_PHY_H_
57
58#include <scsi/sas.h>
59#include <scsi/libsas.h>
60#include "isci.h"
61#include "sas.h"
62
63/* This is the timeout value for the SATA phy to wait for a SIGNATURE FIS
64 * before restarting the starting state machine. Technically, the old parallel
65 * ATA specification required up to 30 seconds for a device to issue its
66 * signature FIS as a result of a soft reset. Now we see that devices respond
67 * generally within 15 seconds, but we'll use 25 for now.
68 */
69#define SCIC_SDS_SIGNATURE_FIS_TIMEOUT 25000
70
71/* This is the timeout for the SATA OOB/SN because the hardware does not
72 * recognize a hot plug after OOB signal but before the SN signals. We need to
73 * make sure after a hotplug timeout if we have not received the speed event
74 * notification from the hardware that we restart the hardware OOB state
75 * machine.
76 */
77#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250
78
79enum sci_phy_protocol {
80 SCIC_SDS_PHY_PROTOCOL_UNKNOWN,
81 SCIC_SDS_PHY_PROTOCOL_SAS,
82 SCIC_SDS_PHY_PROTOCOL_SATA,
83 SCIC_SDS_MAX_PHY_PROTOCOLS
84};
85
86/**
87 * isci_phy - hba local phy infrastructure
 88 * @sm: phy state machine
89 * @protocol: attached device protocol
90 * @phy_index: physical index relative to the controller (0-3)
91 * @bcn_received_while_port_unassigned: bcn to report after port association
92 * @sata_timer: timeout SATA signature FIS arrival
93 */
94struct isci_phy {
95 struct sci_base_state_machine sm;
96 struct isci_port *owning_port;
97 enum sas_linkrate max_negotiated_speed;
98 enum sci_phy_protocol protocol;
99 u8 phy_index;
100 bool bcn_received_while_port_unassigned;
101 bool is_in_link_training;
102 struct sci_timer sata_timer;
103 struct scu_transport_layer_registers __iomem *transport_layer_registers;
104 struct scu_link_layer_registers __iomem *link_layer_registers;
105 struct asd_sas_phy sas_phy;
106 struct isci_port *isci_port;
107 u8 sas_addr[SAS_ADDR_SIZE];
108 union {
109 struct sas_identify_frame iaf;
110 struct dev_to_host_fis fis;
111 } frame_rcvd;
112};
113
114static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy)
115{
116 struct isci_phy *iphy = container_of(sas_phy, typeof(*iphy), sas_phy);
117
118 return iphy;
119}
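
to_iphy() recovers the owning struct isci_phy from the embedded asd_sas_phy with container_of(), i.e. plain pointer arithmetic on the member offset. A userspace sketch of the same mechanism, with a local stand-in for the kernel macro:

#include <stddef.h>
#include <stdio.h>

/* Minimal userspace container_of, equivalent in effect to the kernel macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int id; };
struct outer { int tag; struct inner in; };

int main(void)
{
	struct outer o = { .tag = 42 };
	struct inner *ip = &o.in;
	struct outer *op = container_of(ip, struct outer, in);

	printf("%d\n", op->tag);	/* prints 42 */
	return 0;
}
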
120
121struct sci_phy_cap {
122 union {
123 struct {
124			/*
125			 * The SAS specification indicates the start bit shall
126			 * always be set to 1. This implementation will have
127			 * the start bit set to 0 if the PHY CAPABILITIES
128			 * were either not received or speed negotiation
129			 * failed.
130			 */
131 u8 start:1;
132 u8 tx_ssc_type:1;
133 u8 res1:2;
134 u8 req_logical_linkrate:4;
135
136 u32 gen1_no_ssc:1;
137 u32 gen1_ssc:1;
138 u32 gen2_no_ssc:1;
139 u32 gen2_ssc:1;
140 u32 gen3_no_ssc:1;
141 u32 gen3_ssc:1;
142 u32 res2:17;
143 u32 parity:1;
144 };
145 u32 all;
146 };
147} __packed;
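
struct sci_phy_cap overlays the raw 32-bit PHY CAPABILITIES dword (the `all` member) with bitfields, so the driver can read the register once and decode it by field name. Note that C bitfield layout is implementation-defined; the struct works only because the compiler's bit allocation matches the hardware. A userspace sketch of the pattern, using a simplified made-up layout rather than the real register format:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: simplified stand-in layout, compiler-dependent bit order. */
union phy_cap {
	struct {
		uint32_t start:1;
		uint32_t tx_ssc_type:1;
		uint32_t res:2;
		uint32_t req_rate:4;
		uint32_t gen1:1;
		uint32_t gen2:1;
		uint32_t gen3:1;
		uint32_t pad:21;
	};
	uint32_t all;
};

int main(void)
{
	union phy_cap cap;

	cap.all = 0x701;	/* raw register value: bits 0, 8, 9, 10 set */
	printf("start=%u gen1=%u gen2=%u gen3=%u\n",
	       cap.start, cap.gen1, cap.gen2, cap.gen3);
	return 0;
}
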
148
149/* this data structure reflects the link layer transmit identification reg */
150struct sci_phy_proto {
151 union {
152 struct {
153 u16 _r_a:1;
154 u16 smp_iport:1;
155 u16 stp_iport:1;
156 u16 ssp_iport:1;
157 u16 _r_b:4;
158 u16 _r_c:1;
159 u16 smp_tport:1;
160 u16 stp_tport:1;
161 u16 ssp_tport:1;
162 u16 _r_d:4;
163 };
164 u16 all;
165 };
166} __packed;
167
168
169/**
170 * struct sci_phy_properties - This structure defines the properties common to
171 * all phys that can be retrieved.
172 *
173 *
174 */
175struct sci_phy_properties {
176 /**
177 * This field specifies the port that currently contains the
178 * supplied phy. This field may be set to NULL
179 * if the phy is not currently contained in a port.
180 */
181 struct isci_port *iport;
182
183 /**
184 * This field specifies the link rate at which the phy is
185 * currently operating.
186 */
187 enum sas_linkrate negotiated_link_rate;
188
189 /**
190 * This field specifies the index of the phy in relation to other
191 * phys within the controller. This index is zero relative.
192 */
193 u8 index;
194};
195
196/**
197 * struct sci_sas_phy_properties - This structure defines the properties,
198 * specific to a SAS phy, that can be retrieved.
199 *
200 *
201 */
202struct sci_sas_phy_properties {
203 /**
204 * This field delineates the Identify Address Frame received
205 * from the remote end point.
206 */
207 struct sas_identify_frame rcvd_iaf;
208
209 /**
210 * This field delineates the Phy capabilities structure received
211 * from the remote end point.
212 */
213 struct sci_phy_cap rcvd_cap;
214
215};
216
217/**
218 * struct sci_sata_phy_properties - This structure defines the properties,
219 * specific to a SATA phy, that can be retrieved.
220 *
221 *
222 */
223struct sci_sata_phy_properties {
224 /**
225 * This field delineates the signature FIS received from the
226 * attached target.
227 */
228 struct dev_to_host_fis signature_fis;
229
230 /**
231 * This field specifies to the user if a port selector is connected
232 * on the specified phy.
233 */
234 bool is_port_selector_present;
235
236};
237
238/**
239 * enum sci_phy_counter_id - This enumeration depicts the various pieces of
240 * optional information that can be retrieved for a specific phy.
241 *
242 *
243 */
244enum sci_phy_counter_id {
245 /**
246 * This PHY information field tracks the number of frames received.
247 */
248 SCIC_PHY_COUNTER_RECEIVED_FRAME,
249
250 /**
251 * This PHY information field tracks the number of frames transmitted.
252 */
253 SCIC_PHY_COUNTER_TRANSMITTED_FRAME,
254
255 /**
256 * This PHY information field tracks the number of DWORDs received.
257 */
258 SCIC_PHY_COUNTER_RECEIVED_FRAME_WORD,
259
260 /**
261 * This PHY information field tracks the number of DWORDs transmitted.
262 */
263 SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD,
264
265 /**
266 * This PHY information field tracks the number of times DWORD
267 * synchronization was lost.
268 */
269 SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR,
270
271 /**
272 * This PHY information field tracks the number of received DWORDs with
273 * running disparity errors.
274 */
275 SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR,
276
277 /**
278 * This PHY information field tracks the number of received frames with a
279 * CRC error (not including short or truncated frames).
280 */
281 SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR,
282
283 /**
284 * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
285 * primitives received.
286 */
287 SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT,
288
289 /**
290 * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
291 * primitives transmitted.
292 */
293 SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT,
294
295 /**
296 * This PHY information field tracks the number of times the inactivity
297 * timer for connections on the phy has been utilized.
298 */
299 SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED,
300
301 /**
302 * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
303 * primitives received.
304 */
305 SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT,
306
307 /**
308 * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
309 * primitives transmitted.
310 */
311 SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT,
312
313 /**
314 * This PHY information field tracks the number of CREDIT BLOCKED
315 * primitives received.
316 * @note Depending on remote device implementation, credit blocks
317 * may occur regularly.
318 */
319 SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED,
320
321 /**
322 * This PHY information field contains the number of short frames
323 * received. A short frame is simply a frame smaller then what is
324 * allowed by either the SAS or SATA specification.
325 */
326 SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME,
327
328 /**
329 * This PHY information field contains the number of frames received after
330 * credit has been exhausted.
331 */
332 SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT,
333
334 /**
335 * This PHY information field contains the number of frames received after
336 * a DONE has been received.
337 */
338 SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE,
339
340 /**
341 * This PHY information field contains the number of times the phy
342 * failed to achieve DWORD synchronization during speed negotiation.
343 */
344 SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
345};
346
347enum sci_phy_states {
348 /**
349 * Simply the initial state for the base domain state machine.
350 */
351 SCI_PHY_INITIAL,
352
353 /**
354 * This state indicates that the phy has successfully been stopped.
355 * In this state no new IO operations are permitted on this phy.
356 * This state is entered from the INITIAL state.
357 * This state is entered from the STARTING state.
358 * This state is entered from the READY state.
359 * This state is entered from the RESETTING state.
360 */
361 SCI_PHY_STOPPED,
362
363 /**
364	 * This state indicates that the phy is in the process of becoming
365 * ready. In this state no new IO operations are permitted on this phy.
366 * This state is entered from the STOPPED state.
367 * This state is entered from the READY state.
368 * This state is entered from the RESETTING state.
369 */
370 SCI_PHY_STARTING,
371
372 /**
373 * Initial state
374 */
375 SCI_PHY_SUB_INITIAL,
376
377 /**
378 * Wait state for the hardware OSSP event type notification
379 */
380 SCI_PHY_SUB_AWAIT_OSSP_EN,
381
382 /**
383 * Wait state for the PHY speed notification
384 */
385 SCI_PHY_SUB_AWAIT_SAS_SPEED_EN,
386
387 /**
388 * Wait state for the IAF Unsolicited frame notification
389 */
390 SCI_PHY_SUB_AWAIT_IAF_UF,
391
392 /**
393 * Wait state for the request to consume power
394 */
395 SCI_PHY_SUB_AWAIT_SAS_POWER,
396
397 /**
398 * Wait state for request to consume power
399 */
400 SCI_PHY_SUB_AWAIT_SATA_POWER,
401
402 /**
403 * Wait state for the SATA PHY notification
404 */
405 SCI_PHY_SUB_AWAIT_SATA_PHY_EN,
406
407 /**
408 * Wait for the SATA PHY speed notification
409 */
410 SCI_PHY_SUB_AWAIT_SATA_SPEED_EN,
411
412 /**
413 * Wait state for the SIGNATURE FIS unsolicited frame notification
414 */
415 SCI_PHY_SUB_AWAIT_SIG_FIS_UF,
416
417 /**
418 * Exit state for this state machine
419 */
420 SCI_PHY_SUB_FINAL,
421
422 /**
423	 * This state indicates that the phy is now ready. Thus, the user
424 * is able to perform IO operations utilizing this phy as long as it
425 * is currently part of a valid port.
426 * This state is entered from the STARTING state.
427 */
428 SCI_PHY_READY,
429
430 /**
431 * This state indicates that the phy is in the process of being reset.
432 * In this state no new IO operations are permitted on this phy.
433 * This state is entered from the READY state.
434 */
435 SCI_PHY_RESETTING,
436
437 /**
438 * Simply the final state for the base phy state machine.
439 */
440 SCI_PHY_FINAL,
441};
442
443void sci_phy_construct(
444 struct isci_phy *iphy,
445 struct isci_port *iport,
446 u8 phy_index);
447
448struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy);
449
450void sci_phy_set_port(
451 struct isci_phy *iphy,
452 struct isci_port *iport);
453
454enum sci_status sci_phy_initialize(
455 struct isci_phy *iphy,
456 struct scu_transport_layer_registers __iomem *transport_layer_registers,
457 struct scu_link_layer_registers __iomem *link_layer_registers);
458
459enum sci_status sci_phy_start(
460 struct isci_phy *iphy);
461
462enum sci_status sci_phy_stop(
463 struct isci_phy *iphy);
464
465enum sci_status sci_phy_reset(
466 struct isci_phy *iphy);
467
468void sci_phy_resume(
469 struct isci_phy *iphy);
470
471void sci_phy_setup_transport(
472 struct isci_phy *iphy,
473 u32 device_id);
474
475enum sci_status sci_phy_event_handler(
476 struct isci_phy *iphy,
477 u32 event_code);
478
479enum sci_status sci_phy_frame_handler(
480 struct isci_phy *iphy,
481 u32 frame_index);
482
483enum sci_status sci_phy_consume_power_handler(
484 struct isci_phy *iphy);
485
486void sci_phy_get_sas_address(
487 struct isci_phy *iphy,
488 struct sci_sas_address *sas_address);
489
490void sci_phy_get_attached_sas_address(
491 struct isci_phy *iphy,
492 struct sci_sas_address *sas_address);
493
494struct sci_phy_proto;
495void sci_phy_get_protocols(
496 struct isci_phy *iphy,
497 struct sci_phy_proto *protocols);
498enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy);
499
500struct isci_host;
501void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index);
502int isci_phy_control(struct asd_sas_phy *phy, enum phy_func func, void *buf);
503
504#endif /* !defined(_ISCI_PHY_H_) */
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
new file mode 100644
index 000000000000..8f6f9b77e41a
--- /dev/null
+++ b/drivers/scsi/isci/port.c
@@ -0,0 +1,1757 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include "isci.h"
57#include "port.h"
58#include "request.h"
59
60#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
61#define SCU_DUMMY_INDEX (0xFFFF)
62
63static void isci_port_change_state(struct isci_port *iport, enum isci_status status)
64{
65 unsigned long flags;
66
67 dev_dbg(&iport->isci_host->pdev->dev,
68 "%s: iport = %p, state = 0x%x\n",
69 __func__, iport, status);
70
71 /* XXX pointless lock */
72 spin_lock_irqsave(&iport->state_lock, flags);
73 iport->status = status;
74 spin_unlock_irqrestore(&iport->state_lock, flags);
75}
76
77static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
78{
79 u8 index;
80
81 proto->all = 0;
82 for (index = 0; index < SCI_MAX_PHYS; index++) {
83 struct isci_phy *iphy = iport->phy_table[index];
84
85 if (!iphy)
86 continue;
87 sci_phy_get_protocols(iphy, proto);
88 }
89}
90
91static u32 sci_port_get_phys(struct isci_port *iport)
92{
93 u32 index;
94 u32 mask;
95
96 mask = 0;
97 for (index = 0; index < SCI_MAX_PHYS; index++)
98 if (iport->phy_table[index])
99 mask |= (1 << index);
100
101 return mask;
102}
103
104/**
105 * sci_port_get_properties() - This method simply returns the properties
106 *    regarding the port, such as: physical index, protocols, sas address, etc.
107 * @iport: this parameter specifies the port for which to retrieve the
108 *    physical index.
109 * @prop: This parameter specifies the properties structure into which to
110 *    copy the requested information.
111 *
112 * Return: SCI_SUCCESS if the specified port was valid, or
113 *    SCI_FAILURE_INVALID_PORT if the specified port is not valid. When
114 *    SCI_FAILURE_INVALID_PORT is returned, no data is copied to the
115 *    properties output parameter.
116 */
117static enum sci_status sci_port_get_properties(struct isci_port *iport,
118 struct sci_port_properties *prop)
119{
120 if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
121 return SCI_FAILURE_INVALID_PORT;
122
123 prop->index = iport->logical_port_index;
124 prop->phy_mask = sci_port_get_phys(iport);
125 sci_port_get_sas_address(iport, &prop->local.sas_address);
126 sci_port_get_protocols(iport, &prop->local.protocols);
127 sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
128
129 return SCI_SUCCESS;
130}
131
132static void sci_port_bcn_enable(struct isci_port *iport)
133{
134 struct isci_phy *iphy;
135 u32 val;
136 int i;
137
138 for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
139 iphy = iport->phy_table[i];
140 if (!iphy)
141 continue;
142 val = readl(&iphy->link_layer_registers->link_layer_control);
143 /* clear the bit by writing 1. */
144 writel(val, &iphy->link_layer_registers->link_layer_control);
145 }
146}
147
148/* called under sci_lock to stabilize phy:port associations */
149void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport)
150{
151 int i;
152
153 clear_bit(IPORT_BCN_BLOCKED, &iport->flags);
154 wake_up(&ihost->eventq);
155
156 if (!test_and_clear_bit(IPORT_BCN_PENDING, &iport->flags))
157 return;
158
159 for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
160 struct isci_phy *iphy = iport->phy_table[i];
161
162 if (!iphy)
163 continue;
164
165 ihost->sas_ha.notify_port_event(&iphy->sas_phy,
166 PORTE_BROADCAST_RCVD);
167 break;
168 }
169}
170
171static void isci_port_bc_change_received(struct isci_host *ihost,
172 struct isci_port *iport,
173 struct isci_phy *iphy)
174{
175 if (iport && test_bit(IPORT_BCN_BLOCKED, &iport->flags)) {
176 dev_dbg(&ihost->pdev->dev,
177 "%s: disabled BCN; isci_phy = %p, sas_phy = %p\n",
178 __func__, iphy, &iphy->sas_phy);
179 set_bit(IPORT_BCN_PENDING, &iport->flags);
180 atomic_inc(&iport->event);
181 wake_up(&ihost->eventq);
182 } else {
183 dev_dbg(&ihost->pdev->dev,
184 "%s: isci_phy = %p, sas_phy = %p\n",
185 __func__, iphy, &iphy->sas_phy);
186
187 ihost->sas_ha.notify_port_event(&iphy->sas_phy,
188 PORTE_BROADCAST_RCVD);
189 }
190 sci_port_bcn_enable(iport);
191}
192
193static void isci_port_link_up(struct isci_host *isci_host,
194 struct isci_port *iport,
195 struct isci_phy *iphy)
196{
197 unsigned long flags;
198 struct sci_port_properties properties;
199 unsigned long success = true;
200
201 BUG_ON(iphy->isci_port != NULL);
202
203 iphy->isci_port = iport;
204
205 dev_dbg(&isci_host->pdev->dev,
206 "%s: isci_port = %p\n",
207 __func__, iport);
208
209 spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
210
211 isci_port_change_state(iphy->isci_port, isci_starting);
212
213 sci_port_get_properties(iport, &properties);
214
215 if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
216 u64 attached_sas_address;
217
218 iphy->sas_phy.oob_mode = SATA_OOB_MODE;
219 iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);
220
221 /*
222 * For direct-attached SATA devices, the SCI core will
223 * automagically assign a SAS address to the end device
224 * for the purpose of creating a port. This SAS address
225			 * will not be the same as the one assigned to the PHY and
226			 * needs to be obtained from the struct sci_port_properties.
227 */
228 attached_sas_address = properties.remote.sas_address.high;
229 attached_sas_address <<= 32;
230 attached_sas_address |= properties.remote.sas_address.low;
231 swab64s(&attached_sas_address);
232
233 memcpy(&iphy->sas_phy.attached_sas_addr,
234 &attached_sas_address, sizeof(attached_sas_address));
235 } else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
236 iphy->sas_phy.oob_mode = SAS_OOB_MODE;
237 iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
238
239 /* Copy the attached SAS address from the IAF */
240 memcpy(iphy->sas_phy.attached_sas_addr,
241 iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
242 } else {
243		dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
244 success = false;
245 }
246
247 iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);
248
249 spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
250
251 /* Notify libsas that we have an address frame, if indeed
252 * we've found an SSP, SMP, or STP target */
253 if (success)
254 isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
255 PORTE_BYTES_DMAED);
256}
257
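/*
 * A minimal, self-contained sketch (not part of this patch) of the SATA
 * address conversion performed above: the two 32-bit halves are packed into
 * a u64 and byte-swapped because libsas stores attached_sas_addr as a
 * big-endian byte array. The helper name pack_sas_addr() is hypothetical.
 */
static inline void pack_sas_addr(u8 dst[SAS_ADDR_SIZE], u32 hi, u32 lo)
{
	u64 addr = ((u64)hi << 32) | lo;	/* host-order 64-bit address */

	swab64s(&addr);				/* swap into big-endian byte order */
	memcpy(dst, &addr, sizeof(addr));	/* libsas expects a byte array */
}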
258
259/**
260 * isci_port_link_down() - This function is called by the sci core when a link
261 * becomes inactive.
262 * @isci_host: This parameter specifies the isci host object.
263 * @isci_phy: This parameter specifies the isci phy whose link has gone down.
264 * @isci_port: This parameter specifies the isci port containing that link.
265 *
266 */
267static void isci_port_link_down(struct isci_host *isci_host,
268 struct isci_phy *isci_phy,
269 struct isci_port *isci_port)
270{
271 struct isci_remote_device *isci_device;
272
273 dev_dbg(&isci_host->pdev->dev,
274 "%s: isci_port = %p\n", __func__, isci_port);
275
276 if (isci_port) {
277
278 /* check to see if this is the last phy on this port. */
279 if (isci_phy->sas_phy.port &&
280 isci_phy->sas_phy.port->num_phys == 1) {
281 atomic_inc(&isci_port->event);
282 isci_port_bcn_enable(isci_host, isci_port);
283
284 /* change the state for all devices on this port. The
285 * next task sent to this device will be returned as
286 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
287 * remove the target
288 */
289 list_for_each_entry(isci_device,
290 &isci_port->remote_dev_list,
291 node) {
292 dev_dbg(&isci_host->pdev->dev,
293 "%s: isci_device = %p\n",
294 __func__, isci_device);
295 set_bit(IDEV_GONE, &isci_device->flags);
296 }
297 }
298 isci_port_change_state(isci_port, isci_stopping);
299 }
300
301	/* Notify libsas of the broken link, this will trigger calls to our
302 * isci_port_deformed and isci_dev_gone functions.
303 */
304 sas_phy_disconnected(&isci_phy->sas_phy);
305 isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
306 PHYE_LOSS_OF_SIGNAL);
307
308 isci_phy->isci_port = NULL;
309
310 dev_dbg(&isci_host->pdev->dev,
311 "%s: isci_port = %p - Done\n", __func__, isci_port);
312}
313
314
315/**
316 * isci_port_ready() - This function is called by the sci core when a link
317 * becomes ready.
318 * @isci_host: This parameter specifies the isci host object.
319 * @isci_port: This parameter specifies the port that has become ready.
320 *
321 */
322static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port)
323{
324 dev_dbg(&isci_host->pdev->dev,
325 "%s: isci_port = %p\n", __func__, isci_port);
326
327 complete_all(&isci_port->start_complete);
328 isci_port_change_state(isci_port, isci_ready);
329 return;
330}
331
332/**
333 * isci_port_not_ready() - This function is called by the sci core when a link
334 * is not ready. All remote devices on this link will be removed if they are
335 * in the stopping state.
336 * @isci_host: This parameter specifies the isci host object.
337 * @isci_port: This parameter specifies the port that is not ready.
338 *
339 */
340static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port)
341{
342 dev_dbg(&isci_host->pdev->dev,
343 "%s: isci_port = %p\n", __func__, isci_port);
344}
345
346static void isci_port_stop_complete(struct isci_host *ihost,
347 struct isci_port *iport,
348 enum sci_status completion_status)
349{
350 dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
351}
352
353/**
354 * isci_port_hard_reset_complete() - This function is called by the sci core
355 * when the hard reset complete notification has been received.
356 * @isci_port: This parameter specifies the port that was hard reset.
357 * @completion_status: This parameter specifies the core status for the reset
358 * process.
359 *
360 */
361static void isci_port_hard_reset_complete(struct isci_port *isci_port,
362 enum sci_status completion_status)
363{
364 dev_dbg(&isci_port->isci_host->pdev->dev,
365 "%s: isci_port = %p, completion_status=%x\n",
366 __func__, isci_port, completion_status);
367
368 /* Save the status of the hard reset from the port. */
369 isci_port->hard_reset_status = completion_status;
370
371 complete_all(&isci_port->hard_reset_complete);
372}
373
374/* This method will return true if the specified phy can be assigned to
375 * this port. The following is the list of phys allowed for each port:
376 *   Port 0: phys 3, 2, 1, 0;  Port 1: phy 1;  Port 2: phys 3, 2;  Port 3: phy 3
377 * This method doesn't preclude all configurations. It merely ensures that
378 * a phy is part of the allowable set of phy identifiers for that port. For
379 * example, one could assign phy 3 to port 0 and no other phys. Please
380 * refer to sci_port_is_phy_mask_valid() for information regarding whether
381 * the phy_mask for a port can be supported.
382 *
383 * Returns: true if this is a valid phy assignment for the port; false if not.
384 */
385bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
386{
387 struct isci_host *ihost = iport->owning_controller;
388 struct sci_user_parameters *user = &ihost->user_parameters;
389
390 /* Initialize to invalid value. */
391 u32 existing_phy_index = SCI_MAX_PHYS;
392 u32 index;
393
394 if ((iport->physical_port_index == 1) && (phy_index != 1))
395 return false;
396
397 if (iport->physical_port_index == 3 && phy_index != 3)
398 return false;
399
400 if (iport->physical_port_index == 2 &&
401 (phy_index == 0 || phy_index == 1))
402 return false;
403
404 for (index = 0; index < SCI_MAX_PHYS; index++)
405 if (iport->phy_table[index] && index != phy_index)
406 existing_phy_index = index;
407
408 /* Ensure that all of the phys in the port are capable of
409 * operating at the same maximum link rate.
410 */
411 if (existing_phy_index < SCI_MAX_PHYS &&
412 user->phys[phy_index].max_speed_generation !=
413 user->phys[existing_phy_index].max_speed_generation)
414 return false;
415
416 return true;
417}
418
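/*
 * Hypothetical table-driven equivalent (not part of this patch) of the
 * per-port index checks above, assuming SCI_MAX_PORTS == SCI_MAX_PHYS == 4.
 * The link-rate matching performed by the function above is intentionally
 * omitted; the helper name phy_allowed_for_port() is invented here.
 */
static bool phy_allowed_for_port(u8 port_index, u32 phy_index)
{
	/* port 0: phys 0-3, port 1: phy 1, port 2: phys 2-3, port 3: phy 3 */
	static const u8 allowed[SCI_MAX_PORTS] = { 0x0f, 0x02, 0x0c, 0x08 };

	if (port_index >= SCI_MAX_PORTS || phy_index >= SCI_MAX_PHYS)
		return false;
	return (allowed[port_index] >> phy_index) & 1;
}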
419/**
420 * sci_port_is_phy_mask_valid() - determine if a phy mask is supportable
421 * @sci_port: This is the port object for which to determine if the phy mask
422 *    can be supported.
423 *
424 * This method will return true if the port's phy mask can be supported by
425 * the SCU. The following is the list of valid PHY mask configurations for
426 * each port:
427 *   Port 0: [[3 2] 1] 0;  Port 1: [1];  Port 2: [[3] 2];  Port 3: [3]
428 *
429 * Returns: true if this is a valid phy mask for the port; false if it is not.
430 */
431static bool sci_port_is_phy_mask_valid(
432 struct isci_port *iport,
433 u32 phy_mask)
434{
435 if (iport->physical_port_index == 0) {
436 if (((phy_mask & 0x0F) == 0x0F)
437 || ((phy_mask & 0x03) == 0x03)
438 || ((phy_mask & 0x01) == 0x01)
439 || (phy_mask == 0))
440 return true;
441 } else if (iport->physical_port_index == 1) {
442 if (((phy_mask & 0x02) == 0x02)
443 || (phy_mask == 0))
444 return true;
445 } else if (iport->physical_port_index == 2) {
446 if (((phy_mask & 0x0C) == 0x0C)
447 || ((phy_mask & 0x04) == 0x04)
448 || (phy_mask == 0))
449 return true;
450 } else if (iport->physical_port_index == 3) {
451 if (((phy_mask & 0x08) == 0x08)
452 || (phy_mask == 0))
453 return true;
454 }
455
456 return false;
457}
458
459/*
460 * This method retrieves a currently active (i.e. connected) phy contained in
461 * the port. Currently, the lowest-order phy that is connected is returned.
462 * This method returns a pointer to a struct isci_phy object. NULL is
463 * returned if there are no currently active (i.e. connected to a remote end
464 * point) phys contained in the port. All other values specify a struct
465 * isci_phy object that is active in the port.
466 */
467static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
468{
469 u32 index;
470 struct isci_phy *iphy;
471
472 for (index = 0; index < SCI_MAX_PHYS; index++) {
473 /* Ensure that the phy is both part of the port and currently
474 * connected to the remote end-point.
475 */
476 iphy = iport->phy_table[index];
477 if (iphy && sci_port_active_phy(iport, iphy))
478 return iphy;
479 }
480
481 return NULL;
482}
483
484static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
485{
486	/* Check to see if we can add this phy to the port. That means that
487	 * the phy is not already part of a port and that the port does
488	 * not already have a phy assigned to the phy index.
489	 */
490 if (!iport->phy_table[iphy->phy_index] &&
491 !phy_get_non_dummy_port(iphy) &&
492 sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
493 /* Phy is being added in the stopped state so we are in MPC mode
494 * make logical port index = physical port index
495 */
496 iport->logical_port_index = iport->physical_port_index;
497 iport->phy_table[iphy->phy_index] = iphy;
498 sci_phy_set_port(iphy, iport);
499
500 return SCI_SUCCESS;
501 }
502
503 return SCI_FAILURE;
504}
505
506static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
507{
508 /* Make sure that this phy is part of this port */
509 if (iport->phy_table[iphy->phy_index] == iphy &&
510 phy_get_non_dummy_port(iphy) == iport) {
511 struct isci_host *ihost = iport->owning_controller;
512
513 /* Yep it is assigned to this port so remove it */
514 sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
515 iport->phy_table[iphy->phy_index] = NULL;
516 return SCI_SUCCESS;
517 }
518
519 return SCI_FAILURE;
520}
521
522void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
523{
524 u32 index;
525
526 sas->high = 0;
527 sas->low = 0;
528 for (index = 0; index < SCI_MAX_PHYS; index++)
529 if (iport->phy_table[index])
530 sci_phy_get_sas_address(iport->phy_table[index], sas);
531}
532
533void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
534{
535 struct isci_phy *iphy;
536
537 /*
538 * Ensure that the phy is both part of the port and currently
539 * connected to the remote end-point.
540 */
541 iphy = sci_port_get_a_connected_phy(iport);
542 if (iphy) {
543 if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
544 sci_phy_get_attached_sas_address(iphy, sas);
545 } else {
546 sci_phy_get_sas_address(iphy, sas);
547 sas->low += iphy->phy_index;
548 }
549 } else {
550 sas->high = 0;
551 sas->low = 0;
552 }
553}
554
555/**
556 * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
557 *
558 * @sci_port: logical port on which we need to create the remote node context
559 * @rni: remote node index for this remote node context.
560 *
561 * This routine will construct a dummy remote node context data structure
562 * This structure will be posted to the hardware to work around a scheduler
563 * error in the hardware.
564 */
565static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
566{
567 union scu_remote_node_context *rnc;
568
569 rnc = &iport->owning_controller->remote_node_context_table[rni];
570
571 memset(rnc, 0, sizeof(union scu_remote_node_context));
572
573 rnc->ssp.remote_sas_address_hi = 0;
574 rnc->ssp.remote_sas_address_lo = 0;
575
576 rnc->ssp.remote_node_index = rni;
577 rnc->ssp.remote_node_port_width = 1;
578 rnc->ssp.logical_port_index = iport->physical_port_index;
579
580 rnc->ssp.nexus_loss_timer_enable = false;
581 rnc->ssp.check_bit = false;
582 rnc->ssp.is_valid = true;
583 rnc->ssp.is_remote_node_context = true;
584 rnc->ssp.function_number = 0;
585 rnc->ssp.arbitration_wait_time = 0;
586}
587
588/*
589 * Construct a dummy task context data structure. This
590 * structure will be posted to the hardware to work around a scheduler error
591 * in the hardware.
592 */
593static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
594{
595 struct isci_host *ihost = iport->owning_controller;
596 struct scu_task_context *task_context;
597
598 task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
599 memset(task_context, 0, sizeof(struct scu_task_context));
600
601 task_context->initiator_request = 1;
602 task_context->connection_rate = 1;
603 task_context->logical_port_index = iport->physical_port_index;
604 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
605 task_context->task_index = ISCI_TAG_TCI(tag);
606 task_context->valid = SCU_TASK_CONTEXT_VALID;
607 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
608 task_context->remote_node_index = iport->reserved_rni;
609 task_context->do_not_dma_ssp_good_response = 1;
610 task_context->task_phase = 0x01;
611}
612
613static void sci_port_destroy_dummy_resources(struct isci_port *iport)
614{
615 struct isci_host *ihost = iport->owning_controller;
616
617 if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
618 isci_free_tag(ihost, iport->reserved_tag);
619
620 if (iport->reserved_rni != SCU_DUMMY_INDEX)
621 sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
622 1, iport->reserved_rni);
623
624 iport->reserved_rni = SCU_DUMMY_INDEX;
625 iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
626}
627
628void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
629{
630 u8 index;
631
632 for (index = 0; index < SCI_MAX_PHYS; index++) {
633 if (iport->active_phy_mask & (1 << index))
634 sci_phy_setup_transport(iport->phy_table[index], device_id);
635 }
636}
637
638static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
639 bool do_notify_user)
640{
641 struct isci_host *ihost = iport->owning_controller;
642
643 if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
644 sci_phy_resume(iphy);
645
646 iport->active_phy_mask |= 1 << iphy->phy_index;
647
648 sci_controller_clear_invalid_phy(ihost, iphy);
649
650 if (do_notify_user == true)
651 isci_port_link_up(ihost, iport, iphy);
652}
653
654void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
655 bool do_notify_user)
656{
657 struct isci_host *ihost = iport->owning_controller;
658
659 iport->active_phy_mask &= ~(1 << iphy->phy_index);
660
661 iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
662
663 /* Re-assign the phy back to the LP as if it were a narrow port */
664 writel(iphy->phy_index,
665 &iport->port_pe_configuration_register[iphy->phy_index]);
666
667 if (do_notify_user == true)
668 isci_port_link_down(ihost, iphy, iport);
669}
670
671static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
672{
673 struct isci_host *ihost = iport->owning_controller;
674
675 /*
676	 * Check to see if we have already reported this link as bad and if
677 * not go ahead and tell the SCI_USER that we have discovered an
678 * invalid link.
679 */
680 if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
681 ihost->invalid_phy_mask |= 1 << iphy->phy_index;
682 dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
683 }
684}
685
686static bool is_port_ready_state(enum sci_port_states state)
687{
688 switch (state) {
689 case SCI_PORT_READY:
690 case SCI_PORT_SUB_WAITING:
691 case SCI_PORT_SUB_OPERATIONAL:
692 case SCI_PORT_SUB_CONFIGURING:
693 return true;
694 default:
695 return false;
696 }
697}
698
699/* flag dummy rnc handling when exiting a ready state */
700static void port_state_machine_change(struct isci_port *iport,
701 enum sci_port_states state)
702{
703 struct sci_base_state_machine *sm = &iport->sm;
704 enum sci_port_states old_state = sm->current_state_id;
705
706 if (is_port_ready_state(old_state) && !is_port_ready_state(state))
707 iport->ready_exit = true;
708
709 sci_change_state(sm, state);
710 iport->ready_exit = false;
711}
712
713/**
714 * sci_port_general_link_up_handler - phy can be assigned to port?
715 * @sci_port: This is the port object that has a phy which has gone link up.
716 * @sci_phy: This is the struct isci_phy object that has gone link up.
717 * @do_notify_user: This parameter specifies whether to inform the user (via
718 *    sci_port_link_up()) as to the fact that a new phy has become ready.
719 *
720 * Determine if this phy can be assigned to this port. If the phy is not a
721 * valid PHY for this port then the function will notify the user. A PHY can
722 * only be part of a port if its attached SAS ADDRESS is the same as that of
723 * all other PHYs in the same port.
724 *
725 */
726static void sci_port_general_link_up_handler(struct isci_port *iport,
727 struct isci_phy *iphy,
728 bool do_notify_user)
729{
730 struct sci_sas_address port_sas_address;
731 struct sci_sas_address phy_sas_address;
732
733 sci_port_get_attached_sas_address(iport, &port_sas_address);
734 sci_phy_get_attached_sas_address(iphy, &phy_sas_address);
735
736 /* If the SAS address of the new phy matches the SAS address of
737 * other phys in the port OR this is the first phy in the port,
738 * then activate the phy and allow it to be used for operations
739 * in this port.
740 */
741 if ((phy_sas_address.high == port_sas_address.high &&
742 phy_sas_address.low == port_sas_address.low) ||
743 iport->active_phy_mask == 0) {
744 struct sci_base_state_machine *sm = &iport->sm;
745
746 sci_port_activate_phy(iport, iphy, do_notify_user);
747 if (sm->current_state_id == SCI_PORT_RESETTING)
748 port_state_machine_change(iport, SCI_PORT_READY);
749 } else
750 sci_port_invalid_link_up(iport, iphy);
751}
752
753
754
755/**
756 * This method returns false if the port has exactly one phy object assigned.
757 * If there are no phys, or more than one phy, then the method will return
758 * true.
759 * @sci_port: The port for which the wide port condition is to be checked.
760 *
761 * bool: true is returned if this is a wide port; false is returned if this
762 * is a narrow port.
763 */
764static bool sci_port_is_wide(struct isci_port *iport)
765{
766 u32 index;
767 u32 phy_count = 0;
768
769 for (index = 0; index < SCI_MAX_PHYS; index++) {
770 if (iport->phy_table[index] != NULL) {
771 phy_count++;
772 }
773 }
774
775 return phy_count != 1;
776}
777
778/**
779 * This method is called by the PHY object when a link is detected. If the
780 * port wants the PHY to continue on to the link up state then the port
781 * layer must return true. If the port object returns false the phy object
782 * must halt its attempt to go link up.
783 * @sci_port: The port associated with the phy object.
784 * @sci_phy: The phy object that is trying to go link up.
785 *
786 * bool: true is returned if this phy can continue on to the link up (ready)
787 * state; false is returned if it cannot. This notification is in place for
788 * wide ports and direct attached phys. Since there are no wide ported SATA
789 * devices, letting a SATA phy join an already-wide port would be an invalid
790 * port configuration.
791 */
792bool sci_port_link_detected(
793 struct isci_port *iport,
794 struct isci_phy *iphy)
795{
796 if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
797 (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
798 sci_port_is_wide(iport)) {
799 sci_port_invalid_link_up(iport, iphy);
800
801 return false;
802 }
803
804 return true;
805}
806
807static void port_timeout(unsigned long data)
808{
809 struct sci_timer *tmr = (struct sci_timer *)data;
810 struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
811 struct isci_host *ihost = iport->owning_controller;
812 unsigned long flags;
813 u32 current_state;
814
815 spin_lock_irqsave(&ihost->scic_lock, flags);
816
817 if (tmr->cancel)
818 goto done;
819
820 current_state = iport->sm.current_state_id;
821
822 if (current_state == SCI_PORT_RESETTING) {
823 /* if the port is still in the resetting state then the timeout
824 * fired before the reset completed.
825 */
826 port_state_machine_change(iport, SCI_PORT_FAILED);
827 } else if (current_state == SCI_PORT_STOPPED) {
828		/* if the port is stopped then the start request failed. In this
829		 * case stay in the stopped state.
830		 */
831		dev_err(sciport_to_dev(iport),
832			"%s: SCIC Port 0x%p failed to start before timeout.\n",
833 __func__,
834 iport);
835 } else if (current_state == SCI_PORT_STOPPING) {
836 /* if the port is still stopping then the stop has not completed */
837 isci_port_stop_complete(iport->owning_controller,
838 iport,
839 SCI_FAILURE_TIMEOUT);
840 } else {
841 /* The port is in the ready state and we have a timer
842		 * reporting a timeout; this should not happen.
843 */
844 dev_err(sciport_to_dev(iport),
845 "%s: SCIC Port 0x%p is processing a timeout operation "
846 "in state %d.\n", __func__, iport, current_state);
847 }
848
849done:
850 spin_unlock_irqrestore(&ihost->scic_lock, flags);
851}
852
853/* --------------------------------------------------------------------------- */
854
855/**
856 * This function updates the hardware's VIIT entry for this port.
857 *
858 *
859 */
860static void sci_port_update_viit_entry(struct isci_port *iport)
861{
862 struct sci_sas_address sas_address;
863
864 sci_port_get_sas_address(iport, &sas_address);
865
866 writel(sas_address.high,
867 &iport->viit_registers->initiator_sas_address_hi);
868 writel(sas_address.low,
869 &iport->viit_registers->initiator_sas_address_lo);
870
871	/* This value gets cleared just in case it's not already cleared */
872 writel(0, &iport->viit_registers->reserved);
873
874 /* We are required to update the status register last */
875 writel(SCU_VIIT_ENTRY_ID_VIIT |
876 SCU_VIIT_IPPT_INITIATOR |
877 ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
878 SCU_VIIT_STATUS_ALL_VALID,
879 &iport->viit_registers->status);
880}
881
882enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
883{
884 u16 index;
885 struct isci_phy *iphy;
886 enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;
887
888	/* Loop through all of the phys in this port and find the phy with
889	 * the lowest maximum link rate.
890	 */
891 for (index = 0; index < SCI_MAX_PHYS; index++) {
892 iphy = iport->phy_table[index];
893 if (iphy && sci_port_active_phy(iport, iphy) &&
894 iphy->max_negotiated_speed < max_allowed_speed)
895 max_allowed_speed = iphy->max_negotiated_speed;
896 }
897
898 return max_allowed_speed;
899}
900
901static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
902{
903 u32 pts_control_value;
904
905 pts_control_value = readl(&iport->port_task_scheduler_registers->control);
906 pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
907 writel(pts_control_value, &iport->port_task_scheduler_registers->control);
908}
909
910/**
911 * sci_port_post_dummy_request() - post dummy/workaround request
912 * @sci_port: port to post task
913 *
914 * Prevent the hardware scheduler from posting new requests to the front
915 * of the scheduler queue causing a starvation problem for currently
916 * ongoing requests.
917 *
918 */
919static void sci_port_post_dummy_request(struct isci_port *iport)
920{
921 struct isci_host *ihost = iport->owning_controller;
922 u16 tag = iport->reserved_tag;
923 struct scu_task_context *tc;
924 u32 command;
925
926 tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
927 tc->abort = 0;
928
929 command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
930 iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
931 ISCI_TAG_TCI(tag);
932
933 sci_controller_post_request(ihost, command);
934}
935
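/*
 * Hypothetical helper (not part of this patch): this post and the abort
 * below compose the same three-field command word and differ only in the
 * request type, so a shared builder would read:
 */
static u32 scu_build_post_command(u32 request_type, u8 port_index, u16 tag)
{
	return request_type |
	       port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
	       ISCI_TAG_TCI(tag);
}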
936/**
937 * This routine will abort the dummy request. This will allow the hardware to
938 * power down parts of the silicon to save power.
939 *
940 * @sci_port: The port on which the task must be aborted.
941 *
942 */
943static void sci_port_abort_dummy_request(struct isci_port *iport)
944{
945 struct isci_host *ihost = iport->owning_controller;
946 u16 tag = iport->reserved_tag;
947 struct scu_task_context *tc;
948 u32 command;
949
950 tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
951 tc->abort = 1;
952
953 command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
954 iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
955 ISCI_TAG_TCI(tag);
956
957 sci_controller_post_request(ihost, command);
958}
959
960/**
961 *
962 * @sci_port: This is the struct isci_port object to resume.
963 *
964 * This method will resume the port task scheduler for this port object.
965 */
966static void
967sci_port_resume_port_task_scheduler(struct isci_port *iport)
968{
969 u32 pts_control_value;
970
971 pts_control_value = readl(&iport->port_task_scheduler_registers->control);
972 pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
973 writel(pts_control_value, &iport->port_task_scheduler_registers->control);
974}
975
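/*
 * Hypothetical helper (not part of this patch): suspend/resume here, and
 * enable/disable further below, are all read-modify-write cycles on the
 * same PTSx control register; the common pattern, factored out:
 */
static void sci_port_ptsx_rmw(struct isci_port *iport, u32 set, u32 clear)
{
	u32 val = readl(&iport->port_task_scheduler_registers->control);

	val = (val & ~clear) | set;	/* clear requested bits, then set */
	writel(val, &iport->port_task_scheduler_registers->control);
}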
976static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
977{
978 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
979
980 sci_port_suspend_port_task_scheduler(iport);
981
982 iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;
983
984 if (iport->active_phy_mask != 0) {
985 /* At least one of the phys on the port is ready */
986 port_state_machine_change(iport,
987 SCI_PORT_SUB_OPERATIONAL);
988 }
989}
990
991static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
992{
993 u32 index;
994 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
995 struct isci_host *ihost = iport->owning_controller;
996
997 isci_port_ready(ihost, iport);
998
999 for (index = 0; index < SCI_MAX_PHYS; index++) {
1000 if (iport->phy_table[index]) {
1001 writel(iport->physical_port_index,
1002 &iport->port_pe_configuration_register[
1003 iport->phy_table[index]->phy_index]);
1004 }
1005 }
1006
1007 sci_port_update_viit_entry(iport);
1008
1009 sci_port_resume_port_task_scheduler(iport);
1010
1011 /*
1012 * Post the dummy task for the port so the hardware can schedule
1013 * io correctly
1014 */
1015 sci_port_post_dummy_request(iport);
1016}
1017
1018static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
1019{
1020 struct isci_host *ihost = iport->owning_controller;
1021 u8 phys_index = iport->physical_port_index;
1022 union scu_remote_node_context *rnc;
1023 u16 rni = iport->reserved_rni;
1024 u32 command;
1025
1026 rnc = &ihost->remote_node_context_table[rni];
1027
1028 rnc->ssp.is_valid = false;
1029
1030 /* ensure the preceding tc abort request has reached the
1031 * controller and give it ample time to act before posting the rnc
1032 * invalidate
1033 */
1034 readl(&ihost->smu_registers->interrupt_status); /* flush */
1035 udelay(10);
1036
1037 command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
1038 phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
1039
1040 sci_controller_post_request(ihost, command);
1041}
1042
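/*
 * Hypothetical helper (not part of this patch): the read-back plus udelay()
 * above, repeated in sci_port_post_dummy_remote_node() below, is a
 * posted-write flush followed by a settling delay:
 */
static void sci_controller_flush_and_settle(struct isci_host *ihost)
{
	readl(&ihost->smu_registers->interrupt_status);	/* flush posted writes */
	udelay(10);	/* give the controller time to act on them */
}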
1043/**
1044 *
1045 * @object: This is the object which is cast to a struct isci_port object.
1046 *
1047 * This method will perform the actions required by the struct isci_port on
1048 * exiting the SCI_PORT_SUB_OPERATIONAL state. This function reports
1049 * the port as not ready and suspends the port task scheduler.
1050 */
1051static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
1052{
1053 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1054 struct isci_host *ihost = iport->owning_controller;
1055
1056 /*
1057 * Kill the dummy task for this port if it has not yet posted
1058 * the hardware will treat this as a NOP and just return abort
1059 * complete.
1060 */
1061 sci_port_abort_dummy_request(iport);
1062
1063 isci_port_not_ready(ihost, iport);
1064
1065 if (iport->ready_exit)
1066 sci_port_invalidate_dummy_remote_node(iport);
1067}
1068
1069static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
1070{
1071 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1072 struct isci_host *ihost = iport->owning_controller;
1073
1074 if (iport->active_phy_mask == 0) {
1075 isci_port_not_ready(ihost, iport);
1076
1077 port_state_machine_change(iport,
1078 SCI_PORT_SUB_WAITING);
1079 } else if (iport->started_request_count == 0)
1080 port_state_machine_change(iport,
1081 SCI_PORT_SUB_OPERATIONAL);
1082}
1083
1084static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
1085{
1086 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1087
1088 sci_port_suspend_port_task_scheduler(iport);
1089 if (iport->ready_exit)
1090 sci_port_invalidate_dummy_remote_node(iport);
1091}
1092
1093enum sci_status sci_port_start(struct isci_port *iport)
1094{
1095 struct isci_host *ihost = iport->owning_controller;
1096 enum sci_status status = SCI_SUCCESS;
1097 enum sci_port_states state;
1098 u32 phy_mask;
1099
1100 state = iport->sm.current_state_id;
1101 if (state != SCI_PORT_STOPPED) {
1102 dev_warn(sciport_to_dev(iport),
1103 "%s: in wrong state: %d\n", __func__, state);
1104 return SCI_FAILURE_INVALID_STATE;
1105 }
1106
1107 if (iport->assigned_device_count > 0) {
1108 /* TODO This is a start failure operation because
1109 * there are still devices assigned to this port.
1110 * There must be no devices assigned to a port on a
1111 * start operation.
1112 */
1113 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
1114 }
1115
1116 if (iport->reserved_rni == SCU_DUMMY_INDEX) {
1117 u16 rni = sci_remote_node_table_allocate_remote_node(
1118 &ihost->available_remote_nodes, 1);
1119
1120 if (rni != SCU_DUMMY_INDEX)
1121 sci_port_construct_dummy_rnc(iport, rni);
1122 else
1123 status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
1124 iport->reserved_rni = rni;
1125 }
1126
1127 if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
1128 u16 tag;
1129
1130 tag = isci_alloc_tag(ihost);
1131 if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
1132 status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
1133 else
1134 sci_port_construct_dummy_task(iport, tag);
1135 iport->reserved_tag = tag;
1136 }
1137
1138 if (status == SCI_SUCCESS) {
1139 phy_mask = sci_port_get_phys(iport);
1140
1141 /*
1142 * There are one or more phys assigned to this port. Make sure
1143 * the port's phy mask is in fact legal and supported by the
1144 * silicon.
1145 */
1146 if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
1147 port_state_machine_change(iport,
1148 SCI_PORT_READY);
1149
1150 return SCI_SUCCESS;
1151 }
1152 status = SCI_FAILURE;
1153 }
1154
1155 if (status != SCI_SUCCESS)
1156 sci_port_destroy_dummy_resources(iport);
1157
1158 return status;
1159}
1160
1161enum sci_status sci_port_stop(struct isci_port *iport)
1162{
1163 enum sci_port_states state;
1164
1165 state = iport->sm.current_state_id;
1166 switch (state) {
1167 case SCI_PORT_STOPPED:
1168 return SCI_SUCCESS;
1169 case SCI_PORT_SUB_WAITING:
1170 case SCI_PORT_SUB_OPERATIONAL:
1171 case SCI_PORT_SUB_CONFIGURING:
1172 case SCI_PORT_RESETTING:
1173 port_state_machine_change(iport,
1174 SCI_PORT_STOPPING);
1175 return SCI_SUCCESS;
1176 default:
1177 dev_warn(sciport_to_dev(iport),
1178 "%s: in wrong state: %d\n", __func__, state);
1179 return SCI_FAILURE_INVALID_STATE;
1180 }
1181}
1182
1183static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
1184{
1185 enum sci_status status = SCI_FAILURE_INVALID_PHY;
1186 struct isci_phy *iphy = NULL;
1187 enum sci_port_states state;
1188 u32 phy_index;
1189
1190 state = iport->sm.current_state_id;
1191 if (state != SCI_PORT_SUB_OPERATIONAL) {
1192 dev_warn(sciport_to_dev(iport),
1193 "%s: in wrong state: %d\n", __func__, state);
1194 return SCI_FAILURE_INVALID_STATE;
1195 }
1196
1197 /* Select a phy on which we can send the hard reset request. */
1198 for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
1199 iphy = iport->phy_table[phy_index];
1200 if (iphy && !sci_port_active_phy(iport, iphy)) {
1201 /*
1202		 * We found a phy but it is not ready; select a
1203		 * different phy.
1204 */
1205 iphy = NULL;
1206 }
1207 }
1208
1209	/* If we did not find a usable phy, fail; otherwise start the reset */
1210 if (!iphy)
1211 return status;
1212 status = sci_phy_reset(iphy);
1213
1214 if (status != SCI_SUCCESS)
1215 return status;
1216
1217 sci_mod_timer(&iport->timer, timeout);
1218 iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;
1219
1220 port_state_machine_change(iport, SCI_PORT_RESETTING);
1221 return SCI_SUCCESS;
1222}
1223
1224/**
1225 * sci_port_add_phy() -
1226 * @sci_port: This parameter specifies the port in which the phy will be added.
1227 * @sci_phy: This parameter is the phy which is to be added to the port.
1228 *
1229 * This method will add a PHY to the selected port. This method returns an
1230 * enum sci_status. SCI_SUCCESS indicates the phy has been added to the port.
1231 * Any other status is a failure to add the phy to the port.
1232 */
1233enum sci_status sci_port_add_phy(struct isci_port *iport,
1234 struct isci_phy *iphy)
1235{
1236 enum sci_status status;
1237 enum sci_port_states state;
1238
1239 state = iport->sm.current_state_id;
1240 switch (state) {
1241 case SCI_PORT_STOPPED: {
1242 struct sci_sas_address port_sas_address;
1243
1244 /* Read the port assigned SAS Address if there is one */
1245 sci_port_get_sas_address(iport, &port_sas_address);
1246
1247 if (port_sas_address.high != 0 && port_sas_address.low != 0) {
1248 struct sci_sas_address phy_sas_address;
1249
1250 /* Make sure that the PHY SAS Address matches the SAS Address
1251 * for this port
1252 */
1253 sci_phy_get_sas_address(iphy, &phy_sas_address);
1254
1255 if (port_sas_address.high != phy_sas_address.high ||
1256 port_sas_address.low != phy_sas_address.low)
1257 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
1258 }
1259 return sci_port_set_phy(iport, iphy);
1260 }
1261 case SCI_PORT_SUB_WAITING:
1262 case SCI_PORT_SUB_OPERATIONAL:
1263 status = sci_port_set_phy(iport, iphy);
1264
1265 if (status != SCI_SUCCESS)
1266 return status;
1267
1268 sci_port_general_link_up_handler(iport, iphy, true);
1269 iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
1270 port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
1271
1272 return status;
1273 case SCI_PORT_SUB_CONFIGURING:
1274 status = sci_port_set_phy(iport, iphy);
1275
1276 if (status != SCI_SUCCESS)
1277 return status;
1278 sci_port_general_link_up_handler(iport, iphy, true);
1279
1280 /* Re-enter the configuring state since this may be the last phy in
1281 * the port.
1282 */
1283 port_state_machine_change(iport,
1284 SCI_PORT_SUB_CONFIGURING);
1285 return SCI_SUCCESS;
1286 default:
1287 dev_warn(sciport_to_dev(iport),
1288 "%s: in wrong state: %d\n", __func__, state);
1289 return SCI_FAILURE_INVALID_STATE;
1290 }
1291}
1292
1293/**
1294 * sci_port_remove_phy() -
1295 * @sci_port: This parameter specifies the port from which the phy will be removed.
1296 * @sci_phy: This parameter is the phy which is to be removed from the port.
1297 *
1298 * This method will remove the PHY from the selected PORT. This method returns
1299 * an enum sci_status. SCI_SUCCESS indicates the phy has been removed from the
1300 * port. Any other status is a failure to remove the phy from the port.
1301 */
1302enum sci_status sci_port_remove_phy(struct isci_port *iport,
1303 struct isci_phy *iphy)
1304{
1305 enum sci_status status;
1306 enum sci_port_states state;
1307
1308 state = iport->sm.current_state_id;
1309
1310 switch (state) {
1311 case SCI_PORT_STOPPED:
1312 return sci_port_clear_phy(iport, iphy);
1313 case SCI_PORT_SUB_OPERATIONAL:
1314 status = sci_port_clear_phy(iport, iphy);
1315 if (status != SCI_SUCCESS)
1316 return status;
1317
1318 sci_port_deactivate_phy(iport, iphy, true);
1319 iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
1320 port_state_machine_change(iport,
1321 SCI_PORT_SUB_CONFIGURING);
1322 return SCI_SUCCESS;
1323 case SCI_PORT_SUB_CONFIGURING:
1324 status = sci_port_clear_phy(iport, iphy);
1325
1326 if (status != SCI_SUCCESS)
1327 return status;
1328 sci_port_deactivate_phy(iport, iphy, true);
1329
1330 /* Re-enter the configuring state since this may be the last phy in
1331 * the port
1332 */
1333 port_state_machine_change(iport,
1334 SCI_PORT_SUB_CONFIGURING);
1335 return SCI_SUCCESS;
1336 default:
1337 dev_warn(sciport_to_dev(iport),
1338 "%s: in wrong state: %d\n", __func__, state);
1339 return SCI_FAILURE_INVALID_STATE;
1340 }
1341}
1342
1343enum sci_status sci_port_link_up(struct isci_port *iport,
1344 struct isci_phy *iphy)
1345{
1346 enum sci_port_states state;
1347
1348 state = iport->sm.current_state_id;
1349 switch (state) {
1350 case SCI_PORT_SUB_WAITING:
1351 /* Since this is the first phy going link up for the port we
1352 * can just enable it and continue
1353 */
1354 sci_port_activate_phy(iport, iphy, true);
1355
1356 port_state_machine_change(iport,
1357 SCI_PORT_SUB_OPERATIONAL);
1358 return SCI_SUCCESS;
1359 case SCI_PORT_SUB_OPERATIONAL:
1360 sci_port_general_link_up_handler(iport, iphy, true);
1361 return SCI_SUCCESS;
1362 case SCI_PORT_RESETTING:
1363 /* TODO We should make sure that the phy that has gone
1364 * link up is the same one on which we sent the reset. It is
1365 * possible that the phy on which we sent the reset is not the
1366 * one that has gone link up and we want to make sure that
1367 * phy being reset comes back. Consider the case where a
1368		 * reset is sent but, before the hardware processes the reset, it
1369		 * gets a link up on the port because of a hot plug event.
1370		 * Because of the reset request this phy will go link down
1371		 * almost immediately.
1372 */
1373
1374 /* In the resetting state we don't notify the user regarding
1375 * link up and link down notifications.
1376 */
1377 sci_port_general_link_up_handler(iport, iphy, false);
1378 return SCI_SUCCESS;
1379 default:
1380 dev_warn(sciport_to_dev(iport),
1381 "%s: in wrong state: %d\n", __func__, state);
1382 return SCI_FAILURE_INVALID_STATE;
1383 }
1384}
1385
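/*
 * Sketch (not part of this patch) of the check the TODO above asks for.
 * It assumes a new field, e.g. iport->resetting_phy, recorded when the
 * reset is issued in sci_port_hard_reset(); no such field exists here:
 *
 *	case SCI_PORT_RESETTING:
 *		if (iport->resetting_phy && iphy != iport->resetting_phy)
 *			break;	// link up on a phy other than the one reset
 *		sci_port_general_link_up_handler(iport, iphy, false);
 *		return SCI_SUCCESS;
 */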
1386enum sci_status sci_port_link_down(struct isci_port *iport,
1387 struct isci_phy *iphy)
1388{
1389 enum sci_port_states state;
1390
1391 state = iport->sm.current_state_id;
1392 switch (state) {
1393 case SCI_PORT_SUB_OPERATIONAL:
1394 sci_port_deactivate_phy(iport, iphy, true);
1395
1396 /* If there are no active phys left in the port, then
1397 * transition the port to the WAITING state until such time
1398 * as a phy goes link up
1399 */
1400 if (iport->active_phy_mask == 0)
1401 port_state_machine_change(iport,
1402 SCI_PORT_SUB_WAITING);
1403 return SCI_SUCCESS;
1404 case SCI_PORT_RESETTING:
1405 /* In the resetting state we don't notify the user regarding
1406 * link up and link down notifications. */
1407 sci_port_deactivate_phy(iport, iphy, false);
1408 return SCI_SUCCESS;
1409 default:
1410 dev_warn(sciport_to_dev(iport),
1411 "%s: in wrong state: %d\n", __func__, state);
1412 return SCI_FAILURE_INVALID_STATE;
1413 }
1414}
1415
1416enum sci_status sci_port_start_io(struct isci_port *iport,
1417 struct isci_remote_device *idev,
1418 struct isci_request *ireq)
1419{
1420 enum sci_port_states state;
1421
1422 state = iport->sm.current_state_id;
1423 switch (state) {
1424 case SCI_PORT_SUB_WAITING:
1425 return SCI_FAILURE_INVALID_STATE;
1426 case SCI_PORT_SUB_OPERATIONAL:
1427 iport->started_request_count++;
1428 return SCI_SUCCESS;
1429 default:
1430 dev_warn(sciport_to_dev(iport),
1431 "%s: in wrong state: %d\n", __func__, state);
1432 return SCI_FAILURE_INVALID_STATE;
1433 }
1434}
1435
1436enum sci_status sci_port_complete_io(struct isci_port *iport,
1437 struct isci_remote_device *idev,
1438 struct isci_request *ireq)
1439{
1440 enum sci_port_states state;
1441
1442 state = iport->sm.current_state_id;
1443 switch (state) {
1444 case SCI_PORT_STOPPED:
1445 dev_warn(sciport_to_dev(iport),
1446 "%s: in wrong state: %d\n", __func__, state);
1447 return SCI_FAILURE_INVALID_STATE;
1448 case SCI_PORT_STOPPING:
1449 sci_port_decrement_request_count(iport);
1450
1451 if (iport->started_request_count == 0)
1452 port_state_machine_change(iport,
1453 SCI_PORT_STOPPED);
1454 break;
1455 case SCI_PORT_READY:
1456 case SCI_PORT_RESETTING:
1457 case SCI_PORT_FAILED:
1458 case SCI_PORT_SUB_WAITING:
1459 case SCI_PORT_SUB_OPERATIONAL:
1460 sci_port_decrement_request_count(iport);
1461 break;
1462 case SCI_PORT_SUB_CONFIGURING:
1463 sci_port_decrement_request_count(iport);
1464 if (iport->started_request_count == 0) {
1465 port_state_machine_change(iport,
1466 SCI_PORT_SUB_OPERATIONAL);
1467 }
1468 break;
1469 }
1470 return SCI_SUCCESS;
1471}
1472
1473static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
1474{
1475 u32 pts_control_value;
1476
1477 /* enable the port task scheduler in a suspended state */
1478 pts_control_value = readl(&iport->port_task_scheduler_registers->control);
1479 pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
1480 writel(pts_control_value, &iport->port_task_scheduler_registers->control);
1481}
1482
1483static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
1484{
1485 u32 pts_control_value;
1486
1487 pts_control_value = readl(&iport->port_task_scheduler_registers->control);
1488 pts_control_value &=
1489 ~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
1490 writel(pts_control_value, &iport->port_task_scheduler_registers->control);
1491}
1492
1493static void sci_port_post_dummy_remote_node(struct isci_port *iport)
1494{
1495 struct isci_host *ihost = iport->owning_controller;
1496 u8 phys_index = iport->physical_port_index;
1497 union scu_remote_node_context *rnc;
1498 u16 rni = iport->reserved_rni;
1499 u32 command;
1500
1501 rnc = &ihost->remote_node_context_table[rni];
1502 rnc->ssp.is_valid = true;
1503
1504 command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
1505 phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
1506
1507 sci_controller_post_request(ihost, command);
1508
1509 /* ensure hardware has seen the post rnc command and give it
1510 * ample time to act before sending the suspend
1511 */
1512 readl(&ihost->smu_registers->interrupt_status); /* flush */
1513 udelay(10);
1514
1515 command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
1516 phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
1517
1518 sci_controller_post_request(ihost, command);
1519}
1520
1521static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
1522{
1523 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1524
1525 if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
1526 /*
1527		 * If we enter this state because of a request to stop
1528		 * the port then we want to disable the hardware's
1529		 * port task scheduler. */
1530 sci_port_disable_port_task_scheduler(iport);
1531 }
1532}
1533
1534static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
1535{
1536 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1537
1538 /* Enable and suspend the port task scheduler */
1539 sci_port_enable_port_task_scheduler(iport);
1540}
1541
1542static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
1543{
1544 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1545 struct isci_host *ihost = iport->owning_controller;
1546 u32 prev_state;
1547
1548 prev_state = iport->sm.previous_state_id;
1549 if (prev_state == SCI_PORT_RESETTING)
1550 isci_port_hard_reset_complete(iport, SCI_SUCCESS);
1551 else
1552 isci_port_not_ready(ihost, iport);
1553
1554 /* Post and suspend the dummy remote node context for this port. */
1555 sci_port_post_dummy_remote_node(iport);
1556
1557 /* Start the ready substate machine */
1558 port_state_machine_change(iport,
1559 SCI_PORT_SUB_WAITING);
1560}
1561
1562static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
1563{
1564 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1565
1566 sci_del_timer(&iport->timer);
1567}
1568
1569static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
1570{
1571 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1572
1573 sci_del_timer(&iport->timer);
1574
1575 sci_port_destroy_dummy_resources(iport);
1576}
1577
1578static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
1579{
1580 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1581
1582 isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
1583}
1584
1585/* --------------------------------------------------------------------------- */
1586
1587static const struct sci_base_state sci_port_state_table[] = {
1588 [SCI_PORT_STOPPED] = {
1589 .enter_state = sci_port_stopped_state_enter,
1590 .exit_state = sci_port_stopped_state_exit
1591 },
1592 [SCI_PORT_STOPPING] = {
1593 .exit_state = sci_port_stopping_state_exit
1594 },
1595 [SCI_PORT_READY] = {
1596 .enter_state = sci_port_ready_state_enter,
1597 },
1598 [SCI_PORT_SUB_WAITING] = {
1599 .enter_state = sci_port_ready_substate_waiting_enter,
1600 },
1601 [SCI_PORT_SUB_OPERATIONAL] = {
1602 .enter_state = sci_port_ready_substate_operational_enter,
1603 .exit_state = sci_port_ready_substate_operational_exit
1604 },
1605 [SCI_PORT_SUB_CONFIGURING] = {
1606 .enter_state = sci_port_ready_substate_configuring_enter,
1607 .exit_state = sci_port_ready_substate_configuring_exit
1608 },
1609 [SCI_PORT_RESETTING] = {
1610 .exit_state = sci_port_resetting_state_exit
1611 },
1612 [SCI_PORT_FAILED] = {
1613 .enter_state = sci_port_failed_state_enter,
1614 }
1615};
1616
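/*
 * Usage sketch (not part of this patch): the table above is consumed by
 * sci_change_state(), which runs the old state's .exit_state and then the
 * new state's .enter_state. Stopping a ready port, for example:
 *
 *	sci_port_stop(iport);			  // READY substate -> STOPPING
 *	...					  // outstanding I/O drains
 *	sci_port_complete_io(iport, idev, ireq);  // STOPPING -> STOPPED, runs
 *						  // sci_port_stopping_state_exit()
 *						  // then sci_port_stopped_state_enter()
 */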
1617void sci_port_construct(struct isci_port *iport, u8 index,
1618 struct isci_host *ihost)
1619{
1620 sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);
1621
1622 iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
1623 iport->physical_port_index = index;
1624 iport->active_phy_mask = 0;
1625 iport->ready_exit = false;
1626
1627 iport->owning_controller = ihost;
1628
1629 iport->started_request_count = 0;
1630 iport->assigned_device_count = 0;
1631
1632 iport->reserved_rni = SCU_DUMMY_INDEX;
1633 iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
1634
1635 sci_init_timer(&iport->timer, port_timeout);
1636
1637 iport->port_task_scheduler_registers = NULL;
1638
1639 for (index = 0; index < SCI_MAX_PHYS; index++)
1640 iport->phy_table[index] = NULL;
1641}
1642
1643void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
1644{
1645 INIT_LIST_HEAD(&iport->remote_dev_list);
1646 INIT_LIST_HEAD(&iport->domain_dev_list);
1647 spin_lock_init(&iport->state_lock);
1648 init_completion(&iport->start_complete);
1649 iport->isci_host = ihost;
1650 isci_port_change_state(iport, isci_freed);
1651 atomic_set(&iport->event, 0);
1652}
1653
1654/**
1655 * isci_port_get_state() - This function gets the status of the port object.
1656 * @isci_port: This parameter points to the isci_port object.
1657 *
1658 * Return: the status of the object as an isci_status enum.
1659 */
1660enum isci_status isci_port_get_state(
1661 struct isci_port *isci_port)
1662{
1663 return isci_port->status;
1664}
1665
1666void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
1667{
1668 struct isci_host *ihost = iport->owning_controller;
1669
1670 /* notify the user. */
1671 isci_port_bc_change_received(ihost, iport, iphy);
1672}
1673
1674int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
1675 struct isci_phy *iphy)
1676{
1677 unsigned long flags;
1678 enum sci_status status;
1679 int idx, ret = TMF_RESP_FUNC_COMPLETE;
1680
1681 dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
1682 __func__, iport);
1683
1684 init_completion(&iport->hard_reset_complete);
1685
1686 spin_lock_irqsave(&ihost->scic_lock, flags);
1687
1688 #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
1689 status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
1690
1691 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1692
1693 if (status == SCI_SUCCESS) {
1694 wait_for_completion(&iport->hard_reset_complete);
1695
1696 dev_dbg(&ihost->pdev->dev,
1697 "%s: iport = %p; hard reset completion\n",
1698 __func__, iport);
1699
1700 if (iport->hard_reset_status != SCI_SUCCESS)
1701 ret = TMF_RESP_FUNC_FAILED;
1702 } else {
1703 ret = TMF_RESP_FUNC_FAILED;
1704
1705 dev_err(&ihost->pdev->dev,
1706 "%s: iport = %p; sci_port_hard_reset call"
1707 " failed 0x%x\n",
1708 __func__, iport, status);
1709
1710 }
1711
1712 /* If the hard reset for the port has failed, consider this
1713 * the same as link failures on all phys in the port.
1714 */
1715 if (ret != TMF_RESP_FUNC_COMPLETE) {
1716
1717 dev_err(&ihost->pdev->dev,
1718 "%s: iport = %p; hard reset failed "
1719 "(0x%x) - driving explicit link fail for all phys\n",
1720 __func__, iport, iport->hard_reset_status);
1721
1722 /* Down all phys in the port. */
1723 spin_lock_irqsave(&ihost->scic_lock, flags);
1724 for (idx = 0; idx < SCI_MAX_PHYS; ++idx) {
1725 struct isci_phy *iphy = iport->phy_table[idx];
1726
1727 if (!iphy)
1728 continue;
1729 sci_phy_stop(iphy);
1730 sci_phy_start(iphy);
1731 }
1732 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1733 }
1734 return ret;
1735}
1736
1737/**
1738 * isci_port_deformed() - This function is called by libsas when a port becomes
1739 * inactive.
1740 * @phy: This parameter specifies the libsas phy with the inactive port.
1741 *
1742 */
1743void isci_port_deformed(struct asd_sas_phy *phy)
1744{
1745 pr_debug("%s: sas_phy = %p\n", __func__, phy);
1746}
1747
1748/**
1749 * isci_port_formed() - This function is called by libsas when a port becomes
1750 * active.
1751 * @phy: This parameter specifies the libsas phy with the active port.
1752 *
1753 */
1754void isci_port_formed(struct asd_sas_phy *phy)
1755{
1756 pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port);
1757}
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
new file mode 100644
index 000000000000..b50ecd4e8f9c
--- /dev/null
+++ b/drivers/scsi/isci/port.h
@@ -0,0 +1,306 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _ISCI_PORT_H_
57#define _ISCI_PORT_H_
58
59#include <scsi/libsas.h>
60#include "isci.h"
61#include "sas.h"
62#include "phy.h"
63
64#define SCIC_SDS_DUMMY_PORT 0xFF
65
66struct isci_phy;
67struct isci_host;
68
69enum isci_status {
70 isci_freed = 0x00,
71 isci_starting = 0x01,
72 isci_ready = 0x02,
73 isci_ready_for_io = 0x03,
74 isci_stopping = 0x04,
75 isci_stopped = 0x05,
76};
77
78/**
79 * struct isci_port - isci direct attached sas port object
80 * @event: counts bcns and port stop events (for bcn filtering)
81 * @ready_exit: several states constitute 'ready'. When exiting ready we
82 * need to take extra port-teardown actions that are
83 * skipped when exiting to another 'ready' state.
84 * @logical_port_index: software port index
85 * @physical_port_index: hardware port index
86 * @active_phy_mask: identifies phy members
87 * @reserved_tag: reserved task tag for the port task scheduler workaround
88 * @reserved_rni: reserved remote node index for port task scheduler workaround
89 * @started_request_count: reference count for outstanding commands
90 * @not_ready_reason: set during state transitions and notified
91 * @timer: timeout start/stop operations
92 */
93struct isci_port {
94 enum isci_status status;
95 #define IPORT_BCN_BLOCKED 0
96 #define IPORT_BCN_PENDING 1
97 unsigned long flags;
98 atomic_t event;
99 struct isci_host *isci_host;
100 struct asd_sas_port sas_port;
101 struct list_head remote_dev_list;
102 spinlock_t state_lock;
103 struct list_head domain_dev_list;
104 struct completion start_complete;
105 struct completion hard_reset_complete;
106 enum sci_status hard_reset_status;
107 struct sci_base_state_machine sm;
108 bool ready_exit;
109 u8 logical_port_index;
110 u8 physical_port_index;
111 u8 active_phy_mask;
112 u16 reserved_rni;
113 u16 reserved_tag;
114 u32 started_request_count;
115 u32 assigned_device_count;
116 u32 not_ready_reason;
117 struct isci_phy *phy_table[SCI_MAX_PHYS];
118 struct isci_host *owning_controller;
119 struct sci_timer timer;
120 struct scu_port_task_scheduler_registers __iomem *port_task_scheduler_registers;
121 /* XXX rework: only one register, no need to replicate per-port */
122 u32 __iomem *port_pe_configuration_register;
123 struct scu_viit_entry __iomem *viit_registers;
124};
125
126enum sci_port_not_ready_reason_code {
127 SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS,
128 SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED,
129 SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION,
130 SCIC_PORT_NOT_READY_RECONFIGURING,
131
132 SCIC_PORT_NOT_READY_REASON_CODE_MAX
133};
134
135struct sci_port_end_point_properties {
136 struct sci_sas_address sas_address;
137 struct sci_phy_proto protocols;
138};
139
140struct sci_port_properties {
141 u32 index;
142 struct sci_port_end_point_properties local;
143 struct sci_port_end_point_properties remote;
144 u32 phy_mask;
145};
146
147/**
148 * enum sci_port_states - This enumeration depicts all the states for the
149 * common port state machine.
150 *
151 *
152 */
153enum sci_port_states {
154 /**
155 * This state indicates that the port has successfully been stopped.
156 * In this state no new IO operations are permitted.
157 * This state is entered from the STOPPING state.
158 */
159 SCI_PORT_STOPPED,
160
161 /**
162 * This state indicates that the port is in the process of stopping.
163 * In this state no new IO operations are permitted, but existing IO
164 * operations are allowed to complete.
165 * This state is entered from the READY state.
166 */
167 SCI_PORT_STOPPING,
168
169 /**
170 * This state indicates the port is now ready. Thus, the user is
171 * able to perform IO operations on this port.
172 * This state is entered from the STARTING state.
173 */
174 SCI_PORT_READY,
175
176 /**
177 * The substate where the port is started and ready but has no
178 * active phys.
179 */
180 SCI_PORT_SUB_WAITING,
181
182 /**
183 * The substate where the port is started and ready and there is
184 * at least one phy operational.
185 */
186 SCI_PORT_SUB_OPERATIONAL,
187
188 /**
189 * The substate where the port is started and there was an
190 * add/remove phy event. This state is only used in Automatic
191 * Port Configuration Mode (APC)
192 */
193 SCI_PORT_SUB_CONFIGURING,
194
195 /**
196 * This state indicates the port is in the process of performing a hard
197 * reset. Thus, the user is unable to perform IO operations on this
198 * port.
199 * This state is entered from the READY state.
200 */
201 SCI_PORT_RESETTING,
202
203 /**
204 * This state indicates the port has failed a reset request. This state
205 * is entered when a port reset request times out.
206 * This state is entered from the RESETTING state.
207 */
208 SCI_PORT_FAILED,
209
210
211};
212
213static inline void sci_port_decrement_request_count(struct isci_port *iport)
214{
215 if (WARN_ONCE(iport->started_request_count == 0,
216 "%s: tried to decrement started_request_count past 0!?",
217 __func__))
218 /* pass */;
219 else
220 iport->started_request_count--;
221}
222
223#define sci_port_active_phy(port, phy) \
224 (((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0)
225
226void sci_port_construct(
227 struct isci_port *iport,
228 u8 port_index,
229 struct isci_host *ihost);
230
231enum sci_status sci_port_start(struct isci_port *iport);
232enum sci_status sci_port_stop(struct isci_port *iport);
233
234enum sci_status sci_port_add_phy(
235 struct isci_port *iport,
236 struct isci_phy *iphy);
237
238enum sci_status sci_port_remove_phy(
239 struct isci_port *iport,
240 struct isci_phy *iphy);
241
242void sci_port_setup_transports(
243 struct isci_port *iport,
244 u32 device_id);
245
246void isci_port_bcn_enable(struct isci_host *, struct isci_port *);
247
248void sci_port_deactivate_phy(
249 struct isci_port *iport,
250 struct isci_phy *iphy,
251 bool do_notify_user);
252
253bool sci_port_link_detected(
254 struct isci_port *iport,
255 struct isci_phy *iphy);
256
257enum sci_status sci_port_link_up(struct isci_port *iport,
258 struct isci_phy *iphy);
259enum sci_status sci_port_link_down(struct isci_port *iport,
260 struct isci_phy *iphy);
261
262struct isci_request;
263struct isci_remote_device;
264enum sci_status sci_port_start_io(
265 struct isci_port *iport,
266 struct isci_remote_device *idev,
267 struct isci_request *ireq);
268
269enum sci_status sci_port_complete_io(
270 struct isci_port *iport,
271 struct isci_remote_device *idev,
272 struct isci_request *ireq);
273
274enum sas_linkrate sci_port_get_max_allowed_speed(
275 struct isci_port *iport);
276
277void sci_port_broadcast_change_received(
278 struct isci_port *iport,
279 struct isci_phy *iphy);
280
281bool sci_port_is_valid_phy_assignment(
282 struct isci_port *iport,
283 u32 phy_index);
284
285void sci_port_get_sas_address(
286 struct isci_port *iport,
287 struct sci_sas_address *sas_address);
288
289void sci_port_get_attached_sas_address(
290 struct isci_port *iport,
291 struct sci_sas_address *sas_address);
292
293enum isci_status isci_port_get_state(
294 struct isci_port *isci_port);
295
296void isci_port_formed(struct asd_sas_phy *);
297void isci_port_deformed(struct asd_sas_phy *);
298
299void isci_port_init(
300 struct isci_port *port,
301 struct isci_host *host,
302 int index);
303
304int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
305 struct isci_phy *iphy);
306#endif /* !defined(_ISCI_PORT_H_) */
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
new file mode 100644
index 000000000000..486b113c634a
--- /dev/null
+++ b/drivers/scsi/isci/port_config.c
@@ -0,0 +1,754 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include "host.h"
57
58#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
59#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
60#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (100)
61
62enum SCIC_SDS_APC_ACTIVITY {
63 SCIC_SDS_APC_SKIP_PHY,
64 SCIC_SDS_APC_ADD_PHY,
65 SCIC_SDS_APC_START_TIMER,
66
67 SCIC_SDS_APC_ACTIVITY_MAX
68};
69
70/*
71 * ******************************************************************************
72 * General port configuration agent routines
73 * ****************************************************************************** */
74
 75/**
 76 * sci_sas_address_compare() - compare two SAS addresses
 77 * @address_one: A SAS Address to be compared.
 78 * @address_two: A SAS Address to be compared.
 79 *
 80 * Compare the two SAS addresses and return a signed result:
 81 * a value > 0 if address_one is greater than address_two,
 82 * a value < 0 if address_one is less than address_two,
 83 * and 0 if the two addresses are identical.
 84 *
 85 */
86static s32 sci_sas_address_compare(
87 struct sci_sas_address address_one,
88 struct sci_sas_address address_two)
89{
90 if (address_one.high > address_two.high) {
91 return 1;
92 } else if (address_one.high < address_two.high) {
93 return -1;
94 } else if (address_one.low > address_two.low) {
95 return 1;
96 } else if (address_one.low < address_two.low) {
97 return -1;
98 }
99
100 /* The two SAS Address must be identical */
101 return 0;
102}
103
104/**
105 * sci_port_configuration_agent_find_port() - find a matching port for a phy
106 * @ihost: The controller object used for the port search.
107 * @iphy: The phy object to match.
108 *
109 * This routine will find a matching port for the phy, i.e. a port whose
110 * local and attached sas addresses both match those of the phy.
111 *
112 * Return: the matching port, or NULL if there is no matching port for
113 * the phy.
114 */
115static struct isci_port *sci_port_configuration_agent_find_port(
116 struct isci_host *ihost,
117 struct isci_phy *iphy)
118{
119 u8 i;
120 struct sci_sas_address port_sas_address;
121 struct sci_sas_address port_attached_device_address;
122 struct sci_sas_address phy_sas_address;
123 struct sci_sas_address phy_attached_device_address;
124
125 /*
126 * Since this phy can be a member of a wide port check to see if one or
127 * more phys match the sent and received SAS address as this phy in which
128 * case it should participate in the same port.
129 */
130 sci_phy_get_sas_address(iphy, &phy_sas_address);
131 sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address);
132
133 for (i = 0; i < ihost->logical_port_entries; i++) {
134 struct isci_port *iport = &ihost->ports[i];
135
136 sci_port_get_sas_address(iport, &port_sas_address);
137 sci_port_get_attached_sas_address(iport, &port_attached_device_address);
138
139 if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 &&
140 sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0)
141 return iport;
142 }
143
144 return NULL;
145}
146
147/**
148 * sci_port_configuration_agent_validate_ports() - validate port setup
149 * @ihost: This is the controller object that contains the port agent.
150 * @port_agent: This is the port configuration agent for the controller.
151 *
152 * This routine validates that the port configuration is correct for the
153 * SCU hardware, which only allows these logical port to phy groupings:
154 * LP0 -> (PE0), (PE0, PE1), (PE0, PE1, PE2, PE3); LP1 -> (PE1);
155 * LP2 -> (PE2), (PE2, PE3); LP3 -> (PE3).
156 * Return: SCI_SUCCESS if the port configuration is valid for this agent,
157 * SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION if it is not.
158 */
159static enum sci_status sci_port_configuration_agent_validate_ports(
160 struct isci_host *ihost,
161 struct sci_port_configuration_agent *port_agent)
162{
163 struct sci_sas_address first_address;
164 struct sci_sas_address second_address;
165
166 /*
167	 * Sanity check the max ranges for all the phys: the max index
168 * is always equal to the port range index */
169 if (port_agent->phy_valid_port_range[0].max_index != 0 ||
170 port_agent->phy_valid_port_range[1].max_index != 1 ||
171 port_agent->phy_valid_port_range[2].max_index != 2 ||
172 port_agent->phy_valid_port_range[3].max_index != 3)
173 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
174
175 /*
176 * This is a request to configure a single x4 port or at least attempt
177 * to make all the phys into a single port */
178 if (port_agent->phy_valid_port_range[0].min_index == 0 &&
179 port_agent->phy_valid_port_range[1].min_index == 0 &&
180 port_agent->phy_valid_port_range[2].min_index == 0 &&
181 port_agent->phy_valid_port_range[3].min_index == 0)
182 return SCI_SUCCESS;
183
184 /*
185 * This is a degenerate case where phy 1 and phy 2 are assigned
186 * to the same port this is explicitly disallowed by the hardware
187 * unless they are part of the same x4 port and this condition was
188 * already checked above. */
189 if (port_agent->phy_valid_port_range[2].min_index == 1) {
190 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
191 }
192
193 /*
194 * PE0 and PE3 can never have the same SAS Address unless they
195 * are part of the same x4 wide port and we have already checked
196 * for this condition. */
197 sci_phy_get_sas_address(&ihost->phys[0], &first_address);
198 sci_phy_get_sas_address(&ihost->phys[3], &second_address);
199
200 if (sci_sas_address_compare(first_address, second_address) == 0) {
201 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
202 }
203
204 /*
205 * PE0 and PE1 are configured into a 2x1 ports make sure that the
206 * SAS Address for PE0 and PE2 are different since they can not be
207 * part of the same port. */
208 if (port_agent->phy_valid_port_range[0].min_index == 0 &&
209 port_agent->phy_valid_port_range[1].min_index == 1) {
210 sci_phy_get_sas_address(&ihost->phys[0], &first_address);
211 sci_phy_get_sas_address(&ihost->phys[2], &second_address);
212
213 if (sci_sas_address_compare(first_address, second_address) == 0) {
214 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
215 }
216 }
217
218 /*
219 * PE2 and PE3 are configured into a 2x1 ports make sure that the
220 * SAS Address for PE1 and PE3 are different since they can not be
221 * part of the same port. */
222 if (port_agent->phy_valid_port_range[2].min_index == 2 &&
223 port_agent->phy_valid_port_range[3].min_index == 3) {
224 sci_phy_get_sas_address(&ihost->phys[1], &first_address);
225 sci_phy_get_sas_address(&ihost->phys[3], &second_address);
226
227 if (sci_sas_address_compare(first_address, second_address) == 0) {
228 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
229 }
230 }
231
232 return SCI_SUCCESS;
233}
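
/*
 * Illustrative only (hedged sketch, not part of the patch): the legal phy
 * groupings per logical port that the validation above enforces, written
 * as phy masks where bit N corresponds to PE N.
 */
static const u8 example_legal_lp_phy_masks[SCI_MAX_PORTS][3] = {
	[0] = { 0x1, 0x3, 0xf },	/* LP0: PE0, PE0-PE1, PE0-PE3 */
	[1] = { 0x2 },			/* LP1: PE1 only */
	[2] = { 0x4, 0xc },		/* LP2: PE2, PE2-PE3 */
	[3] = { 0x8 },			/* LP3: PE3 only */
};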
234
235/*
236 * ******************************************************************************
237 * Manual port configuration agent routines
238 * ****************************************************************************** */
239
240/* verify all of the phys in the same port are using the same SAS address */
241static enum sci_status
242sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
243 struct sci_port_configuration_agent *port_agent)
244{
245 u32 phy_mask;
246 u32 assigned_phy_mask;
247 struct sci_sas_address sas_address;
248 struct sci_sas_address phy_assigned_address;
249 u8 port_index;
250 u8 phy_index;
251
252 assigned_phy_mask = 0;
253 sas_address.high = 0;
254 sas_address.low = 0;
255
256 for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) {
257 phy_mask = ihost->oem_parameters.ports[port_index].phy_mask;
258
259 if (!phy_mask)
260 continue;
261 /*
262		 * Make sure that at least one phy in the mask has not already
263		 * been assigned to a different port. */
264 if ((phy_mask & ~assigned_phy_mask) == 0) {
265 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
266 }
267
268 /* Find the starting phy index for this round through the loop */
269 for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
270 if ((phy_mask & (1 << phy_index)) == 0)
271 continue;
272 sci_phy_get_sas_address(&ihost->phys[phy_index],
273 &sas_address);
274
275 /*
276 * The phy_index can be used as the starting point for the
277			 * port range since the hardware numbers logical ports
278			 * the same as the PE index. */
279 port_agent->phy_valid_port_range[phy_index].min_index = port_index;
280 port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
281
282 if (phy_index != port_index) {
283 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
284 }
285
286 break;
287 }
288
289 /*
290 * See how many additional phys are being added to this logical port.
291 * Note: We have not moved the current phy_index so we will actually
292		 * compare the starting phy with itself.
293 * This is expected and required to add the phy to the port. */
294		for (; phy_index < SCI_MAX_PHYS; phy_index++) {
295 if ((phy_mask & (1 << phy_index)) == 0)
296 continue;
297 sci_phy_get_sas_address(&ihost->phys[phy_index],
298 &phy_assigned_address);
299
300 if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) {
301 /*
302				 * The phy mask claims this phy is part of the same port as
303				 * the starting phy, but its SAS address differs; fail this configuration. */
304 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
305 }
306
307 port_agent->phy_valid_port_range[phy_index].min_index = port_index;
308 port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
309
310 sci_port_add_phy(&ihost->ports[port_index],
311 &ihost->phys[phy_index]);
312
313 assigned_phy_mask |= (1 << phy_index);
314 }
317 }
318
319 return sci_port_configuration_agent_validate_ports(ihost, port_agent);
320}
321
322static void mpc_agent_timeout(unsigned long data)
323{
324 u8 index;
325 struct sci_timer *tmr = (struct sci_timer *)data;
326 struct sci_port_configuration_agent *port_agent;
327 struct isci_host *ihost;
328 unsigned long flags;
329 u16 configure_phy_mask;
330
331 port_agent = container_of(tmr, typeof(*port_agent), timer);
332 ihost = container_of(port_agent, typeof(*ihost), port_agent);
333
334 spin_lock_irqsave(&ihost->scic_lock, flags);
335
336 if (tmr->cancel)
337 goto done;
338
339 port_agent->timer_pending = false;
340
341	/* Find the mask of phys that are reported ready but not yet configured into a port */
342 configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
343
344 for (index = 0; index < SCI_MAX_PHYS; index++) {
345 struct isci_phy *iphy = &ihost->phys[index];
346
347 if (configure_phy_mask & (1 << index)) {
348 port_agent->link_up_handler(ihost, port_agent,
349 phy_get_non_dummy_port(iphy),
350 iphy);
351 }
352 }
353
354done:
355 spin_unlock_irqrestore(&ihost->scic_lock, flags);
356}
357
358static void sci_mpc_agent_link_up(struct isci_host *ihost,
359 struct sci_port_configuration_agent *port_agent,
360 struct isci_port *iport,
361 struct isci_phy *iphy)
362{
363 /* If the port is NULL then the phy was not assigned to a port.
364 * This is because the phy was not given the same SAS Address as
365 * the other PHYs in the port.
366 */
367 if (!iport)
368 return;
369
370 port_agent->phy_ready_mask |= (1 << iphy->phy_index);
371 sci_port_link_up(iport, iphy);
372 if ((iport->active_phy_mask & (1 << iphy->phy_index)))
373 port_agent->phy_configured_mask |= (1 << iphy->phy_index);
374}
375
376/**
377 * sci_mpc_agent_link_down - handle mpc link down events
378 * @ihost: This is the controller object that receives the link down
379 *    notification.
380 * @port_agent: This is the port configuration agent for the controller.
381 * @iport: This is the port object associated with the phy. If there is no
382 *    associated port this is NULL; the port is NULL only if the phy was
383 *    never part of the port, which happens when the phy is not broadcasting
384 *    the same SAS address as the other phys in the assigned port.
385 * @iphy: This is the phy object which has gone link down.
386 *
387 * This function handles the manual port configuration link down
388 * notifications. Since all ports and phys are associated at initialization
389 * time we just turn around and notify the port object of the link down
390 * event. If this phy is not associated with a port no action is taken. (Is it
391 * possible to get a link down notification from a phy with no associated port?)
392 */
393static void sci_mpc_agent_link_down(
394 struct isci_host *ihost,
395 struct sci_port_configuration_agent *port_agent,
396 struct isci_port *iport,
397 struct isci_phy *iphy)
398{
399 if (iport != NULL) {
400 /*
401 * If we can form a new port from the remainder of the phys
402 * then we want to start the timer to allow the SCI User to
403 * cleanup old devices and rediscover the port before
404 * rebuilding the port with the phys that remain in the ready
405 * state.
406 */
407 port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
408 port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
409
410 /*
411 * Check to see if there are more phys waiting to be
412 * configured into a port. If there are allow the SCI User
413 * to tear down this port, if necessary, and then reconstruct
414 * the port after the timeout.
415 */
416 if ((port_agent->phy_configured_mask == 0x0000) &&
417 (port_agent->phy_ready_mask != 0x0000) &&
418 !port_agent->timer_pending) {
419 port_agent->timer_pending = true;
420
421 sci_mod_timer(&port_agent->timer,
422 SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT);
423 }
424
425 sci_port_link_down(iport, iphy);
426 }
427}
428
429/* verify phys are assigned a valid SAS address for automatic port
430 * configuration mode.
431 */
432static enum sci_status
433sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
434 struct sci_port_configuration_agent *port_agent)
435{
436 u8 phy_index;
437 u8 port_index;
438 struct sci_sas_address sas_address;
439 struct sci_sas_address phy_assigned_address;
440
441 phy_index = 0;
442
443 while (phy_index < SCI_MAX_PHYS) {
444 port_index = phy_index;
445
446 /* Get the assigned SAS Address for the first PHY on the controller. */
447 sci_phy_get_sas_address(&ihost->phys[phy_index],
448 &sas_address);
449
450 while (++phy_index < SCI_MAX_PHYS) {
451 sci_phy_get_sas_address(&ihost->phys[phy_index],
452 &phy_assigned_address);
453
454			/* Verify that the SAS addresses are the same for every PHY */
455 if (sci_sas_address_compare(sas_address, phy_assigned_address) == 0) {
456 port_agent->phy_valid_port_range[phy_index].min_index = port_index;
457 port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
458 } else {
459 port_agent->phy_valid_port_range[phy_index].min_index = phy_index;
460 port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
461 break;
462 }
463 }
464 }
465
466 return sci_port_configuration_agent_validate_ports(ihost, port_agent);
467}
468
469static void sci_apc_agent_configure_ports(struct isci_host *ihost,
470 struct sci_port_configuration_agent *port_agent,
471 struct isci_phy *iphy,
472 bool start_timer)
473{
474 u8 port_index;
475 enum sci_status status;
476 struct isci_port *iport;
477 enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY;
478
479 iport = sci_port_configuration_agent_find_port(ihost, iphy);
480
481 if (iport) {
482 if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index))
483 apc_activity = SCIC_SDS_APC_ADD_PHY;
484 else
485 apc_activity = SCIC_SDS_APC_SKIP_PHY;
486 } else {
487		/*
488		 * There is no matching port for this phy, so search through the
489		 * ports and see if we can add the phy to its own port, or start
490		 * the timer and wait to see if a wider port can be made.
491		 *
492		 * Note the break when we reach the condition port id == phy id. */
493 for (port_index = port_agent->phy_valid_port_range[iphy->phy_index].min_index;
494 port_index <= port_agent->phy_valid_port_range[iphy->phy_index].max_index;
495 port_index++) {
496
497 iport = &ihost->ports[port_index];
498
499 /* First we must make sure that this PHY can be added to this Port. */
500 if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
501 /*
502 * Port contains a PHY with a greater PHY ID than the current
503				 * PHY that has gone link up. This phy cannot be part of
504				 * any port, so skip it and move on. */
505 if (iport->active_phy_mask > (1 << iphy->phy_index)) {
506 apc_activity = SCIC_SDS_APC_SKIP_PHY;
507 break;
508 }
509
510 /*
511 * We have reached the end of our Port list and have not found
512 * any reason why we should not either add the PHY to the port
513 * or wait for more phys to become active. */
514 if (iport->physical_port_index == iphy->phy_index) {
515 /*
516				 * The port either has no active PHYs or only active PHYs
517				 * with a lower PHY id than this PHY; had there been an
518				 * active PHY with a higher id we would have skipped this
519				 * port above. */
520 if (apc_activity != SCIC_SDS_APC_START_TIMER) {
521 apc_activity = SCIC_SDS_APC_ADD_PHY;
522 }
523
524 break;
525 }
526
527 /*
528				 * The current port has no active PHYs and this PHY could be part
529				 * of this port. Since we don't know yet, set up to start the
530				 * timer and see if there is a better configuration. */
531 if (iport->active_phy_mask == 0) {
532 apc_activity = SCIC_SDS_APC_START_TIMER;
533 }
534 } else if (iport->active_phy_mask != 0) {
535 /*
536 * The Port has an active phy and the current Phy can not
537 * participate in this port so skip the PHY and see if
538 * there is a better configuration. */
539 apc_activity = SCIC_SDS_APC_SKIP_PHY;
540 }
541 }
542 }
543
544 /*
545 * Check to see if the start timer operations should instead map to an
546	 * add phy operation. This happens when we have been waiting to
547	 * add a phy to a port but could not because the automatic port
548 * configuration engine had a choice of possible ports for the phy.
549 * Since we have gone through a timeout we are going to restrict the
550 * choice to the smallest possible port. */
551	if (!start_timer &&
552	    (apc_activity == SCIC_SDS_APC_START_TIMER)) {
553		apc_activity = SCIC_SDS_APC_ADD_PHY;
554	}
557
558 switch (apc_activity) {
559 case SCIC_SDS_APC_ADD_PHY:
560 status = sci_port_add_phy(iport, iphy);
561
562 if (status == SCI_SUCCESS) {
563 port_agent->phy_configured_mask |= (1 << iphy->phy_index);
564 }
565 break;
566
567 case SCIC_SDS_APC_START_TIMER:
568 /*
569 * This can occur for either a link down event, or a link
570 * up event where we cannot yet tell the port to which a
571 * phy belongs.
572 */
573 if (port_agent->timer_pending)
574 sci_del_timer(&port_agent->timer);
575
576 port_agent->timer_pending = true;
577 sci_mod_timer(&port_agent->timer,
578 SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
579 break;
580
581 case SCIC_SDS_APC_SKIP_PHY:
582 default:
583		/* do nothing; the PHY cannot be made part of a port at this time. */
584 break;
585 }
586}
587
588/**
589 * sci_apc_agent_link_up - handle apc link up events
590 * @ihost: This is the controller object that receives the link up
591 *    notification.
592 * @port_agent: This is the port configuration agent for the controller.
593 * @iport: This is the port object associated with the phy. If there is no
594 *    associated port this is NULL.
595 * @iphy: This is the phy object which has gone link up.
596 *
597 * This method handles the automatic port configuration for link up
598 * notifications.
599 */
600static void sci_apc_agent_link_up(struct isci_host *ihost,
601 struct sci_port_configuration_agent *port_agent,
602 struct isci_port *iport,
603 struct isci_phy *iphy)
604{
605 u8 phy_index = iphy->phy_index;
606
607 if (!iport) {
608		/* the phy is not yet part of a port */
609 port_agent->phy_ready_mask |= 1 << phy_index;
610 sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
611 } else {
612		/* the phy is already part of the port */
613 u32 port_state = iport->sm.current_state_id;
614
615		/* if the port's state is resetting then the link up is from
616		 * a port hard reset; in this case, we need to tell the port
617		 * that link up was received
618 */
619 BUG_ON(port_state != SCI_PORT_RESETTING);
620 port_agent->phy_ready_mask |= 1 << phy_index;
621 sci_port_link_up(iport, iphy);
622 }
623}
624
625/**
626 * sci_apc_agent_link_down - handle apc link down events
627 * @ihost: This is the controller object that receives the link down
628 *    notification.
629 * @port_agent: This is the port configuration agent for the controller.
630 * @iport: This is the port object associated with the phy. If there is no
631 *    associated port this is NULL.
632 * @iphy: This is the phy object which has gone link down.
633 *
634 * This method handles the automatic port configuration link down
635 * notifications. If the phy is not associated with a port, no action is
636 * taken beyond clearing its ready bit.
637 */
638static void sci_apc_agent_link_down(
639 struct isci_host *ihost,
640 struct sci_port_configuration_agent *port_agent,
641 struct isci_port *iport,
642 struct isci_phy *iphy)
643{
644 port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
645
646 if (!iport)
647 return;
648 if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) {
649 enum sci_status status;
650
651 status = sci_port_remove_phy(iport, iphy);
652
653 if (status == SCI_SUCCESS)
654 port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
655 }
656}
657
658/* configure the phys into ports when the timer fires */
659static void apc_agent_timeout(unsigned long data)
660{
661 u32 index;
662 struct sci_timer *tmr = (struct sci_timer *)data;
663 struct sci_port_configuration_agent *port_agent;
664 struct isci_host *ihost;
665 unsigned long flags;
666 u16 configure_phy_mask;
667
668 port_agent = container_of(tmr, typeof(*port_agent), timer);
669 ihost = container_of(port_agent, typeof(*ihost), port_agent);
670
671 spin_lock_irqsave(&ihost->scic_lock, flags);
672
673 if (tmr->cancel)
674 goto done;
675
676 port_agent->timer_pending = false;
677
678 configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
679
680 if (!configure_phy_mask)
681		goto done;
682
683 for (index = 0; index < SCI_MAX_PHYS; index++) {
684 if ((configure_phy_mask & (1 << index)) == 0)
685 continue;
686
687 sci_apc_agent_configure_ports(ihost, port_agent,
688 &ihost->phys[index], false);
689 }
690
691done:
692 spin_unlock_irqrestore(&ihost->scic_lock, flags);
693}
694
695/*
696 * ******************************************************************************
697 * Public port configuration agent routines
698 * ****************************************************************************** */
699
700/**
701 * sci_port_configuration_agent_construct - construct the port agent
702 * @port_agent: the port configuration agent to be constructed
703 *
704 * This method constructs the port configuration agent for operation. This
705 * call is universal for both manual and automatic port configuration modes.
706 */
707void sci_port_configuration_agent_construct(
708 struct sci_port_configuration_agent *port_agent)
709{
710 u32 index;
711
712 port_agent->phy_configured_mask = 0x00;
713 port_agent->phy_ready_mask = 0x00;
714
715 port_agent->link_up_handler = NULL;
716 port_agent->link_down_handler = NULL;
717
718 port_agent->timer_pending = false;
719
720 for (index = 0; index < SCI_MAX_PORTS; index++) {
721 port_agent->phy_valid_port_range[index].min_index = 0;
722 port_agent->phy_valid_port_range[index].max_index = 0;
723 }
724}
725
726enum sci_status sci_port_configuration_agent_initialize(
727 struct isci_host *ihost,
728 struct sci_port_configuration_agent *port_agent)
729{
730 enum sci_status status;
731 enum sci_port_configuration_mode mode;
732
733 mode = ihost->oem_parameters.controller.mode_type;
734
735 if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
736 status = sci_mpc_agent_validate_phy_configuration(
737 ihost, port_agent);
738
739 port_agent->link_up_handler = sci_mpc_agent_link_up;
740 port_agent->link_down_handler = sci_mpc_agent_link_down;
741
742 sci_init_timer(&port_agent->timer, mpc_agent_timeout);
743 } else {
744 status = sci_apc_agent_validate_phy_configuration(
745 ihost, port_agent);
746
747 port_agent->link_up_handler = sci_apc_agent_link_up;
748 port_agent->link_down_handler = sci_apc_agent_link_down;
749
750 sci_init_timer(&port_agent->timer, apc_agent_timeout);
751 }
752
753 return status;
754}
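
/*
 * Hedged usage sketch (hypothetical helper, not part of the patch): the
 * two public routines above are meant to run in construct-then-initialize
 * order during controller bring-up.
 */
static enum sci_status example_bring_up_port_agent(struct isci_host *ihost)
{
	/* reset masks, handlers and timer bookkeeping */
	sci_port_configuration_agent_construct(&ihost->port_agent);

	/* validate the OEM phy masks and pick the MPC or APC handlers */
	return sci_port_configuration_agent_initialize(ihost,
						       &ihost->port_agent);
}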
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
new file mode 100644
index 000000000000..b5f4341de243
--- /dev/null
+++ b/drivers/scsi/isci/probe_roms.c
@@ -0,0 +1,243 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 */
24
25/* probe_roms - scan for oem parameters */
26
27#include <linux/kernel.h>
28#include <linux/firmware.h>
29#include <linux/uaccess.h>
30#include <linux/efi.h>
31#include <asm/probe_roms.h>
32
33#include "isci.h"
34#include "task.h"
35#include "probe_roms.h"
36
37static efi_char16_t isci_efivar_name[] = {
38 'R', 's', 't', 'S', 'c', 'u', 'O'
39};
40
41struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
42{
43 void __iomem *oprom = pci_map_biosrom(pdev);
44 struct isci_orom *rom = NULL;
45 size_t len, i;
46 int j;
47 char oem_sig[4];
48 struct isci_oem_hdr oem_hdr;
49 u8 *tmp, sum;
50
51 if (!oprom)
52 return NULL;
53
54 len = pci_biosrom_size(pdev);
55 rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
56 if (!rom) {
57 dev_warn(&pdev->dev,
58 "Unable to allocate memory for orom\n");
59 return NULL;
60 }
61
62 for (i = 0; i < len && rom; i += ISCI_OEM_SIG_SIZE) {
63 memcpy_fromio(oem_sig, oprom + i, ISCI_OEM_SIG_SIZE);
64
65 /* we think we found the OEM table */
66 if (memcmp(oem_sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) == 0) {
67 size_t copy_len;
68
69 memcpy_fromio(&oem_hdr, oprom + i, sizeof(oem_hdr));
70
71 copy_len = min(oem_hdr.len - sizeof(oem_hdr),
72 sizeof(*rom));
73
74 memcpy_fromio(rom,
75 oprom + i + sizeof(oem_hdr),
76 copy_len);
77
78 /* calculate checksum */
79 tmp = (u8 *)&oem_hdr;
80 for (j = 0, sum = 0; j < sizeof(oem_hdr); j++, tmp++)
81 sum += *tmp;
82
83 tmp = (u8 *)rom;
84 for (j = 0; j < sizeof(*rom); j++, tmp++)
85 sum += *tmp;
86
87 if (sum != 0) {
88 dev_warn(&pdev->dev,
89 "OEM table checksum failed\n");
90 continue;
91 }
92
93 /* keep going if that's not the oem param table */
94 if (memcmp(rom->hdr.signature,
95 ISCI_ROM_SIG,
96 ISCI_ROM_SIG_SIZE) != 0)
97 continue;
98
99 dev_info(&pdev->dev,
100 "OEM parameter table found in OROM\n");
101 break;
102 }
103 }
104
105 if (i >= len) {
106 dev_err(&pdev->dev, "oprom parse error\n");
107 devm_kfree(&pdev->dev, rom);
108 rom = NULL;
109 }
110 pci_unmap_biosrom(oprom);
111
112 return rom;
113}
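
/*
 * Hedged sketch of the checksum rule used above (helper is hypothetical):
 * the byte-wise sum of the OEM header plus parameter block must wrap to
 * zero; the header's checksum byte is presumably chosen by the BIOS so
 * that the total does.
 */
static u8 example_oem_sum(const u8 *data, size_t len)
{
	u8 sum = 0;

	while (len--)
		sum += *data++;

	return sum;	/* 0 means the table is valid */
}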
114
115enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
116 struct isci_orom *orom, int scu_index)
117{
118 /* check for valid inputs */
119 if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
120 scu_index > orom->hdr.num_elements || !oem)
121 return -EINVAL;
122
123 *oem = orom->ctrl[scu_index];
124 return 0;
125}
126
127struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
128{
129 struct isci_orom *orom = NULL, *data;
130 int i, j;
131
132 if (request_firmware(&fw, ISCI_FW_NAME, &pdev->dev) != 0)
133 return NULL;
134
135 if (fw->size < sizeof(*orom))
136 goto out;
137
138 data = (struct isci_orom *)fw->data;
139
140 if (strncmp(ISCI_ROM_SIG, data->hdr.signature,
141 strlen(ISCI_ROM_SIG)) != 0)
142 goto out;
143
144 orom = devm_kzalloc(&pdev->dev, fw->size, GFP_KERNEL);
145 if (!orom)
146 goto out;
147
148 memcpy(orom, fw->data, fw->size);
149
150 if (is_c0(pdev))
151 goto out;
152
153 /*
154	 * deprecated: override default amp_control for pre-production
155 * silicon revisions
156 */
157 for (i = 0; i < ARRAY_SIZE(orom->ctrl); i++)
158 for (j = 0; j < ARRAY_SIZE(orom->ctrl[i].phys); j++) {
159 orom->ctrl[i].phys[j].afe_tx_amp_control0 = 0xe7c03;
160 orom->ctrl[i].phys[j].afe_tx_amp_control1 = 0xe7c03;
161 orom->ctrl[i].phys[j].afe_tx_amp_control2 = 0xe7c03;
162 orom->ctrl[i].phys[j].afe_tx_amp_control3 = 0xe7c03;
163 }
164 out:
165 release_firmware(fw);
166
167 return orom;
168}
169
170static struct efi *get_efi(void)
171{
172#ifdef CONFIG_EFI
173 return &efi;
174#else
175 return NULL;
176#endif
177}
178
179struct isci_orom *isci_get_efi_var(struct pci_dev *pdev)
180{
181 efi_status_t status;
182 struct isci_orom *rom;
183 struct isci_oem_hdr *oem_hdr;
184 u8 *tmp, sum;
185 int j;
186 unsigned long data_len;
187 u8 *efi_data;
188 u32 efi_attrib = 0;
189
190 data_len = 1024;
191 efi_data = devm_kzalloc(&pdev->dev, data_len, GFP_KERNEL);
192 if (!efi_data) {
193 dev_warn(&pdev->dev,
194 "Unable to allocate memory for EFI data\n");
195 return NULL;
196 }
197
198 rom = (struct isci_orom *)(efi_data + sizeof(struct isci_oem_hdr));
199
200 if (get_efi())
201 status = get_efi()->get_variable(isci_efivar_name,
202 &ISCI_EFI_VENDOR_GUID,
203 &efi_attrib,
204 &data_len,
205 efi_data);
206 else
207 status = EFI_NOT_FOUND;
208
209 if (status != EFI_SUCCESS) {
210 dev_warn(&pdev->dev,
211 "Unable to obtain EFI var data for OEM parms\n");
212 return NULL;
213 }
214
215 oem_hdr = (struct isci_oem_hdr *)efi_data;
216
217 if (memcmp(oem_hdr->sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) != 0) {
218 dev_warn(&pdev->dev,
219 "Invalid OEM header signature\n");
220 return NULL;
221 }
222
223 /* calculate checksum */
224 tmp = (u8 *)efi_data;
225 for (j = 0, sum = 0; j < (sizeof(*oem_hdr) + sizeof(*rom)); j++, tmp++)
226 sum += *tmp;
227
228 if (sum != 0) {
229 dev_warn(&pdev->dev,
230 "OEM table checksum failed\n");
231 return NULL;
232 }
233
234 if (memcmp(rom->hdr.signature,
235 ISCI_ROM_SIG,
236 ISCI_ROM_SIG_SIZE) != 0) {
237 dev_warn(&pdev->dev,
238 "Invalid OEM table signature\n");
239 return NULL;
240 }
241
242 return rom;
243}
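
/*
 * Hedged usage sketch (hypothetical helper; the driver's actual probe
 * path lives elsewhere): the three lookups in this file suggest a natural
 * fallback order for locating the OEM parameters.
 */
static struct isci_orom *example_find_oem_params(struct pci_dev *pdev)
{
	struct isci_orom *orom = isci_request_oprom(pdev);	/* option ROM */

	if (!orom)
		orom = isci_get_efi_var(pdev);			/* EFI variable */
	if (!orom)
		orom = isci_request_firmware(pdev, NULL);	/* firmware blob */

	return orom;
}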
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
new file mode 100644
index 000000000000..dc007e692f4e
--- /dev/null
+++ b/drivers/scsi/isci/probe_roms.h
@@ -0,0 +1,249 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55#ifndef _ISCI_PROBE_ROMS_H_
56#define _ISCI_PROBE_ROMS_H_
57
58#ifdef __KERNEL__
59#include <linux/firmware.h>
60#include <linux/pci.h>
61#include <linux/efi.h>
62#include "isci.h"
63
64#define SCIC_SDS_PARM_NO_SPEED 0
65
66/* generation 1 (i.e. 1.5 Gb/s) */
67#define SCIC_SDS_PARM_GEN1_SPEED 1
68
69/* generation 2 (i.e. 3.0 Gb/s) */
70#define SCIC_SDS_PARM_GEN2_SPEED 2
71
72/* generation 3 (i.e. 6.0 Gb/s) */
73#define SCIC_SDS_PARM_GEN3_SPEED 3
74#define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED
75
76/* parameters that can be set by module parameters */
77struct sci_user_parameters {
78 struct sci_phy_user_params {
79 /**
80 * This field specifies the NOTIFY (ENABLE SPIN UP) primitive
81 * insertion frequency for this phy index.
82 */
83 u32 notify_enable_spin_up_insertion_frequency;
84
85 /**
 86		 * This field specifies the number of transmitted DWORDs within which
87 * to transmit a single ALIGN primitive. This value applies regardless
88 * of what type of device is attached or connection state. A value of
89 * 0 indicates that no ALIGN primitives will be inserted.
90 */
91 u16 align_insertion_frequency;
92
93 /**
 94		 * This field specifies the number of transmitted DWORDs within which
95 * to transmit 2 ALIGN primitives. This applies for SAS connections
96 * only. A minimum value of 3 is required for this field.
97 */
98 u16 in_connection_align_insertion_frequency;
99
100 /**
101 * This field indicates the maximum speed generation to be utilized
102 * by phys in the supplied port.
103 * - A value of 1 indicates generation 1 (i.e. 1.5 Gb/s).
104 * - A value of 2 indicates generation 2 (i.e. 3.0 Gb/s).
105 * - A value of 3 indicates generation 3 (i.e. 6.0 Gb/s).
106 */
107 u8 max_speed_generation;
108
109 } phys[SCI_MAX_PHYS];
110
111 /**
112 * This field specifies the maximum number of direct attached devices
113 * that can have power supplied to them simultaneously.
114 */
115 u8 max_number_concurrent_device_spin_up;
116
117 /**
118 * This field specifies the number of seconds to allow a phy to consume
119 * power before yielding to another phy.
120 *
121 */
122 u8 phy_spin_up_delay_interval;
123
124 /**
125	 * These timer values specify how long a link will remain open with no
126	 * activity, in increments of a microsecond; the increment becomes
127	 * 100 microseconds if the uppermost bit is set.
128 *
129 */
130 u16 stp_inactivity_timeout;
131 u16 ssp_inactivity_timeout;
132
133 /**
134	 * These timer values specify how long a link will remain open in increments
135 * of 100 microseconds.
136 *
137 */
138 u16 stp_max_occupancy_timeout;
139 u16 ssp_max_occupancy_timeout;
140
141 /**
142 * This timer value specifies how long a link will remain open with no
143 * outbound traffic in increments of a microsecond.
144 *
145 */
146 u8 no_outbound_task_timeout;
147
148};
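
/*
 * Hedged sketch (hypothetical helper): populating the structure above
 * using the speed-generation defines from the top of this file. The
 * values are illustrative defaults, not the driver's.
 */
static void example_fill_user_params(struct sci_user_parameters *u)
{
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++)
		u->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN3_SPEED;

	u->max_number_concurrent_device_spin_up = 1;
}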
149
150#define SCIC_SDS_PARM_PHY_MASK_MIN 0x0
151#define SCIC_SDS_PARM_PHY_MASK_MAX 0xF
152#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
153
154struct sci_oem_params;
155int sci_oem_parameters_validate(struct sci_oem_params *oem);
156
157struct isci_orom;
158struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
159enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
160 struct isci_orom *orom, int scu_index);
161struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
162struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
163
164struct isci_oem_hdr {
165 u8 sig[4];
166 u8 rev_major;
167 u8 rev_minor;
168 u16 len;
169 u8 checksum;
170 u8 reserved1;
171 u16 reserved2;
172} __attribute__ ((packed));
173
174#else
175#define SCI_MAX_PORTS 4
176#define SCI_MAX_PHYS 4
177#define SCI_MAX_CONTROLLERS 2
178#endif
179
180#define ISCI_FW_NAME "isci/isci_firmware.bin"
181
182#define ROMSIGNATURE 0xaa55
183
184#define ISCI_OEM_SIG "$OEM"
185#define ISCI_OEM_SIG_SIZE 4
186#define ISCI_ROM_SIG "ISCUOEMB"
187#define ISCI_ROM_SIG_SIZE 8
188
189#define ISCI_EFI_VENDOR_GUID \
190 EFI_GUID(0x193dfefa, 0xa445, 0x4302, 0x99, 0xd8, 0xef, 0x3a, 0xad, \
191 0x1a, 0x04, 0xc6)
192#define ISCI_EFI_VAR_NAME "RstScuO"
193
194/* Allowed PORT configuration modes:
195 * APC - Automatic PORT configuration mode is selected when the OEM
196 * parameters provide no PHY_MASK for any PORT, i.e. no phys are assigned
197 * to any of the ports at start.
198 * MPC - Manual PORT configuration mode is selected when the OEM parameters
199 * provide a PHY_MASK for any PORT; a PORT with no PHY_MASK is invalid, not
200 * all PHYs need be assigned, and even a mask assigning a single PHY to one
201 * port is enough to declare manual PORT configuration (see the mode sketch
202 * below).
203 */
203enum sci_port_configuration_mode {
204 SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0,
205 SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1
206};
207
208struct sci_bios_oem_param_block_hdr {
209 uint8_t signature[ISCI_ROM_SIG_SIZE];
210 uint16_t total_block_length;
211 uint8_t hdr_length;
212 uint8_t version;
213 uint8_t preboot_source;
214 uint8_t num_elements;
215 uint16_t element_length;
216 uint8_t reserved[8];
217} __attribute__ ((packed));
218
219struct sci_oem_params {
220 struct {
221 uint8_t mode_type;
222 uint8_t max_concurrent_dev_spin_up;
223 uint8_t do_enable_ssc;
224 uint8_t reserved;
225 } controller;
226
227 struct {
228 uint8_t phy_mask;
229 } ports[SCI_MAX_PORTS];
230
231 struct sci_phy_oem_params {
232 struct {
233 uint32_t high;
234 uint32_t low;
235 } sas_address;
236
237 uint32_t afe_tx_amp_control0;
238 uint32_t afe_tx_amp_control1;
239 uint32_t afe_tx_amp_control2;
240 uint32_t afe_tx_amp_control3;
241 } phys[SCI_MAX_PHYS];
242} __attribute__ ((packed));
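
/*
 * Minimal sketch tying the mode rules above to these structures (helper
 * is hypothetical, assuming bit N of phy_mask corresponds to PE N): any
 * port with a non-zero PHY_MASK implies manual (MPC) configuration.
 */
static inline enum sci_port_configuration_mode
example_mode_from_oem(const struct sci_oem_params *oem)
{
	int i;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		if (oem->ports[i].phy_mask)
			return SCIC_PORT_MANUAL_CONFIGURATION_MODE;

	return SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
}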
243
244struct isci_orom {
245 struct sci_bios_oem_param_block_hdr hdr;
246 struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS];
247} __attribute__ ((packed));
248
249#endif
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
new file mode 100644
index 000000000000..9b266c7428e8
--- /dev/null
+++ b/drivers/scsi/isci/registers.h
@@ -0,0 +1,1934 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _SCU_REGISTERS_H_
57#define _SCU_REGISTERS_H_
58
59/**
60 * This file contains the constants and structures for the SCU memory mapped
61 * registers.
62 *
63 *
64 */
65
66#define SCU_VIIT_ENTRY_ID_MASK (0xC0000000)
67#define SCU_VIIT_ENTRY_ID_SHIFT (30)
68
69#define SCU_VIIT_ENTRY_FUNCTION_MASK (0x0FF00000)
70#define SCU_VIIT_ENTRY_FUNCTION_SHIFT (20)
71
72#define SCU_VIIT_ENTRY_IPPTMODE_MASK (0x0001F800)
73#define SCU_VIIT_ENTRY_IPPTMODE_SHIFT (12)
74
75#define SCU_VIIT_ENTRY_LPVIE_MASK (0x00000F00)
76#define SCU_VIIT_ENTRY_LPVIE_SHIFT (8)
77
78#define SCU_VIIT_ENTRY_STATUS_MASK (0x000000FF)
79#define SCU_VIIT_ENTRY_STATUS_SHIFT (0)
80
81#define SCU_VIIT_ENTRY_ID_INVALID (0 << SCU_VIIT_ENTRY_ID_SHIFT)
82#define SCU_VIIT_ENTRY_ID_VIIT (1 << SCU_VIIT_ENTRY_ID_SHIFT)
83#define SCU_VIIT_ENTRY_ID_IIT (2 << SCU_VIIT_ENTRY_ID_SHIFT)
84#define SCU_VIIT_ENTRY_ID_VIRT_EXP (3 << SCU_VIIT_ENTRY_ID_SHIFT)
85
86#define SCU_VIIT_IPPT_SSP_INITIATOR (0x01 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
87#define SCU_VIIT_IPPT_SMP_INITIATOR (0x02 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
88#define SCU_VIIT_IPPT_STP_INITIATOR (0x04 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
89#define SCU_VIIT_IPPT_INITIATOR \
90 (\
91 SCU_VIIT_IPPT_SSP_INITIATOR \
92 | SCU_VIIT_IPPT_SMP_INITIATOR \
93 | SCU_VIIT_IPPT_STP_INITIATOR \
94 )
95
96#define SCU_VIIT_STATUS_RNC_VALID (0x01 << SCU_VIIT_ENTRY_STATUS_SHIFT)
97#define SCU_VIIT_STATUS_ADDRESS_VALID (0x02 << SCU_VIIT_ENTRY_STATUS_SHIFT)
98#define SCU_VIIT_STATUS_RNI_VALID (0x04 << SCU_VIIT_ENTRY_STATUS_SHIFT)
99#define SCU_VIIT_STATUS_ALL_VALID \
100 (\
101 SCU_VIIT_STATUS_RNC_VALID \
102 | SCU_VIIT_STATUS_ADDRESS_VALID \
103 | SCU_VIIT_STATUS_RNI_VALID \
104 )
105
106#define SCU_VIIT_IPPT_SMP_TARGET (0x10 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
107
108/**
109 * struct scu_viit_entry - This is the SCU Virtual Initiator Table Entry
110 *
111 *
112 */
113struct scu_viit_entry {
114 /**
115	 * This must be encoded according to the type of initiator being constructed
116 * for this port.
117 */
118 u32 status;
119
120 /**
121 * Virtual initiator high SAS Address
122 */
123 u32 initiator_sas_address_hi;
124
125 /**
126 * Virtual initiator low SAS Address
127 */
128 u32 initiator_sas_address_lo;
129
130 /**
131 * This must be 0
132 */
133 u32 reserved;
134
135};
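
/*
 * Hedged sketch (hypothetical helper, assumes <linux/io.h>): program a
 * VIIT entry as a SAS initiator using the defines above, writing the
 * status word last so the entry only becomes valid once the addresses
 * are in place.
 */
static inline void example_fill_viit(struct scu_viit_entry __iomem *viit,
				     u32 sas_hi, u32 sas_lo)
{
	writel(sas_hi, &viit->initiator_sas_address_hi);
	writel(sas_lo, &viit->initiator_sas_address_lo);
	writel(0, &viit->reserved);
	writel(SCU_VIIT_ENTRY_ID_VIIT |
	       SCU_VIIT_IPPT_INITIATOR |
	       SCU_VIIT_STATUS_ALL_VALID, &viit->status);
}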
136
137
138/* IIT Status Defines */
139#define SCU_IIT_ENTRY_ID_MASK (0xC0000000)
140#define SCU_IIT_ENTRY_ID_SHIFT (30)
141
142#define SCU_IIT_ENTRY_STATUS_UPDATE_MASK (0x20000000)
143#define SCU_IIT_ENTRY_STATUS_UPDATE_SHIFT (29)
144
145#define SCU_IIT_ENTRY_LPI_MASK (0x00000F00)
146#define SCU_IIT_ENTRY_LPI_SHIFT (8)
147
148#define SCU_IIT_ENTRY_STATUS_MASK (0x000000FF)
149#define SCU_IIT_ENTRY_STATUS_SHIFT (0)
150
151/* IIT Remote Initiator Defines */
152#define SCU_IIT_ENTRY_REMOTE_TAG_MASK (0x0000FFFF)
153#define SCU_IIT_ENTRY_REMOTE_TAG_SHIFT (0)
154
155#define SCU_IIT_ENTRY_REMOTE_RNC_MASK (0x0FFF0000)
156#define SCU_IIT_ENTRY_REMOTE_RNC_SHIFT (16)
157
158#define SCU_IIT_ENTRY_ID_INVALID (0 << SCU_IIT_ENTRY_ID_SHIFT)
159#define SCU_IIT_ENTRY_ID_VIIT (1 << SCU_IIT_ENTRY_ID_SHIFT)
160#define SCU_IIT_ENTRY_ID_IIT (2 << SCU_IIT_ENTRY_ID_SHIFT)
161#define SCU_IIT_ENTRY_ID_VIRT_EXP (3 << SCU_IIT_ENTRY_ID_SHIFT)
162
163/**
164 * struct scu_iit_entry - This will be implemented later when we support
165 * virtual functions
166 *
167 *
168 */
169struct scu_iit_entry {
170 u32 status;
171 u32 remote_initiator_sas_address_hi;
172 u32 remote_initiator_sas_address_lo;
173 u32 remote_initiator;
174
175};
176
177/* Generate a value for an SCU register */
178#define SCU_GEN_VALUE(name, value) \
179 (((value) << name ## _SHIFT) & (name ## _MASK))
180
181/*
182 * Generate a bit value for an SCU register
183 * Make sure that the register MASK is just a single bit */
184#define SCU_GEN_BIT(name) \
185 SCU_GEN_VALUE(name, ((u32)1))
186
187#define SCU_SET_BIT(name, reg_value) \
188 ((reg_value) | SCU_GEN_BIT(name))
189
190#define SCU_CLEAR_BIT(name, reg_value) \
191	((reg_value) & ~(SCU_GEN_BIT(name)))
192
193/*
194 * *****************************************************************************
195 * Unions for bitfield definitions of SCU Registers
196 * SMU Post Context Port
197 * ***************************************************************************** */
198#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_SHIFT (0)
199#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_MASK (0x00000FFF)
200#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_SHIFT (12)
201#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_MASK (0x0000F000)
202#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_SHIFT (16)
203#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_MASK (0x00030000)
204#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_SHIFT (18)
205#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_MASK (0x00FC0000)
206#define SMU_POST_CONTEXT_PORT_RESERVED_MASK (0xFF000000)
207
208#define SMU_PCP_GEN_VAL(name, value) \
209 SCU_GEN_VALUE(SMU_POST_CONTEXT_PORT_ ## name, value)
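
/*
 * Illustrative only (hypothetical helper): compose a post context port
 * value from its fields; SCU_GEN_VALUE shifts each field into place and
 * masks it against the definitions above.
 */
static inline u32 example_post_context(u32 lp_index, u32 pe_index, u32 ci)
{
	return SMU_PCP_GEN_VAL(LOGICAL_PORT_INDEX, lp_index) |
	       SMU_PCP_GEN_VAL(PROTOCOL_ENGINE, pe_index) |
	       SMU_PCP_GEN_VAL(CONTEXT_INDEX, ci);
}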
210
211/* ***************************************************************************** */
212#define SMU_INTERRUPT_STATUS_COMPLETION_SHIFT (31)
213#define SMU_INTERRUPT_STATUS_COMPLETION_MASK (0x80000000)
214#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_SHIFT (1)
215#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_MASK (0x00000002)
216#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_SHIFT (0)
217#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_MASK (0x00000001)
218#define SMU_INTERRUPT_STATUS_RESERVED_MASK (0x7FFFFFFC)
219
220#define SMU_ISR_GEN_BIT(name) \
221 SCU_GEN_BIT(SMU_INTERRUPT_STATUS_ ## name)
222
223#define SMU_ISR_QUEUE_ERROR SMU_ISR_GEN_BIT(QUEUE_ERROR)
224#define SMU_ISR_QUEUE_SUSPEND SMU_ISR_GEN_BIT(QUEUE_SUSPEND)
225#define SMU_ISR_COMPLETION SMU_ISR_GEN_BIT(COMPLETION)
226
227/* ***************************************************************************** */
228#define SMU_INTERRUPT_MASK_COMPLETION_SHIFT (31)
229#define SMU_INTERRUPT_MASK_COMPLETION_MASK (0x80000000)
230#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_SHIFT (1)
231#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_MASK (0x00000002)
232#define SMU_INTERRUPT_MASK_QUEUE_ERROR_SHIFT (0)
233#define SMU_INTERRUPT_MASK_QUEUE_ERROR_MASK (0x00000001)
234#define SMU_INTERRUPT_MASK_RESERVED_MASK (0x7FFFFFFC)
235
236#define SMU_IMR_GEN_BIT(name) \
237 SCU_GEN_BIT(SMU_INTERRUPT_MASK_ ## name)
238
239#define SMU_IMR_QUEUE_ERROR SMU_IMR_GEN_BIT(QUEUE_ERROR)
240#define SMU_IMR_QUEUE_SUSPEND SMU_IMR_GEN_BIT(QUEUE_SUSPEND)
241#define SMU_IMR_COMPLETION SMU_IMR_GEN_BIT(COMPLETION)
242
243/* ***************************************************************************** */
244#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_SHIFT (0)
245#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_MASK (0x0000001F)
246#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_SHIFT (8)
247#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_MASK (0x0000FF00)
248#define SMU_INTERRUPT_COALESCING_CONTROL_RESERVED_MASK (0xFFFF00E0)
249
250#define SMU_ICC_GEN_VAL(name, value) \
251 SCU_GEN_VALUE(SMU_INTERRUPT_COALESCING_CONTROL_ ## name, value)
252
253/* ***************************************************************************** */
254#define SMU_TASK_CONTEXT_RANGE_START_SHIFT (0)
255#define SMU_TASK_CONTEXT_RANGE_START_MASK (0x00000FFF)
256#define SMU_TASK_CONTEXT_RANGE_ENDING_SHIFT (16)
257#define SMU_TASK_CONTEXT_RANGE_ENDING_MASK (0x0FFF0000)
258#define SMU_TASK_CONTEXT_RANGE_ENABLE_SHIFT (31)
259#define SMU_TASK_CONTEXT_RANGE_ENABLE_MASK (0x80000000)
260#define SMU_TASK_CONTEXT_RANGE_RESERVED_MASK (0x7000F000)
261
262#define SMU_TCR_GEN_VAL(name, value) \
263 SCU_GEN_VALUE(SMU_TASK_CONTEXT_RANGE_ ## name, value)
264
265#define SMU_TCR_GEN_BIT(name, value) \
266 SCU_GEN_BIT(SMU_TASK_CONTEXT_RANGE_ ## name)
267
268/* ***************************************************************************** */
269
270#define SMU_COMPLETION_QUEUE_PUT_POINTER_SHIFT (0)
271#define SMU_COMPLETION_QUEUE_PUT_POINTER_MASK (0x00003FFF)
272#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_SHIFT (15)
273#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_MASK (0x00008000)
274#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_SHIFT (16)
275#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_MASK (0x03FF0000)
276#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_SHIFT (26)
277#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_MASK (0x04000000)
278#define SMU_COMPLETION_QUEUE_PUT_RESERVED_MASK (0xF8004000)
279
280#define SMU_CQPR_GEN_VAL(name, value) \
281 SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_PUT_ ## name, value)
282
283#define SMU_CQPR_GEN_BIT(name) \
284 SCU_GEN_BIT(SMU_COMPLETION_QUEUE_PUT_ ## name)
285
286/* ***************************************************************************** */
287
288#define SMU_COMPLETION_QUEUE_GET_POINTER_SHIFT (0)
289#define SMU_COMPLETION_QUEUE_GET_POINTER_MASK (0x00003FFF)
290#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT (15)
291#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_MASK (0x00008000)
292#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT (16)
293#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK (0x03FF0000)
294#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT (26)
295#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_MASK (0x04000000)
296#define SMU_COMPLETION_QUEUE_GET_ENABLE_SHIFT (30)
297#define SMU_COMPLETION_QUEUE_GET_ENABLE_MASK (0x40000000)
298#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_SHIFT (31)
299#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_MASK (0x80000000)
300#define SMU_COMPLETION_QUEUE_GET_RESERVED_MASK (0x38004000)
301
302#define SMU_CQGR_GEN_VAL(name, value) \
303 SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_GET_ ## name, value)
304
305#define SMU_CQGR_GEN_BIT(name) \
306 SCU_GEN_BIT(SMU_COMPLETION_QUEUE_GET_ ## name)
307
308#define SMU_CQGR_CYCLE_BIT \
309 SMU_CQGR_GEN_BIT(CYCLE_BIT)
310
311#define SMU_CQGR_EVENT_CYCLE_BIT \
312 SMU_CQGR_GEN_BIT(EVENT_CYCLE_BIT)
313
314#define SMU_CQGR_GET_POINTER_SET(value) \
315 SMU_CQGR_GEN_VAL(POINTER, value)
316
317
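/*
 * Example (illustrative): arming the get register with both enables and
 * a zeroed index is the composition
 *
 *	SMU_CQGR_GEN_BIT(ENABLE) | SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
 *	SMU_CQGR_GET_POINTER_SET(0) == 0xC0000000
 */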
318/* ***************************************************************************** */
319#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_SHIFT (0)
320#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_MASK (0x00003FFF)
321#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_SHIFT (16)
322#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_MASK (0x03FF0000)
323#define SMU_COMPLETION_QUEUE_CONTROL_RESERVED_MASK (0xFC00C000)
324
325#define SMU_CQC_GEN_VAL(name, value) \
326 SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_CONTROL_ ## name, value)
327
328#define SMU_CQC_QUEUE_LIMIT_SET(value) \
329 SMU_CQC_GEN_VAL(QUEUE_LIMIT, value)
330
331#define SMU_CQC_EVENT_LIMIT_SET(value) \
332 SMU_CQC_GEN_VAL(EVENT_LIMIT, value)
333
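/*
 * Example (illustrative): with a queue limit of 1023 and an event limit
 * of 511 the composed control value is
 *
 *	SMU_CQC_QUEUE_LIMIT_SET(1023) | SMU_CQC_EVENT_LIMIT_SET(511)
 *		== 0x01FF03FF
 */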
334
335/* ***************************************************************************** */
336#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT (0)
337#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK (0x00000FFF)
338#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT (12)
339#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK (0x00007000)
340#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT (15)
341#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK (0x07FF8000)
342#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT (27)
343#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK (0x08000000)
344#define SMU_DEVICE_CONTEXT_CAPACITY_RESERVED_MASK (0xF0000000)
345
346#define SMU_DCC_GEN_VAL(name, value) \
347 SCU_GEN_VALUE(SMU_DEVICE_CONTEXT_CAPACITY_ ## name, value)
348
#define SMU_DCC_GET_MAX_PEG(value) \
	(\
		((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK) \
		>> SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT \
	)
354
355#define SMU_DCC_GET_MAX_LP(value) \
356 (\
357 ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
358 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT \
359 )
360
361#define SMU_DCC_GET_MAX_TC(value) \
362 (\
363 ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
364 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT \
365 )
366
367#define SMU_DCC_GET_MAX_RNC(value) \
368 (\
369 ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
370 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \
371 )
372
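/*
 * Example (illustrative): decoding a device context capacity value of
 * 0x0FFFFFFF with the accessors above yields MAX_PEG == 1,
 * MAX_RNC == 0xFFF, MAX_LP == 0x7 and MAX_TC == 0xFFF.
 */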
373/* -------------------------------------------------------------------------- */
374
375#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0)
376#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_MASK (0x00000001)
377#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_SHIFT (1)
378#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_MASK (0x00000002)
379#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_SHIFT (16)
380#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_MASK (0x00010000)
381#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_SHIFT (17)
382#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_MASK (0x00020000)
383#define SMU_CONTROL_STATUS_RESERVED_MASK (0xFFFCFFFC)
384
385#define SMU_SMUCSR_GEN_BIT(name) \
386 SCU_GEN_BIT(SMU_CONTROL_STATUS_ ## name)
387
388#define SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
389 (SMU_SMUCSR_GEN_BIT(SCHEDULER_RAM_INIT_COMPLETED))
390
391#define SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
392 (SMU_SMUCSR_GEN_BIT(CONTEXT_RAM_INIT_COMPLETED))
393
394#define SCU_RAM_INIT_COMPLETED \
395 (\
396 SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
397 | SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
398 )
399
400/* -------------------------------------------------------------------------- */
401
402#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_SHIFT (0)
403#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_MASK (0x00000001)
404#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_SHIFT (1)
405#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_MASK (0x00000002)
406#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_SHIFT (2)
407#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_MASK (0x00000004)
408#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_SHIFT (3)
409#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_MASK (0x00000008)
410#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_SHIFT (8)
411#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_MASK (0x00000100)
412#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_SHIFT (9)
413#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_MASK (0x00000200)
414#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_SHIFT (10)
415#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK (0x00000400)
416#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_SHIFT (11)
417#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_MASK (0x00000800)
418
419#define SMU_RESET_PROTOCOL_ENGINE(peg, pe) \
420 ((1 << (pe)) << ((peg) * 8))
421
422#define SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
423 (\
424 SMU_RESET_PROTOCOL_ENGINE(peg, 0) \
425 | SMU_RESET_PROTOCOL_ENGINE(peg, 1) \
426 | SMU_RESET_PROTOCOL_ENGINE(peg, 2) \
427 | SMU_RESET_PROTOCOL_ENGINE(peg, 3) \
428 )
429
430#define SMU_RESET_ALL_PROTOCOL_ENGINES() \
431 (\
432 SMU_RESET_PEG_PROTOCOL_ENGINES(0) \
433 | SMU_RESET_PEG_PROTOCOL_ENGINES(1) \
434 )
435
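/*
 * Example (illustrative): SMU_RESET_PROTOCOL_ENGINE(1, 2) expands to
 * (1 << 2) << (1 * 8) == 0x00000400, which matches
 * SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK above, and
 * SMU_RESET_ALL_PROTOCOL_ENGINES() evaluates to 0x00000F0F.
 */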
436#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_SHIFT (16)
437#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_MASK (0x00010000)
438#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_SHIFT (17)
439#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_MASK (0x00020000)
440#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_SHIFT (18)
441#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_MASK (0x00040000)
442#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_SHIFT (19)
443#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_MASK (0x00080000)
444
445#define SMU_RESET_WIDE_PORT_QUEUE(peg, wide_port) \
446 ((1 << ((wide_port) / 2)) << ((peg) * 2) << 16)
447
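/*
 * Example (illustrative): SMU_RESET_WIDE_PORT_QUEUE(1, 2) expands to
 * (1 << (2 / 2)) << (1 * 2) << 16 == 0x00080000, which matches
 * SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_MASK above.
 */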
448#define SMU_SOFTRESET_CONTROL_RESET_PEG0_SHIFT (20)
449#define SMU_SOFTRESET_CONTROL_RESET_PEG0_MASK (0x00100000)
450#define SMU_SOFTRESET_CONTROL_RESET_PEG1_SHIFT (21)
451#define SMU_SOFTRESET_CONTROL_RESET_PEG1_MASK (0x00200000)
452#define SMU_SOFTRESET_CONTROL_RESET_SCU_SHIFT (22)
453#define SMU_SOFTRESET_CONTROL_RESET_SCU_MASK (0x00400000)
454
/*
 * If a protocol engine group is reset, it makes sense to also reset all
 * of its protocol engines and wide port queues. */
458#define SMU_RESET_PROTOCOL_ENGINE_GROUP(peg) \
459 (\
460 (1 << ((peg) + 20)) \
461 | SMU_RESET_WIDE_PORT_QUEUE(peg, 0) \
462 | SMU_RESET_WIDE_PORT_QUEUE(peg, 1) \
463 | SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
464 )
465
466#define SMU_RESET_ALL_PROTOCOL_ENGINE_GROUPS() \
467 (\
468 SMU_RESET_PROTOCOL_ENGINE_GROUP(0) \
469 | SMU_RESET_PROTOCOL_ENGINE_GROUP(1) \
470 )
471
472#define SMU_RESET_SCU() (0xFFFFFFFF)
473
474
475
476/* ***************************************************************************** */
477#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_SHIFT (0)
478#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_MASK (0x00000FFF)
479#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_SHIFT (16)
480#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_MASK (0x0FFF0000)
481#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_SHIFT (31)
482#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_MASK (0x80000000)
483#define SMU_TASK_CONTEXT_ASSIGNMENT_RESERVED_MASK (0x7000F000)
484
485#define SMU_TCA_GEN_VAL(name, value) \
486 SCU_GEN_VALUE(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name, value)
487
488#define SMU_TCA_GEN_BIT(name) \
489 SCU_GEN_BIT(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name)
490
491/* ***************************************************************************** */
492#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_SHIFT (0)
493#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_MASK (0x00000FFF)
494#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_RESERVED_MASK (0xFFFFF000)
495
496#define SCU_UFQC_GEN_VAL(name, value) \
497 SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_ ## name, value)
498
499#define SCU_UFQC_QUEUE_SIZE_SET(value) \
500 SCU_UFQC_GEN_VAL(QUEUE_SIZE, value)
501
502/* ***************************************************************************** */
503#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_SHIFT (0)
504#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_MASK (0x00000FFF)
505#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_SHIFT (12)
506#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_MASK (0x00001000)
507#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_RESERVED_MASK (0xFFFFE000)
508
509#define SCU_UFQPP_GEN_VAL(name, value) \
510 SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name, value)
511
512#define SCU_UFQPP_GEN_BIT(name) \
513 SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name)
514
515/*
516 * *****************************************************************************
517 * * SDMA Registers
518 * ***************************************************************************** */
519#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_SHIFT (0)
520#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_MASK (0x00000FFF)
521#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_SHIFT (12)
#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_MASK   (0x00001000)
523#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_SHIFT (31)
524#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_MASK (0x80000000)
525#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_RESERVED_MASK (0x7FFFE000)
526
527#define SCU_UFQGP_GEN_VAL(name, value) \
528 SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name, value)
529
530#define SCU_UFQGP_GEN_BIT(name) \
531 SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name)
532
#define SCU_UFQGP_CYCLE_BIT(value) \
	((value) ? SCU_UFQGP_GEN_BIT(CYCLE_BIT) : 0)
535
#define SCU_UFQGP_GET_POINTER(value) \
	SCU_UFQGP_GEN_VAL(POINTER, value)
538
539#define SCU_UFQGP_ENABLE(value) \
540 (SCU_UFQGP_GEN_BIT(ENABLE) | value)
541
542#define SCU_UFQGP_DISABLE(value) \
543 (~SCU_UFQGP_GEN_BIT(ENABLE) & value)
544
545#define SCU_UFQGP_VALUE(bit, value) \
546 (SCU_UFQGP_CYCLE_BIT(bit) | SCU_UFQGP_GET_POINTER(value))
547
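/*
 * Example (illustrative, using the corrected macros above): writing a
 * get pointer of 5 with the cycle bit set composes as
 *
 *	SCU_UFQGP_VALUE(1, 5) == 0x00001005
 */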
548/* ***************************************************************************** */
549#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SHIFT (0)
550#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_MASK (0x0000FFFF)
551#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (16)
552#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00010000)
553#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_SHIFT (17)
554#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_MASK (0x00020000)
555#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_SHIFT (18)
556#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_MASK (0x00040000)
557#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_SHIFT (19)
558#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_MASK (0x00080000)
559#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_SHIFT (20)
560#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_MASK (0x00100000)
561#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_SHIFT (21)
562#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_MASK (0x00200000)
563#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_SHIFT (22)
564#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_MASK (0x00400000)
565#define SCU_PDMA_CONFIGURATION_RESERVED_MASK (0xFF800000)
566
567#define SCU_PDMACR_GEN_VALUE(name, value) \
568 SCU_GEN_VALUE(SCU_PDMA_CONFIGURATION_ ## name, value)
569
570#define SCU_PDMACR_GEN_BIT(name) \
571 SCU_GEN_BIT(SCU_PDMA_CONFIGURATION_ ## name)
572
#define SCU_PDMACR_BE_GEN_BIT(name) \
	SCU_PDMACR_GEN_BIT(BIG_ENDIAN_CONTROL_ ## name)
575
576/* ***************************************************************************** */
577#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (8)
578#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00000100)
579
580#define SCU_CDMACR_GEN_BIT(name) \
581 SCU_GEN_BIT(SCU_CDMA_CONFIGURATION_ ## name)
582
583/*
584 * *****************************************************************************
585 * * SCU Link Layer Registers
586 * ***************************************************************************** */
587#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_SHIFT (0)
588#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_MASK (0x000000FF)
589#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_SHIFT (8)
590#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_MASK (0x0000FF00)
591#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_SHIFT (16)
592#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_MASK (0x00FF0000)
593#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_SHIFT (24)
594#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_MASK (0xFF000000)
#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_REQUIRED_MASK (0x00000000)
#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DEFAULT_MASK (0x7D00676F)
#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RESERVED_MASK (0x00FF0000)
598
599#define SCU_SAS_SPDTOV_GEN_VALUE(name, value) \
600 SCU_GEN_VALUE(SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_ ## name, value)
601
602
#define SCU_LINK_STATUS_DWORD_SYNC_ACQUIRED_SHIFT         (2)
#define SCU_LINK_STATUS_DWORD_SYNC_ACQUIRED_MASK          (0x00000004)
605#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_SHIFT (4)
606#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_MASK (0x00000010)
607#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_SHIFT (5)
608#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_MASK (0x00000020)
609#define SCU_LINK_STATUS_RESERVED_MASK (0xFFFFFFCD)
610
611#define SCU_SAS_LLSTA_GEN_BIT(name) \
612 SCU_GEN_BIT(SCU_LINK_STATUS_ ## name)
613
614
615/* TODO: Where is the SATA_PSELTOV register? */
616
617/*
618 * *****************************************************************************
619 * * SCU SAS Maximum Arbitration Wait Time Timeout Register
620 * ***************************************************************************** */
621#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_SHIFT (0)
622#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_MASK (0x00007FFF)
623#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_SHIFT (15)
624#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_MASK (0x00008000)
625
626#define SCU_SAS_MAWTTOV_GEN_VALUE(name, value) \
627 SCU_GEN_VALUE(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name, value)
628
629#define SCU_SAS_MAWTTOV_GEN_BIT(name) \
630 SCU_GEN_BIT(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name)
631
632
633/*
 * TODO: Where is the SAS_LNKTOV register?
635 * TODO: Where is the SAS_PHYTOV register? */
636
637#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT (1)
638#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_MASK (0x00000002)
639#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_SHIFT (2)
640#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_MASK (0x00000004)
641#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_SHIFT (3)
642#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_MASK (0x00000008)
643#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_SHIFT (8)
644#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_MASK (0x00000100)
645#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_SHIFT (9)
646#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_MASK (0x00000200)
647#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_SHIFT (10)
648#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_MASK (0x00000400)
649#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_SHIFT (11)
650#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_MASK (0x00000800)
651#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_SHIFT (16)
652#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_MASK (0x000F0000)
653#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_SHIFT (24)
654#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_MASK (0x0F000000)
655#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_SHIFT (28)
656#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_MASK (0x70000000)
657#define SCU_SAS_TRANSMIT_IDENTIFICATION_RESERVED_MASK (0x80F0F1F1)
658
659#define SCU_SAS_TIID_GEN_VAL(name, value) \
660 SCU_GEN_VALUE(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name, value)
661
662#define SCU_SAS_TIID_GEN_BIT(name) \
663 SCU_GEN_BIT(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name)
664
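/*
 * Example (illustrative): a phy advertising SMP, STP and SSP initiator
 * support in its transmitted IDENTIFY would set
 *
 *	SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) |
 *	SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
 *	SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) == 0x00000E00
 */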
665/* SAS Identify Frame PHY Identifier Register */
666#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_SHIFT (16)
667#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_MASK (0x00010000)
668#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_SHIFT (17)
669#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_MASK (0x00020000)
670#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_SHIFT (18)
671#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_MASK (0x00040000)
672#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_SHIFT (24)
673#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_MASK (0xFF000000)
674#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_RESERVED_MASK (0x00F800FF)
675
676#define SCU_SAS_TIPID_GEN_VALUE(name, value) \
677 SCU_GEN_VALUE(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name, value)
678
679#define SCU_SAS_TIPID_GEN_BIT(name) \
680 SCU_GEN_BIT(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name)
681
682
683#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_SHIFT (4)
684#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_MASK (0x00000010)
685#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_SHIFT (6)
686#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_MASK (0x00000040)
687#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_SHIFT (7)
688#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_MASK (0x00000080)
689#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_SHIFT (8)
690#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_MASK (0x00000100)
691#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_SHIFT (9)
692#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_MASK (0x00000200)
693#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_SHIFT (11)
694#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_MASK (0x00000800)
695#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_SHIFT (12)
696#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_MASK (0x00001000)
697#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_SHIFT (13)
698#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_MASK (0x00002000)
699#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_SHIFT (14)
700#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_MASK (0x00004000)
701#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_SHIFT (15)
702#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_MASK (0x00008000)
703#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_SHIFT (23)
704#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_MASK (0x00800000)
705#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_SHIFT (27)
706#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_MASK (0x08000000)
707#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_SHIFT (28)
708#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_MASK (0x10000000)
709#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_SHIFT (29)
710#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_MASK (0x20000000)
711#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_SHIFT (30)
712#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_MASK (0x40000000)
713#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_SHIFT (31)
714#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_MASK (0x80000000)
715#define SCU_SAS_PHY_CONFIGURATION_REQUIRED_MASK (0x0100000F)
716#define SCU_SAS_PHY_CONFIGURATION_DEFAULT_MASK (0x4180100F)
717#define SCU_SAS_PHY_CONFIGURATION_RESERVED_MASK (0x00000000)
718
719#define SCU_SAS_PCFG_GEN_BIT(name) \
720 SCU_GEN_BIT(SCU_SAS_PHY_CONFIGURATION_ ## name)
721
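/*
 * Example (illustrative): restarting OOB on a phy might be sketched as
 * writing SCU_SAS_PCFG_GEN_BIT(OOB_RESET) (bit 29) and then
 * SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE) (bit 15) to SAS_PCFG; the exact
 * sequencing is up to the driver, not this header.
 */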
722#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_SHIFT (0)
723#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_MASK (0x000007FF)
724#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_SHIFT (16)
725#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_MASK (0x00ff0000)
726
727#define SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(name, value) \
728 SCU_GEN_VALUE(SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_##name, value)
729
730#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_SHIFT (0)
731#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_MASK (0x0003FFFF)
732#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_SHIFT (31)
733#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_MASK (0x80000000)
734#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_RESERVED_MASK (0x7FFC0000)
735
736#define SCU_ENSPINUP_GEN_VAL(name, value) \
737 SCU_GEN_VALUE(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name, value)
738
739#define SCU_ENSPINUP_GEN_BIT(name) \
740 SCU_GEN_BIT(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name)
741
742
743#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_SHIFT (1)
744#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_MASK (0x00000002)
745#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_SHIFT (4)
746#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_MASK (0x000000F0)
747#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_SHIFT (8)
748#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_MASK (0x00000100)
749#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_SHIFT (9)
750#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_MASK (0x00000201)
751#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_SHIFT (10)
752#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_MASK (0x00000401)
753#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_SHIFT (11)
754#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_MASK (0x00000801)
755#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_SHIFT (12)
756#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_MASK (0x00001001)
757#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_SHIFT (13)
758#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_MASK (0x00002001)
759#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_SHIFT (31)
760#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_MASK (0x80000000)
761#define SCU_LINK_LAYER_PHY_CAPABILITIES_DEFAULT_MASK (0x00003F01)
762#define SCU_LINK_LAYER_PHY_CAPABILITIES_REQUIRED_MASK (0x00000001)
763#define SCU_LINK_LAYER_PHY_CAPABILITIES_RESERVED_MASK (0x7FFFC00D)
764
765#define SCU_SAS_PHYCAP_GEN_VAL(name, value) \
766 SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name, value)
767
768#define SCU_SAS_PHYCAP_GEN_BIT(name) \
769 SCU_GEN_BIT(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name)
770
771
772#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_SHIFT (0)
773#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_MASK (0x000000FF)
774#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_SHIFT (31)
775#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_MASK (0x80000000)
776#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_RESERVED_MASK (0x7FFFFF00)
777
778#define SCU_PSZGCR_GEN_VAL(name, value) \
779 SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name, value)
780
781#define SCU_PSZGCR_GEN_BIT(name) \
782 SCU_GEN_BIT(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name)
783
784#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_SHIFT (1)
785#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_MASK (0x00000002)
786#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_SHIFT (2)
787#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_MASK (0x00000004)
788#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_SHIFT (4)
789#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_MASK (0x00000010)
790#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_SHIFT (5)
791#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_MASK (0x00000020)
792#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_SHIFT (16)
793#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_MASK (0x00030000)
794#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_SHIFT (19)
795#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_MASK (0x00080000)
796#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_SHIFT (20)
797#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_MASK (0x00300000)
798#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_SHIFT (23)
799#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_MASK (0x00800000)
800#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_SHIFT (24)
801#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_MASK (0x03000000)
802#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_SHIFT (27)
803#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_MASK (0x08000000)
804#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_SHIFT (28)
805#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_MASK (0x30000000)
806#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_SHIFT (31)
807#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_MASK (0x80000000)
808#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_RESERVED_MASK (0x4444FFC9)
809
810#define SCU_PEG_SCUVZECR_GEN_VAL(name, val) \
811 SCU_GEN_VALUE(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name, val)
812
813#define SCU_PEG_SCUVZECR_GEN_BIT(name) \
814 SCU_GEN_BIT(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name)
815
816
817/*
818 * *****************************************************************************
819 * * Port Task Scheduler registers shift and mask values
820 * ***************************************************************************** */
821#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_SHIFT (0)
822#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_MASK (0x0000FFFF)
823#define SCU_PTSG_CONTROL_TASK_TIMEOUT_SHIFT (16)
824#define SCU_PTSG_CONTROL_TASK_TIMEOUT_MASK (0x00FF0000)
825#define SCU_PTSG_CONTROL_PTSG_ENABLE_SHIFT (24)
826#define SCU_PTSG_CONTROL_PTSG_ENABLE_MASK (0x01000000)
827#define SCU_PTSG_CONTROL_ETM_ENABLE_SHIFT (25)
828#define SCU_PTSG_CONTROL_ETM_ENABLE_MASK (0x02000000)
829#define SCU_PTSG_CONTROL_DEFAULT_MASK (0x00020002)
830#define SCU_PTSG_CONTROL_REQUIRED_MASK (0x00000000)
831#define SCU_PTSG_CONTROL_RESERVED_MASK (0xFC000000)
832
833#define SCU_PTSGCR_GEN_VAL(name, val) \
834 SCU_GEN_VALUE(SCU_PTSG_CONTROL_ ## name, val)
835
836#define SCU_PTSGCR_GEN_BIT(name) \
837 SCU_GEN_BIT(SCU_PTSG_CONTROL_ ## name)
838
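/*
 * Example (illustrative): enabling the port task scheduler with a
 * 0x7FFF IT nexus timeout composes as
 *
 *	SCU_PTSGCR_GEN_BIT(PTSG_ENABLE) |
 *	SCU_PTSGCR_GEN_VAL(IT_NEXUS_TIMEOUT, 0x7FFF) == 0x01007FFF
 */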
839
840/* ***************************************************************************** */
841#define SCU_PTSG_REAL_TIME_CLOCK_SHIFT (0)
842#define SCU_PTSG_REAL_TIME_CLOCK_MASK (0x0000FFFF)
843#define SCU_PTSG_REAL_TIME_CLOCK_RESERVED_MASK (0xFFFF0000)
844
845#define SCU_RTCR_GEN_VAL(name, val) \
846 SCU_GEN_VALUE(SCU_PTSG_ ## name, val)
847
848
849#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_SHIFT (0)
850#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_MASK (0x00FFFFFF)
851#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_RESERVED_MASK (0xFF000000)
852
853#define SCU_RTCCR_GEN_VAL(name, val) \
854 SCU_GEN_VALUE(SCU_PTSG_REAL_TIME_CLOCK_CONTROL_ ## name, val)
855
856
857#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_SHIFT (0)
858#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_MASK (0x00000001)
859#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_SHIFT (1)
860#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_MASK (0x00000002)
861#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_RESERVED_MASK (0xFFFFFFFC)
862
863#define SCU_PTSxCR_GEN_BIT(name) \
864 SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ ## name)
865
866
867#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_SHIFT (0)
868#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_MASK (0x00000001)
869#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_SHIFT (1)
870#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_MASK (0x00000002)
871#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_SHIFT (2)
872#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_MASK (0x00000004)
873#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_RESERVED_MASK (0xFFFFFFF8)
874
875#define SCU_PTSxSR_GEN_BIT(name) \
876 SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name)
877
878
879/*
880 * *****************************************************************************
881 * * SGPIO Register shift and mask values
882 * ***************************************************************************** */
883#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_SHIFT (0)
884#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_MASK (0x00000001)
885#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_SHIFT (1)
886#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_MASK (0x00000002)
887#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_SHIFT (2)
888#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_MASK (0x00000004)
889#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_SHIFT (15)
890#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_MASK (0x00008000)
891#define SCU_SGPIO_CONTROL_SGPIO_RESERVED_MASK (0xFFFF7FF8)
892
893#define SCU_SGICRx_GEN_BIT(name) \
894 SCU_GEN_BIT(SCU_SGPIO_CONTROL_SGPIO_ ## name)
895
896#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_SHIFT (0)
897#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_MASK (0x0000000F)
898#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_SHIFT (4)
899#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_MASK (0x000000F0)
900#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_SHIFT (8)
901#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_MASK (0x00000F00)
902#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_SHIFT (12)
903#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_MASK (0x0000F000)
904#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_RESERVED_MASK (0xFFFF0000)
905
906#define SCU_SGPBRx_GEN_VAL(name, value) \
907 SCU_GEN_VALUE(SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_ ## name, value)
908
909#define SCU_SGPIO_START_DRIVE_LOWER_R0_SHIFT (0)
910#define SCU_SGPIO_START_DRIVE_LOWER_R0_MASK (0x00000003)
911#define SCU_SGPIO_START_DRIVE_LOWER_R1_SHIFT (4)
912#define SCU_SGPIO_START_DRIVE_LOWER_R1_MASK (0x00000030)
913#define SCU_SGPIO_START_DRIVE_LOWER_R2_SHIFT (8)
914#define SCU_SGPIO_START_DRIVE_LOWER_R2_MASK (0x00000300)
915#define SCU_SGPIO_START_DRIVE_LOWER_R3_SHIFT (12)
916#define SCU_SGPIO_START_DRIVE_LOWER_R3_MASK (0x00003000)
917#define SCU_SGPIO_START_DRIVE_LOWER_RESERVED_MASK (0xFFFF8888)
918
919#define SCU_SGSDLRx_GEN_VAL(name, value) \
920 SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value)
921
922#define SCU_SGPIO_START_DRIVE_UPPER_R0_SHIFT (0)
923#define SCU_SGPIO_START_DRIVE_UPPER_R0_MASK (0x00000003)
924#define SCU_SGPIO_START_DRIVE_UPPER_R1_SHIFT (4)
925#define SCU_SGPIO_START_DRIVE_UPPER_R1_MASK (0x00000030)
926#define SCU_SGPIO_START_DRIVE_UPPER_R2_SHIFT (8)
927#define SCU_SGPIO_START_DRIVE_UPPER_R2_MASK (0x00000300)
928#define SCU_SGPIO_START_DRIVE_UPPER_R3_SHIFT (12)
929#define SCU_SGPIO_START_DRIVE_UPPER_R3_MASK (0x00003000)
930#define SCU_SGPIO_START_DRIVE_UPPER_RESERVED_MASK (0xFFFF8888)
931
#define SCU_SGSDURx_GEN_VAL(name, value) \
	SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_UPPER_ ## name, value)
934
935#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_SHIFT (0)
936#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_MASK (0x00000003)
937#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_SHIFT (4)
938#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_MASK (0x00000030)
939#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_SHIFT (8)
940#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_MASK (0x00000300)
941#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_SHIFT (12)
942#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_MASK (0x00003000)
943#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_RESERVED_MASK (0xFFFF8888)
944
945#define SCU_SGSIDLRx_GEN_VAL(name, value) \
946 SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
947
948#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_SHIFT (0)
949#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_MASK (0x00000003)
950#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_SHIFT (4)
951#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_MASK (0x00000030)
952#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_SHIFT (8)
953#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_MASK (0x00000300)
954#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_SHIFT (12)
955#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_MASK (0x00003000)
956#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_RESERVED_MASK (0xFFFF8888)
957
#define SCU_SGSIDURx_GEN_VAL(name, value) \
	SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_ ## name, value)
960
961#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_SHIFT (0)
962#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_MASK (0x0000000F)
963#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_RESERVED_MASK (0xFFFFFFF0)
964
#define SCU_SGVSCR_GEN_VAL(value) \
	SCU_GEN_VALUE(SCU_SGPIO_VENDOR_SPECIFIC_CODE, value)
967
#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA0_SHIFT (0)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA0_MASK (0x00000003)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA0_SHIFT (2)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA0_MASK (0x00000004)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA0_SHIFT (3)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA0_MASK (0x00000008)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA1_SHIFT (4)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA1_MASK (0x00000030)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA1_SHIFT (6)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA1_MASK (0x00000040)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA1_SHIFT (7)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA1_MASK (0x00000080)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA2_SHIFT (8)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA2_MASK (0x00000300)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA2_SHIFT (10)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA2_MASK (0x00000400)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA2_SHIFT (11)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA2_MASK (0x00000800)
#define SCU_SGPIO_OUTPUT_DATA_SELECT_RESERVED_MASK (0xFFFFF000)

#define SCU_SGODSR_GEN_VAL(name, value) \
	SCU_GEN_VALUE(SCU_SGPIO_OUTPUT_DATA_SELECT_ ## name, value)

#define SCU_SGODSR_GEN_BIT(name) \
	SCU_GEN_BIT(SCU_SGPIO_OUTPUT_DATA_SELECT_ ## name)
993
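/*
 * Example (illustrative, using the OUTPUT_DATA_SELECT names above):
 *
 *	SCU_SGODSR_GEN_VAL(INPUT_DATA1, 2) |
 *	SCU_SGODSR_GEN_BIT(INVERT_INPUT_DATA0) == 0x00000024
 */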
994/*
995 * *****************************************************************************
996 * * SMU Registers
997 * ***************************************************************************** */
998
999/*
1000 * ----------------------------------------------------------------------------
1001 * SMU Registers
1002 * These registers are based off of BAR0
1003 *
1004 * To calculate the offset for other functions use
1005 * BAR0 + FN# * SystemPageSize * 2
1006 *
 * The TCA registers are only accessible from FN#0 (the physical function);
 * each one is programmed at (BAR0 + SCU_SMU_TCA_OFFSET + (FN# * 0x04)):
 *	TCA0 for FN#0 is at BAR0 + 0x0400
 *	TCA1 for FN#1 is at BAR0 + 0x0404
 *	etc.
 * ----------------------------------------------------------------------------
 * Accessible to all FN#s */
1014#define SCU_SMU_PCP_OFFSET 0x0000
1015#define SCU_SMU_AMR_OFFSET 0x0004
1016#define SCU_SMU_ISR_OFFSET 0x0010
1017#define SCU_SMU_IMR_OFFSET 0x0014
1018#define SCU_SMU_ICC_OFFSET 0x0018
1019#define SCU_SMU_HTTLBAR_OFFSET 0x0020
1020#define SCU_SMU_HTTUBAR_OFFSET 0x0024
1021#define SCU_SMU_TCR_OFFSET 0x0028
1022#define SCU_SMU_CQLBAR_OFFSET 0x0030
1023#define SCU_SMU_CQUBAR_OFFSET 0x0034
1024#define SCU_SMU_CQPR_OFFSET 0x0040
1025#define SCU_SMU_CQGR_OFFSET 0x0044
1026#define SCU_SMU_CQC_OFFSET 0x0048
/* Accessible to FN#0 only */
1028#define SCU_SMU_RNCLBAR_OFFSET 0x0080
1029#define SCU_SMU_RNCUBAR_OFFSET 0x0084
1030#define SCU_SMU_DCC_OFFSET 0x0090
1031#define SCU_SMU_DFC_OFFSET 0x0094
1032#define SCU_SMU_SMUCSR_OFFSET 0x0098
1033#define SCU_SMU_SCUSRCR_OFFSET 0x009C
1034#define SCU_SMU_SMAW_OFFSET 0x00A0
1035#define SCU_SMU_SMDW_OFFSET 0x00A4
/* Accessible to FN#0 only */
1037#define SCU_SMU_TCA_OFFSET 0x0400
/* Accessible to all FN#s */
1039#define SCU_SMU_MT_MLAR0_OFFSET 0x2000
1040#define SCU_SMU_MT_MUAR0_OFFSET 0x2004
1041#define SCU_SMU_MT_MDR0_OFFSET 0x2008
1042#define SCU_SMU_MT_VCR0_OFFSET 0x200C
1043#define SCU_SMU_MT_MLAR1_OFFSET 0x2010
1044#define SCU_SMU_MT_MUAR1_OFFSET 0x2014
1045#define SCU_SMU_MT_MDR1_OFFSET 0x2018
1046#define SCU_SMU_MT_VCR1_OFFSET 0x201C
1047#define SCU_SMU_MPBA_OFFSET 0x3000
1048
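/*
 * Example (illustrative, using the BAR0 formula above): for FN#2 with a
 * 4 KiB system page size the register block starts at
 * BAR0 + 2 * 4096 * 2 == BAR0 + 0x4000, and FN#2's TCA register is at
 * BAR0 + SCU_SMU_TCA_OFFSET + (2 * 0x04) == BAR0 + 0x0408.
 */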
1049/**
1050 * struct smu_registers - These are the SMU registers
1051 *
1052 *
1053 */
1054struct smu_registers {
1055/* 0x0000 PCP */
1056 u32 post_context_port;
1057/* 0x0004 AMR */
1058 u32 address_modifier;
1059 u32 reserved_08;
1060 u32 reserved_0C;
1061/* 0x0010 ISR */
1062 u32 interrupt_status;
1063/* 0x0014 IMR */
1064 u32 interrupt_mask;
1065/* 0x0018 ICC */
1066 u32 interrupt_coalesce_control;
1067 u32 reserved_1C;
1068/* 0x0020 HTTLBAR */
1069 u32 host_task_table_lower;
1070/* 0x0024 HTTUBAR */
1071 u32 host_task_table_upper;
1072/* 0x0028 TCR */
1073 u32 task_context_range;
1074 u32 reserved_2C;
1075/* 0x0030 CQLBAR */
1076 u32 completion_queue_lower;
1077/* 0x0034 CQUBAR */
1078 u32 completion_queue_upper;
1079 u32 reserved_38;
1080 u32 reserved_3C;
1081/* 0x0040 CQPR */
1082 u32 completion_queue_put;
1083/* 0x0044 CQGR */
1084 u32 completion_queue_get;
1085/* 0x0048 CQC */
1086 u32 completion_queue_control;
1087 u32 reserved_4C;
1088 u32 reserved_5x[4];
1089 u32 reserved_6x[4];
1090 u32 reserved_7x[4];
1091/*
 * Accessible to FN#0 only
1093 * 0x0080 RNCLBAR */
1094 u32 remote_node_context_lower;
1095/* 0x0084 RNCUBAR */
1096 u32 remote_node_context_upper;
1097 u32 reserved_88;
1098 u32 reserved_8C;
1099/* 0x0090 DCC */
1100 u32 device_context_capacity;
1101/* 0x0094 DFC */
1102 u32 device_function_capacity;
1103/* 0x0098 SMUCSR */
1104 u32 control_status;
1105/* 0x009C SCUSRCR */
1106 u32 soft_reset_control;
1107/* 0x00A0 SMAW */
1108 u32 mmr_address_window;
1109/* 0x00A4 SMDW */
1110 u32 mmr_data_window;
1111 u32 reserved_A8;
1112 u32 reserved_AC;
1113/* A whole bunch of reserved space */
1114 u32 reserved_Bx[4];
1115 u32 reserved_Cx[4];
1116 u32 reserved_Dx[4];
1117 u32 reserved_Ex[4];
1118 u32 reserved_Fx[4];
1119 u32 reserved_1xx[64];
1120 u32 reserved_2xx[64];
1121 u32 reserved_3xx[64];
1122/*
 * Accessible to FN#0 only
1124 * 0x0400 TCA */
1125 u32 task_context_assignment[256];
1126/* MSI-X registers not included */
1127};
1128
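/*
 * Illustrative sketch (not part of the hardware definition): given an
 * ioremapped BAR0, the struct layout above lines field accesses up with
 * the register offsets, e.g.
 *
 *	struct smu_registers __iomem *smu = bar0;	<- hypothetical mapping
 *	u32 isr = readl(&smu->interrupt_status);	<- BAR0 + 0x0010
 *	writel(SMU_ICC_GEN_VAL(NUMBER, 32), &smu->interrupt_coalesce_control);
 */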
1129/*
1130 * *****************************************************************************
1131 * SDMA Registers
1132 * ***************************************************************************** */
1133#define SCU_SDMA_BASE 0x6000
1134#define SCU_SDMA_PUFATLHAR_OFFSET 0x0000
1135#define SCU_SDMA_PUFATUHAR_OFFSET 0x0004
1136#define SCU_SDMA_UFLHBAR_OFFSET 0x0008
1137#define SCU_SDMA_UFUHBAR_OFFSET 0x000C
1138#define SCU_SDMA_UFQC_OFFSET 0x0010
1139#define SCU_SDMA_UFQPP_OFFSET 0x0014
1140#define SCU_SDMA_UFQGP_OFFSET 0x0018
1141#define SCU_SDMA_PDMACR_OFFSET 0x001C
1142#define SCU_SDMA_CDMACR_OFFSET 0x0080
1143
1144/**
1145 * struct scu_sdma_registers - These are the SCU SDMA Registers
1146 *
1147 *
1148 */
1149struct scu_sdma_registers {
1150/* 0x0000 PUFATLHAR */
1151 u32 uf_address_table_lower;
1152/* 0x0004 PUFATUHAR */
1153 u32 uf_address_table_upper;
1154/* 0x0008 UFLHBAR */
1155 u32 uf_header_base_address_lower;
1156/* 0x000C UFUHBAR */
1157 u32 uf_header_base_address_upper;
1158/* 0x0010 UFQC */
1159 u32 unsolicited_frame_queue_control;
1160/* 0x0014 UFQPP */
1161 u32 unsolicited_frame_put_pointer;
1162/* 0x0018 UFQGP */
1163 u32 unsolicited_frame_get_pointer;
1164/* 0x001C PDMACR */
1165 u32 pdma_configuration;
1166/* Reserved until offset 0x80 */
1167 u32 reserved_0020_007C[0x18];
1168/* 0x0080 CDMACR */
1169 u32 cdma_configuration;
1170/* Remainder SDMA register space */
1171 u32 reserved_0084_0400[0xDF];
1172
1173};
1174
1175/*
1176 * *****************************************************************************
1177 * * SCU Link Registers
1178 * ***************************************************************************** */
1179#define SCU_PEG0_OFFSET 0x0000
1180#define SCU_PEG1_OFFSET 0x8000
1181
1182#define SCU_TL0_OFFSET 0x0000
1183#define SCU_TL1_OFFSET 0x0400
1184#define SCU_TL2_OFFSET 0x0800
1185#define SCU_TL3_OFFSET 0x0C00
1186
1187#define SCU_LL_OFFSET 0x0080
1188#define SCU_LL0_OFFSET (SCU_TL0_OFFSET + SCU_LL_OFFSET)
1189#define SCU_LL1_OFFSET (SCU_TL1_OFFSET + SCU_LL_OFFSET)
1190#define SCU_LL2_OFFSET (SCU_TL2_OFFSET + SCU_LL_OFFSET)
1191#define SCU_LL3_OFFSET (SCU_TL3_OFFSET + SCU_LL_OFFSET)
1192
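/*
 * Example (illustrative): the absolute offset of a link layer block is
 * the sum of its group and transport layer offsets, e.g. for PEG1/TL2:
 *
 *	SCU_PEG1_OFFSET + SCU_LL2_OFFSET == 0x8000 + 0x0880 == 0x8880
 */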
1193/* Transport Layer Offsets (PEG + TL) */
1194#define SCU_TLCR_OFFSET 0x0000
1195#define SCU_TLADTR_OFFSET 0x0004
1196#define SCU_TLTTMR_OFFSET 0x0008
1197#define SCU_TLEECR0_OFFSET 0x000C
1198#define SCU_STPTLDARNI_OFFSET 0x0010
1199
1200
1201#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_SHIFT (0)
1202#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_MASK (0x00000001)
1203#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_SHIFT (1)
1204#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_MASK (0x00000002)
1205#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_SHIFT (3)
1206#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_MASK (0x00000008)
1207#define SCU_TLCR_CMD_NAK_STATUS_CODE_SHIFT (4)
1208#define SCU_TLCR_CMD_NAK_STATUS_CODE_MASK (0x00000010)
1209#define SCU_TLCR_RESERVED_MASK (0xFFFFFFEB)
1210
1211#define SCU_TLCR_GEN_BIT(name) \
1212 SCU_GEN_BIT(SCU_TLCR_ ## name)
1213
1214/**
1215 * struct scu_transport_layer_registers - These are the SCU Transport Layer
1216 * registers
1217 *
1218 *
1219 */
1220struct scu_transport_layer_registers {
1221 /* 0x0000 TLCR */
1222 u32 control;
1223 /* 0x0004 TLADTR */
1224 u32 arbitration_delay_timer;
1225 /* 0x0008 TLTTMR */
1226 u32 timer_test_mode;
1227 /* 0x000C reserved */
1228 u32 reserved_0C;
1229 /* 0x0010 STPTLDARNI */
1230 u32 stp_rni;
1231 /* 0x0014 TLFEWPORCTRL */
1232 u32 tlfe_wpo_read_control;
1233 /* 0x0018 TLFEWPORDATA */
1234 u32 tlfe_wpo_read_data;
1235 /* 0x001C RXTLSSCSR1 */
1236 u32 rxtl_single_step_control_status_1;
1237 /* 0x0020 RXTLSSCSR2 */
1238 u32 rxtl_single_step_control_status_2;
1239 /* 0x0024 AWTRDDCR */
1240 u32 tlfe_awt_retry_delay_debug_control;
1241 /* Remainder of TL memory space */
1242 u32 reserved_0028_007F[0x16];
1243
1244};
1245
1246/* Protocol Engine Group Registers */
1247#define SCU_SCUVZECRx_OFFSET 0x1080
1248
1249/* Link Layer Offsets (PEG + TL + LL) */
1250#define SCU_SAS_SPDTOV_OFFSET 0x0000
1251#define SCU_SAS_LLSTA_OFFSET 0x0004
1252#define SCU_SATA_PSELTOV_OFFSET 0x0008
1253#define SCU_SAS_TIMETOV_OFFSET 0x0010
1254#define SCU_SAS_LOSTOT_OFFSET 0x0014
1255#define SCU_SAS_LNKTOV_OFFSET 0x0018
1256#define SCU_SAS_PHYTOV_OFFSET 0x001C
1257#define SCU_SAS_AFERCNT_OFFSET 0x0020
1258#define SCU_SAS_WERCNT_OFFSET 0x0024
1259#define SCU_SAS_TIID_OFFSET 0x0028
1260#define SCU_SAS_TIDNH_OFFSET 0x002C
1261#define SCU_SAS_TIDNL_OFFSET 0x0030
1262#define SCU_SAS_TISSAH_OFFSET 0x0034
1263#define SCU_SAS_TISSAL_OFFSET 0x0038
1264#define SCU_SAS_TIPID_OFFSET 0x003C
1265#define SCU_SAS_TIRES2_OFFSET 0x0040
1266#define SCU_SAS_ADRSTA_OFFSET 0x0044
1267#define SCU_SAS_MAWTTOV_OFFSET 0x0048
1268#define SCU_SAS_FRPLDFIL_OFFSET 0x0054
1269#define SCU_SAS_RFCNT_OFFSET 0x0060
1270#define SCU_SAS_TFCNT_OFFSET 0x0064
1271#define SCU_SAS_RFDCNT_OFFSET 0x0068
1272#define SCU_SAS_TFDCNT_OFFSET 0x006C
1273#define SCU_SAS_LERCNT_OFFSET 0x0070
1274#define SCU_SAS_RDISERRCNT_OFFSET 0x0074
1275#define SCU_SAS_CRERCNT_OFFSET 0x0078
1276#define SCU_STPCTL_OFFSET 0x007C
1277#define SCU_SAS_PCFG_OFFSET 0x0080
1278#define SCU_SAS_CLKSM_OFFSET 0x0084
1279#define SCU_SAS_TXCOMWAKE_OFFSET 0x0088
1280#define SCU_SAS_TXCOMINIT_OFFSET 0x008C
1281#define SCU_SAS_TXCOMSAS_OFFSET 0x0090
1282#define SCU_SAS_COMINIT_OFFSET 0x0094
1283#define SCU_SAS_COMWAKE_OFFSET 0x0098
1284#define SCU_SAS_COMSAS_OFFSET 0x009C
1285#define SCU_SAS_SFERCNT_OFFSET 0x00A0
1286#define SCU_SAS_CDFERCNT_OFFSET 0x00A4
1287#define SCU_SAS_DNFERCNT_OFFSET 0x00A8
1288#define SCU_SAS_PRSTERCNT_OFFSET 0x00AC
1289#define SCU_SAS_CNTCTL_OFFSET 0x00B0
1290#define SCU_SAS_SSPTOV_OFFSET 0x00B4
1291#define SCU_FTCTL_OFFSET 0x00B8
1292#define SCU_FRCTL_OFFSET 0x00BC
1293#define SCU_FTWMRK_OFFSET 0x00C0
1294#define SCU_ENSPINUP_OFFSET 0x00C4
1295#define SCU_SAS_TRNTOV_OFFSET 0x00C8
1296#define SCU_SAS_PHYCAP_OFFSET 0x00CC
1297#define SCU_SAS_PHYCTL_OFFSET 0x00D0
1298#define SCU_SAS_LLCTL_OFFSET 0x00D8
1299#define SCU_AFE_XCVRCR_OFFSET 0x00DC
1300#define SCU_AFE_LUTCR_OFFSET 0x00E0
1301
1302#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0)
1303#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003)
1304#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0)
1305#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2 (1)
1306#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3 (2)
1307#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_SHIFT (2)
1308#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_MASK (0x000003FC)
1309#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_SHIFT (16)
1310#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_MASK (0x00010000)
1311#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_SHIFT (17)
1312#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_MASK (0x00020000)
1313#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_SHIFT (24)
1314#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_MASK (0xFF000000)
1315#define SCU_SAS_LINK_LAYER_CONTROL_RESERVED (0x00FCFC00)
1316
1317#define SCU_SAS_LLCTL_GEN_VAL(name, value) \
1318 SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_CONTROL_ ## name, value)
1319
1320#define SCU_SAS_LLCTL_GEN_BIT(name) \
1321 SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name)
1322
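/*
 * Example (illustrative): capping a phy at gen2 with a 0xF8 "no
 * outbound task" timeout composes as
 *
 *	SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE,
 *		SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2) |
 *	SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, 0xF8) == 0xF8000001
 */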
1323
1324/* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */
1325#define SCU_PSZGCR_OFFSET 0x00E4
1326#define SCU_SAS_RECPHYCAP_OFFSET 0x00E8
1327/* #define SCU_TX_LUTSEL_OFFSET 0x00B8 */
1328
1329#define SCU_SAS_PTxC_OFFSET 0x00D4 /* Same offset as SAS_TCTSTM */
1330
1331/**
1332 * struct scu_link_layer_registers - SCU Link Layer Registers
1333 *
1334 *
1335 */
1336struct scu_link_layer_registers {
1337/* 0x0000 SAS_SPDTOV */
1338 u32 speed_negotiation_timers;
1339/* 0x0004 SAS_LLSTA */
1340 u32 link_layer_status;
1341/* 0x0008 SATA_PSELTOV */
1342 u32 port_selector_timeout;
1343 u32 reserved0C;
1344/* 0x0010 SAS_TIMETOV */
1345 u32 timeout_unit_value;
1346/* 0x0014 SAS_RCDTOV */
1347 u32 rcd_timeout;
1348/* 0x0018 SAS_LNKTOV */
1349 u32 link_timer_timeouts;
1350/* 0x001C SAS_PHYTOV */
1351 u32 sas_phy_timeouts;
1352/* 0x0020 SAS_AFERCNT */
1353 u32 received_address_frame_error_counter;
1354/* 0x0024 SAS_WERCNT */
1355 u32 invalid_dword_counter;
1356/* 0x0028 SAS_TIID */
1357 u32 transmit_identification;
1358/* 0x002C SAS_TIDNH */
1359 u32 sas_device_name_high;
1360/* 0x0030 SAS_TIDNL */
1361 u32 sas_device_name_low;
1362/* 0x0034 SAS_TISSAH */
1363 u32 source_sas_address_high;
1364/* 0x0038 SAS_TISSAL */
1365 u32 source_sas_address_low;
1366/* 0x003C SAS_TIPID */
1367 u32 identify_frame_phy_id;
1368/* 0x0040 SAS_TIRES2 */
1369 u32 identify_frame_reserved;
1370/* 0x0044 SAS_ADRSTA */
1371 u32 received_address_frame;
1372/* 0x0048 SAS_MAWTTOV */
1373 u32 maximum_arbitration_wait_timer_timeout;
1374/* 0x004C SAS_PTxC */
1375 u32 transmit_primitive;
1376/* 0x0050 SAS_RORES */
1377 u32 error_counter_event_notification_control;
1378/* 0x0054 SAS_FRPLDFIL */
1379 u32 frxq_payload_fill_threshold;
1380/* 0x0058 SAS_LLHANG_TOT */
1381 u32 link_layer_hang_detection_timeout;
1382 u32 reserved_5C;
1383/* 0x0060 SAS_RFCNT */
1384 u32 received_frame_count;
1385/* 0x0064 SAS_TFCNT */
1386 u32 transmit_frame_count;
1387/* 0x0068 SAS_RFDCNT */
1388 u32 received_dword_count;
1389/* 0x006C SAS_TFDCNT */
1390 u32 transmit_dword_count;
1391/* 0x0070 SAS_LERCNT */
1392 u32 loss_of_sync_error_count;
1393/* 0x0074 SAS_RDISERRCNT */
1394 u32 running_disparity_error_count;
1395/* 0x0078 SAS_CRERCNT */
1396 u32 received_frame_crc_error_count;
1397/* 0x007C STPCTL */
1398 u32 stp_control;
1399/* 0x0080 SAS_PCFG */
1400 u32 phy_configuration;
1401/* 0x0084 SAS_CLKSM */
1402 u32 clock_skew_management;
1403/* 0x0088 SAS_TXCOMWAKE */
1404 u32 transmit_comwake_signal;
1405/* 0x008C SAS_TXCOMINIT */
1406 u32 transmit_cominit_signal;
1407/* 0x0090 SAS_TXCOMSAS */
1408 u32 transmit_comsas_signal;
1409/* 0x0094 SAS_COMINIT */
1410 u32 cominit_control;
1411/* 0x0098 SAS_COMWAKE */
1412 u32 comwake_control;
1413/* 0x009C SAS_COMSAS */
1414 u32 comsas_control;
1415/* 0x00A0 SAS_SFERCNT */
1416 u32 received_short_frame_count;
1417/* 0x00A4 SAS_CDFERCNT */
1418 u32 received_frame_without_credit_count;
1419/* 0x00A8 SAS_DNFERCNT */
1420 u32 received_frame_after_done_count;
1421/* 0x00AC SAS_PRSTERCNT */
1422 u32 phy_reset_problem_count;
1423/* 0x00B0 SAS_CNTCTL */
1424 u32 counter_control;
1425/* 0x00B4 SAS_SSPTOV */
1426 u32 ssp_timer_timeout_values;
1427/* 0x00B8 FTCTL */
1428 u32 ftx_control;
1429/* 0x00BC FRCTL */
1430 u32 frx_control;
1431/* 0x00C0 FTWMRK */
1432 u32 ftx_watermark;
1433/* 0x00C4 ENSPINUP */
1434 u32 notify_enable_spinup_control;
1435/* 0x00C8 SAS_TRNTOV */
1436 u32 sas_training_sequence_timer_values;
1437/* 0x00CC SAS_PHYCAP */
1438 u32 phy_capabilities;
1439/* 0x00D0 SAS_PHYCTL */
1440 u32 phy_control;
1441 u32 reserved_d4;
1442/* 0x00D8 LLCTL */
1443 u32 link_layer_control;
1444/* 0x00DC AFE_XCVRCR */
1445 u32 afe_xcvr_control;
1446/* 0x00E0 AFE_LUTCR */
1447 u32 afe_lookup_table_control;
1448/* 0x00E4 PSZGCR */
1449 u32 phy_source_zone_group_control;
1450/* 0x00E8 SAS_RECPHYCAP */
1451 u32 receive_phycap;
1452 u32 reserved_ec;
1453/* 0x00F0 SNAFERXRSTCTL */
1454 u32 speed_negotiation_afe_rx_reset_control;
1455/* 0x00F4 SAS_SSIPMCTL */
1456 u32 power_management_control;
1457/* 0x00F8 SAS_PSPREQ_PRIM */
1458 u32 sas_pm_partial_request_primitive;
1459/* 0x00FC SAS_PSSREQ_PRIM */
1460 u32 sas_pm_slumber_request_primitive;
1461/* 0x0100 SAS_PPSACK_PRIM */
1462 u32 sas_pm_ack_primitive_register;
1463/* 0x0104 SAS_PSNAK_PRIM */
1464 u32 sas_pm_nak_primitive_register;
1465/* 0x0108 SAS_SSIPMTOV */
1466 u32 sas_primitive_timeout;
1467 u32 reserved_10c;
1468/* 0x0110 - 0x011C PLAPRDCTRLxREG */
1469 u32 pla_product_control[4];
1470/* 0x0120 PLAPRDSUMREG */
1471 u32 pla_product_sum;
1472/* 0x0124 PLACONTROLREG */
1473 u32 pla_control;
1474/* Remainder of memory space 896 bytes */
1475 u32 reserved_0128_037f[0x96];
1476
1477};
1478
/*
 * 0x00D4 SAS_PTxC (same offset as SAS_TCTSTM):
 * u32 primitive_transmit_control; */
1482
1483/*
1484 * ----------------------------------------------------------------------------
1485 * SGPIO
1486 * ---------------------------------------------------------------------------- */
1487#define SCU_SGPIO_OFFSET 0x1400
1488
1489/* #define SCU_SGPIO_OFFSET 0x6000 // later moves to 0x1400 see HSD 652625 */
1490#define SCU_SGPIO_SGICR_OFFSET 0x0000
1491#define SCU_SGPIO_SGPBR_OFFSET 0x0004
1492#define SCU_SGPIO_SGSDLR_OFFSET 0x0008
1493#define SCU_SGPIO_SGSDUR_OFFSET 0x000C
1494#define SCU_SGPIO_SGSIDLR_OFFSET 0x0010
1495#define SCU_SGPIO_SGSIDUR_OFFSET 0x0014
1496#define SCU_SGPIO_SGVSCR_OFFSET 0x0018
1497/* Address from 0x0820 to 0x083C */
1498#define SCU_SGPIO_SGODSR_OFFSET 0x0020
1499
1500/**
1501 * struct scu_sgpio_registers - SCU SGPIO Registers
1502 *
1503 *
1504 */
1505struct scu_sgpio_registers {
1506/* 0x0000 SGPIO_SGICR */
1507 u32 interface_control;
1508/* 0x0004 SGPIO_SGPBR */
1509 u32 blink_rate;
1510/* 0x0008 SGPIO_SGSDLR */
1511 u32 start_drive_lower;
1512/* 0x000C SGPIO_SGSDUR */
1513 u32 start_drive_upper;
1514/* 0x0010 SGPIO_SGSIDLR */
1515 u32 serial_input_lower;
1516/* 0x0014 SGPIO_SGSIDUR */
1517 u32 serial_input_upper;
1518/* 0x0018 SGPIO_SGVSCR */
	u32 vendor_specific_code;
/* 0x001C reserved */
	u32 reserved_1c;
/* 0x0020 SGPIO_SGODSR */
	u32 output_data_select[8];
/* Remainder of memory space 256 bytes */
	u32 reserved_1444_14ff[0x30];
1524
1525};
1526
1527/*
1528 * *****************************************************************************
1529 * * Defines for VIIT entry offsets
1530 * * Access additional entries by SCU_VIIT_BASE + index * 0x10
1531 * ***************************************************************************** */
1532#define SCU_VIIT_BASE 0x1c00
1533
1534struct scu_viit_registers {
1535 u32 registers[256];
1536};
1537
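/*
 * Example (illustrative, per the note above): VIIT entry 5 starts at
 * SCU_VIIT_BASE + 5 * 0x10 == 0x1c50.
 */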
1538/*
1539 * *****************************************************************************
1540 * * SCU PORT TASK SCHEDULER REGISTERS
1541 * ***************************************************************************** */
1542
1543#define SCU_PTSG_BASE 0x1000
1544
1545#define SCU_PTSG_PTSGCR_OFFSET 0x0000
1546#define SCU_PTSG_RTCR_OFFSET 0x0004
1547#define SCU_PTSG_RTCCR_OFFSET 0x0008
1548#define SCU_PTSG_PTS0CR_OFFSET 0x0010
1549#define SCU_PTSG_PTS0SR_OFFSET 0x0014
1550#define SCU_PTSG_PTS1CR_OFFSET 0x0018
1551#define SCU_PTSG_PTS1SR_OFFSET 0x001C
1552#define SCU_PTSG_PTS2CR_OFFSET 0x0020
1553#define SCU_PTSG_PTS2SR_OFFSET 0x0024
1554#define SCU_PTSG_PTS3CR_OFFSET 0x0028
1555#define SCU_PTSG_PTS3SR_OFFSET 0x002C
1556#define SCU_PTSG_PCSPE0CR_OFFSET 0x0030
1557#define SCU_PTSG_PCSPE1CR_OFFSET 0x0034
1558#define SCU_PTSG_PCSPE2CR_OFFSET 0x0038
1559#define SCU_PTSG_PCSPE3CR_OFFSET 0x003C
1560#define SCU_PTSG_ETMTSCCR_OFFSET 0x0040
1561#define SCU_PTSG_ETMRNSCCR_OFFSET 0x0044
1562
1563/**
1564 * struct scu_port_task_scheduler_registers - These are the control/stats pairs
1565 * for each Port Task Scheduler.
1566 *
1567 *
1568 */
1569struct scu_port_task_scheduler_registers {
1570 u32 control;
1571 u32 status;
1572};
1573
1574/**
1575 * struct scu_port_task_scheduler_group_registers - These are the PORT Task
1576 * Scheduler registers
1577 *
1578 *
1579 */
1580struct scu_port_task_scheduler_group_registers {
1581/* 0x0000 PTSGCR */
1582 u32 control;
1583/* 0x0004 RTCR */
1584 u32 real_time_clock;
1585/* 0x0008 RTCCR */
1586 u32 real_time_clock_control;
1587/* 0x000C */
1588 u32 reserved_0C;
1589/*
1590 * 0x0010 PTS0CR
1591 * 0x0014 PTS0SR
1592 * 0x0018 PTS1CR
1593 * 0x001C PTS1SR
1594 * 0x0020 PTS2CR
1595 * 0x0024 PTS2SR
1596 * 0x0028 PTS3CR
1597 * 0x002C PTS3SR */
1598 struct scu_port_task_scheduler_registers port[4];
1599/*
1600 * 0x0030 PCSPE0CR
1601 * 0x0034 PCSPE1CR
1602 * 0x0038 PCSPE2CR
1603 * 0x003C PCSPE3CR */
1604 u32 protocol_engine[4];
1605/* 0x0040 ETMTSCCR */
1606 u32 tc_scanning_interval_control;
1607/* 0x0044 ETMRNSCCR */
1608 u32 rnc_scanning_interval_control;
1609/* Remainder of memory space 128 bytes */
1610 u32 reserved_1048_107f[0x0E];
1611
1612};
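/*
 * Editor's sketch, not part of the original source: each port task scheduler
 * exposes a control/status pair, so the status register for scheduler 'n'
 * (PTSnSR) is reachable through the port[] array above. The helper name is
 * hypothetical.
 */
static inline u32 scu_ptsg_port_status(
	struct scu_port_task_scheduler_group_registers __iomem *ptsg, int n)
{
	return readl(&ptsg->port[n].status);
}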
1613
1614#define SCU_PTSG_SCUVZECR_OFFSET 0x003C
1615
1616/*
1617 * *****************************************************************************
1618 * * AFE REGISTERS
1619 * ***************************************************************************** */
1620#define SCU_AFE_MMR_BASE 0xE000
1621
1622/*
1623 * AFE 0 is at offset 0x0800
1624 * AFE 1 is at offset 0x0900
1625 * AFE 2 is at offset 0x0a00
1626 * AFE 3 is at offset 0x0b00 */
1627struct scu_afe_transceiver {
1628 /* 0x0000 AFE_XCVR_CTRL0 */
1629 u32 afe_xcvr_control0;
1630 /* 0x0004 AFE_XCVR_CTRL1 */
1631 u32 afe_xcvr_control1;
1632 /* 0x0008 */
1633 u32 reserved_0008;
1634 /* 0x000c afe_dfx_rx_control0 */
1635 u32 afe_dfx_rx_control0;
1636 /* 0x0010 AFE_DFX_RX_CTRL1 */
1637 u32 afe_dfx_rx_control1;
1638 /* 0x0014 */
1639 u32 reserved_0014;
1640 /* 0x0018 AFE_DFX_RX_STS0 */
1641 u32 afe_dfx_rx_status0;
1642 /* 0x001c AFE_DFX_RX_STS1 */
1643 u32 afe_dfx_rx_status1;
1644 /* 0x0020 */
1645 u32 reserved_0020;
1646 /* 0x0024 AFE_TX_CTRL */
1647 u32 afe_tx_control;
1648 /* 0x0028 AFE_TX_AMP_CTRL0 */
1649 u32 afe_tx_amp_control0;
1650 /* 0x002c AFE_TX_AMP_CTRL1 */
1651 u32 afe_tx_amp_control1;
1652 /* 0x0030 AFE_TX_AMP_CTRL2 */
1653 u32 afe_tx_amp_control2;
1654 /* 0x0034 AFE_TX_AMP_CTRL3 */
1655 u32 afe_tx_amp_control3;
1656 /* 0x0038 afe_tx_ssc_control */
1657 u32 afe_tx_ssc_control;
1658 /* 0x003c */
1659 u32 reserved_003c;
1660 /* 0x0040 AFE_RX_SSC_CTRL0 */
1661 u32 afe_rx_ssc_control0;
1662 /* 0x0044 AFE_RX_SSC_CTRL1 */
1663 u32 afe_rx_ssc_control1;
1664 /* 0x0048 AFE_RX_SSC_CTRL2 */
1665 u32 afe_rx_ssc_control2;
1666 /* 0x004c AFE_RX_EQ_STS0 */
1667 u32 afe_rx_eq_status0;
1668 /* 0x0050 AFE_RX_EQ_STS1 */
1669 u32 afe_rx_eq_status1;
1670 /* 0x0054 AFE_RX_CDR_STS */
1671 u32 afe_rx_cdr_status;
1672 /* 0x0058 */
1673 u32 reserved_0058;
1674 /* 0x005c AFE_CHAN_CTRL */
1675 u32 afe_channel_control;
1676 /* 0x0060-0x006c */
1677 u32 reserved_0060_006c[0x04];
1678 /* 0x0070 AFE_XCVR_EC_STS0 */
1679 u32 afe_xcvr_error_capture_status0;
1680 /* 0x0074 AFE_XCVR_EC_STS1 */
1681 u32 afe_xcvr_error_capture_status1;
1682 /* 0x0078 AFE_XCVR_EC_STS2 */
1683 u32 afe_xcvr_error_capture_status2;
1684 /* 0x007c afe_xcvr_ec_status3 */
1685 u32 afe_xcvr_error_capture_status3;
1686 /* 0x0080 AFE_XCVR_EC_STS4 */
1687 u32 afe_xcvr_error_capture_status4;
1688 /* 0x0084 AFE_XCVR_EC_STS5 */
1689 u32 afe_xcvr_error_capture_status5;
1690 /* 0x0088-0x00fc */
1691 u32 reserved_008c_00fc[0x1e];
1692};
1693
1694/**
1695 * struct scu_afe_registers - AFE Registers
1696 *
1697 *
1698 */
1700struct scu_afe_registers {
1701 /* 0Xe000 AFE_BIAS_CTRL */
1702 u32 afe_bias_control;
1703 u32 reserved_0004;
1704 /* 0x0008 AFE_PLL_CTRL0 */
1705 u32 afe_pll_control0;
1706 /* 0x000c AFE_PLL_CTRL1 */
1707 u32 afe_pll_control1;
1708 /* 0x0010 AFE_PLL_CTRL2 */
1709 u32 afe_pll_control2;
1710 /* 0x0014 AFE_CB_STS */
1711 u32 afe_common_block_status;
1712 /* 0x0018-0x007c */
1713 u32 reserved_18_7c[0x1a];
1714 /* 0x0080 AFE_PMSN_MCTRL0 */
1715 u32 afe_pmsn_master_control0;
1716 /* 0x0084 AFE_PMSN_MCTRL1 */
1717 u32 afe_pmsn_master_control1;
1718 /* 0x0088 AFE_PMSN_MCTRL2 */
1719 u32 afe_pmsn_master_control2;
1720 /* 0x008C-0x00fc */
1721 u32 reserved_008c_00fc[0x1D];
1722 /* 0x0100 AFE_DFX_MST_CTRL0 */
1723 u32 afe_dfx_master_control0;
1724 /* 0x0104 AFE_DFX_MST_CTRL1 */
1725 u32 afe_dfx_master_control1;
1726 /* 0x0108 AFE_DFX_DCL_CTRL */
1727 u32 afe_dfx_dcl_control;
1728 /* 0x010c AFE_DFX_DMON_CTRL */
1729 u32 afe_dfx_digital_monitor_control;
1730 /* 0x0110 AFE_DFX_AMONP_CTRL */
1731 u32 afe_dfx_analog_p_monitor_control;
1732 /* 0x0114 AFE_DFX_AMONN_CTRL */
1733 u32 afe_dfx_analog_n_monitor_control;
1734 /* 0x0118 AFE_DFX_NTL_STS */
1735 u32 afe_dfx_ntl_status;
1736 /* 0x011c AFE_DFX_FIFO_STS0 */
1737 u32 afe_dfx_fifo_status0;
1738 /* 0x0120 AFE_DFX_FIFO_STS1 */
1739 u32 afe_dfx_fifo_status1;
1740 /* 0x0124 AFE_DFX_MPAT_CTRL */
1741 u32 afe_dfx_master_pattern_control;
1742 /* 0x0128 AFE_DFX_P0_CTRL */
1743 u32 afe_dfx_p0_control;
1744 /* 0x012c-0x01a8 AFE_DFX_P0_DRx */
1745 u32 afe_dfx_p0_data[32];
1746 /* 0x01ac */
1747 u32 reserved_01ac;
1748 /* 0x01b0-0x020c AFE_DFX_P0_IRx */
1749 u32 afe_dfx_p0_instruction[24];
1750 /* 0x0210 */
1751 u32 reserved_0210;
1752 /* 0x0214 AFE_DFX_P1_CTRL */
1753 u32 afe_dfx_p1_control;
1754 /* 0x0218-0x245 AFE_DFX_P1_DRx */
1755 u32 afe_dfx_p1_data[16];
1756 /* 0x0258-0x029c */
1757 u32 reserved_0258_029c[0x12];
1758 /* 0x02a0-0x02bc AFE_DFX_P1_IRx */
1759 u32 afe_dfx_p1_instruction[8];
1760 /* 0x02c0-0x2fc */
1761 u32 reserved_02c0_02fc[0x10];
1762 /* 0x0300 AFE_DFX_TX_PMSN_CTRL */
1763 u32 afe_dfx_tx_pmsn_control;
1764 /* 0x0304 AFE_DFX_RX_PMSN_CTRL */
1765 u32 afe_dfx_rx_pmsn_control;
1766 u32 reserved_0308;
1767 /* 0x030c AFE_DFX_NOA_CTRL0 */
1768 u32 afe_dfx_noa_control0;
1769 /* 0x0310 AFE_DFX_NOA_CTRL1 */
1770 u32 afe_dfx_noa_control1;
1771 /* 0x0314 AFE_DFX_NOA_CTRL2 */
1772 u32 afe_dfx_noa_control2;
1773 /* 0x0318 AFE_DFX_NOA_CTRL3 */
1774 u32 afe_dfx_noa_control3;
1775 /* 0x031c AFE_DFX_NOA_CTRL4 */
1776 u32 afe_dfx_noa_control4;
1777 /* 0x0320 AFE_DFX_NOA_CTRL5 */
1778 u32 afe_dfx_noa_control5;
1779 /* 0x0324 AFE_DFX_NOA_CTRL6 */
1780 u32 afe_dfx_noa_control6;
1781 /* 0x0328 AFE_DFX_NOA_CTRL7 */
1782 u32 afe_dfx_noa_control7;
1783 /* 0x032c-0x07fc */
1784 u32 reserved_032c_07fc[0x135];
1785
1786 /* 0x0800-0x0bfc */
1787 struct scu_afe_transceiver scu_afe_xcvr[4];
1788
1789 /* 0x0c00-0x0ffc */
1790 u32 reserved_0c00_0ffc[0x0100];
1791};
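/*
 * Editor's sketch, not part of the original source: per the layout comment
 * above, transceiver 'i' sits at offset 0x0800 + i * 0x100 within the AFE
 * block, which is exactly what indexing scu_afe_xcvr[] yields. The helper
 * name is hypothetical.
 */
static inline struct scu_afe_transceiver __iomem *
scu_afe_xcvr_regs(struct scu_afe_registers __iomem *afe, int i)
{
	return &afe->scu_afe_xcvr[i];
}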
1792
1793struct scu_protocol_engine_group_registers {
1794 u32 table[0xE0];
1795};
1796
1797
1798struct scu_viit_iit {
1799 u32 table[256];
1800};
1801
1802/**
1803 * Placeholder for the ZONE partition table information. Zoning will not be
1804 * included in the 1.1 release.
1805 *
1806 *
1807 */
1808struct scu_zone_partition_table {
1809 u32 table[2048];
1810};
1811
1812/**
1813 * Placeholder for the CRAM registers since I am not sure whether we need to
1814 * read/write these registers yet.
1815 *
1816 *
1817 */
1818struct scu_completion_ram {
1819 u32 ram[128];
1820};
1821
1822/**
1823 * Placeholder for the FBRAM registers since I am not sure whether we need to
1824 * read/write these registers yet.
1825 *
1826 *
1827 */
1828struct scu_frame_buffer_ram {
1829 u32 ram[128];
1830};
1831
1832#define scu_scratch_ram_SIZE_IN_DWORDS 256
1833
1834/**
1835 * Placeholder for the scratch RAM registers.
1836 *
1837 *
1838 */
1839struct scu_scratch_ram {
1840 u32 ram[scu_scratch_ram_SIZE_IN_DWORDS];
1841};
1842
1843/**
1844 * Placeholder since I am not yet sure what these registers are here for.
1845 *
1846 *
1847 */
1848struct noa_protocol_engine_partition {
1849 u32 reserved[64];
1850};
1851
1852/**
1853 * Placeholder since I am not yet sure what these registers are here for.
1854 *
1855 *
1856 */
1857struct noa_hub_partition {
1858 u32 reserved[64];
1859};
1860
1861/**
1862 * Placeholder since I am not yet sure what these registers are here for.
1863 *
1864 *
1865 */
1866struct noa_host_interface_partition {
1867 u32 reserved[64];
1868};
1869
1870/**
1871 * struct transport_link_layer_pair - The SCU hardware pairs up the TL
1872 * registers with the LL registers, so we must place them adjacent to form the
1873 * array of registers in the PEG.
1874 *
1875 *
1876 */
1877struct transport_link_layer_pair {
1878 struct scu_transport_layer_registers tl;
1879 struct scu_link_layer_registers ll;
1880};
1881
1882/**
1883 * struct scu_peg_registers - SCU Protocol Engine Memory mapped register space.
1884 * These registers are unique to each protocol engine group. There can be
1885 * at most two PEGs for a single SCU part.
1886 *
1887 *
1888 */
1889struct scu_peg_registers {
1890 struct transport_link_layer_pair pe[4];
1891 struct scu_port_task_scheduler_group_registers ptsg;
1892 struct scu_protocol_engine_group_registers peg;
1893 struct scu_sgpio_registers sgpio;
1894 u32 reserved_01500_1BFF[0x1C0];
1895 struct scu_viit_entry viit[64];
1896 struct scu_zone_partition_table zpt0;
1897 struct scu_zone_partition_table zpt1;
1898};
1899
1900/**
1901 * struct scu_registers - SCU registers including both PEG registers if we turn
1902 * on that compile option. All of these registers are in the memory mapped
1903 * space returned from BAR1.
1904 *
1905 *
1906 */
1907struct scu_registers {
1908 /* 0x0000 - PEG 0 */
1909 struct scu_peg_registers peg0;
1910
1911 /* 0x6000 - SDMA and Miscellaneous */
1912 struct scu_sdma_registers sdma;
1913 struct scu_completion_ram cram;
1914 struct scu_frame_buffer_ram fbram;
1915 u32 reserved_6800_69FF[0x80];
1916 struct noa_protocol_engine_partition noa_pe;
1917 struct noa_hub_partition noa_hub;
1918 struct noa_host_interface_partition noa_if;
1919 u32 reserved_6d00_7fff[0x4c0];
1920
1921 /* 0x8000 - PEG 1 */
1922 struct scu_peg_registers peg1;
1923
1924 /* 0xE000 - AFE Registers */
1925 struct scu_afe_registers afe;
1926
1927 /* 0xF000 - reserved */
1928 u32 reserved_f000_211fff[0x80c00];
1929
1930 /* 0x212000 - scratch RAM */
1931 struct scu_scratch_ram scratch_ram;
1932};
1933
1934#endif /* _SCU_REGISTERS_HEADER_ */
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
new file mode 100644
index 000000000000..b6e6368c2665
--- /dev/null
+++ b/drivers/scsi/isci/remote_device.c
@@ -0,0 +1,1501 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55#include <scsi/sas.h>
56#include "isci.h"
57#include "port.h"
58#include "remote_device.h"
59#include "request.h"
60#include "remote_node_context.h"
61#include "scu_event_codes.h"
62#include "task.h"
63
64/**
65 * isci_remote_device_not_ready() - This function is called by the ihost when
 66 * the remote device is not ready. We mark the isci device as not
 67 * "ready_for_io" and signal the waiting process.
 68 * @ihost: This parameter specifies the isci host object.
 69 * @idev: This parameter specifies the remote device.
 * @reason: This parameter specifies why the device is not ready.
70 *
71 * sci_lock is held on entrance to this function.
72 */
73static void isci_remote_device_not_ready(struct isci_host *ihost,
74 struct isci_remote_device *idev, u32 reason)
75{
76 struct isci_request *ireq;
77
78 dev_dbg(&ihost->pdev->dev,
79 "%s: isci_device = %p\n", __func__, idev);
80
81 switch (reason) {
82 case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
83 set_bit(IDEV_GONE, &idev->flags);
84 break;
85 case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
86 set_bit(IDEV_IO_NCQERROR, &idev->flags);
87
88 /* Kill all outstanding requests for the device. */
89 list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {
90
91 dev_dbg(&ihost->pdev->dev,
92 "%s: isci_device = %p request = %p\n",
93 __func__, idev, ireq);
94
95 sci_controller_terminate_request(ihost,
96 idev,
97 ireq);
98 }
99 /* Fall through into the default case... */
100 default:
101 clear_bit(IDEV_IO_READY, &idev->flags);
102 break;
103 }
104}
105
106/**
107 * isci_remote_device_ready() - This function is called by the ihost when the
108 * remote device is ready. We mark the isci device as ready and signal the
 109 * waiting process.
110 * @ihost: our valid isci_host
111 * @idev: remote device
112 *
113 */
114static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
115{
116 dev_dbg(&ihost->pdev->dev,
117 "%s: idev = %p\n", __func__, idev);
118
119 clear_bit(IDEV_IO_NCQERROR, &idev->flags);
120 set_bit(IDEV_IO_READY, &idev->flags);
121 if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
122 wake_up(&ihost->eventq);
123}
124
125/* called once the remote node context is ready to be freed.
 126 * The remote device can now report that its stop operation is complete.
127 */
128static void rnc_destruct_done(void *_dev)
129{
130 struct isci_remote_device *idev = _dev;
131
132 BUG_ON(idev->started_request_count != 0);
133 sci_change_state(&idev->sm, SCI_DEV_STOPPED);
134}
135
136static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
137{
138 struct isci_host *ihost = idev->owning_port->owning_controller;
139 enum sci_status status = SCI_SUCCESS;
140 u32 i;
141
142 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
143 struct isci_request *ireq = ihost->reqs[i];
144 enum sci_status s;
145
146 if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
147 ireq->target_device != idev)
148 continue;
149
150 s = sci_controller_terminate_request(ihost, idev, ireq);
151 if (s != SCI_SUCCESS)
152 status = s;
153 }
154
155 return status;
156}
157
158enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
159 u32 timeout)
160{
161 struct sci_base_state_machine *sm = &idev->sm;
162 enum sci_remote_device_states state = sm->current_state_id;
163
164 switch (state) {
165 case SCI_DEV_INITIAL:
166 case SCI_DEV_FAILED:
167 case SCI_DEV_FINAL:
168 default:
169 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
170 __func__, state);
171 return SCI_FAILURE_INVALID_STATE;
172 case SCI_DEV_STOPPED:
173 return SCI_SUCCESS;
174 case SCI_DEV_STARTING:
175 /* device not started so there had better be no requests */
176 BUG_ON(idev->started_request_count != 0);
177 sci_remote_node_context_destruct(&idev->rnc,
178 rnc_destruct_done, idev);
179 /* Transition to the stopping state and wait for the
180 * remote node to complete being posted and invalidated.
181 */
182 sci_change_state(sm, SCI_DEV_STOPPING);
183 return SCI_SUCCESS;
184 case SCI_DEV_READY:
185 case SCI_STP_DEV_IDLE:
186 case SCI_STP_DEV_CMD:
187 case SCI_STP_DEV_NCQ:
188 case SCI_STP_DEV_NCQ_ERROR:
189 case SCI_STP_DEV_AWAIT_RESET:
190 case SCI_SMP_DEV_IDLE:
191 case SCI_SMP_DEV_CMD:
192 sci_change_state(sm, SCI_DEV_STOPPING);
193 if (idev->started_request_count == 0) {
194 sci_remote_node_context_destruct(&idev->rnc,
195 rnc_destruct_done, idev);
196 return SCI_SUCCESS;
197 } else
198 return sci_remote_device_terminate_requests(idev);
199 break;
200 case SCI_DEV_STOPPING:
201 /* All requests should have been terminated, but if there is an
202 * attempt to stop a device already in the stopping state, then
203 * try again to terminate.
204 */
205 return sci_remote_device_terminate_requests(idev);
206 case SCI_DEV_RESETTING:
207 sci_change_state(sm, SCI_DEV_STOPPING);
208 return SCI_SUCCESS;
209 }
210}
211
212enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
213{
214 struct sci_base_state_machine *sm = &idev->sm;
215 enum sci_remote_device_states state = sm->current_state_id;
216
217 switch (state) {
218 case SCI_DEV_INITIAL:
219 case SCI_DEV_STOPPED:
220 case SCI_DEV_STARTING:
221 case SCI_SMP_DEV_IDLE:
222 case SCI_SMP_DEV_CMD:
223 case SCI_DEV_STOPPING:
224 case SCI_DEV_FAILED:
225 case SCI_DEV_RESETTING:
226 case SCI_DEV_FINAL:
227 default:
228 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
229 __func__, state);
230 return SCI_FAILURE_INVALID_STATE;
231 case SCI_DEV_READY:
232 case SCI_STP_DEV_IDLE:
233 case SCI_STP_DEV_CMD:
234 case SCI_STP_DEV_NCQ:
235 case SCI_STP_DEV_NCQ_ERROR:
236 case SCI_STP_DEV_AWAIT_RESET:
237 sci_change_state(sm, SCI_DEV_RESETTING);
238 return SCI_SUCCESS;
239 }
240}
241
242enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
243{
244 struct sci_base_state_machine *sm = &idev->sm;
245 enum sci_remote_device_states state = sm->current_state_id;
246
247 if (state != SCI_DEV_RESETTING) {
248 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
249 __func__, state);
250 return SCI_FAILURE_INVALID_STATE;
251 }
252
253 sci_change_state(sm, SCI_DEV_READY);
254 return SCI_SUCCESS;
255}
256
257enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
258 u32 suspend_type)
259{
260 struct sci_base_state_machine *sm = &idev->sm;
261 enum sci_remote_device_states state = sm->current_state_id;
262
263 if (state != SCI_STP_DEV_CMD) {
264 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
265 __func__, state);
266 return SCI_FAILURE_INVALID_STATE;
267 }
268
269 return sci_remote_node_context_suspend(&idev->rnc,
270 suspend_type, NULL, NULL);
271}
272
273enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
274 u32 frame_index)
275{
276 struct sci_base_state_machine *sm = &idev->sm;
277 enum sci_remote_device_states state = sm->current_state_id;
278 struct isci_host *ihost = idev->owning_port->owning_controller;
279 enum sci_status status;
280
281 switch (state) {
282 case SCI_DEV_INITIAL:
283 case SCI_DEV_STOPPED:
284 case SCI_DEV_STARTING:
285 case SCI_STP_DEV_IDLE:
286 case SCI_SMP_DEV_IDLE:
287 case SCI_DEV_FINAL:
288 default:
289 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
290 __func__, state);
291 /* Return the frame back to the controller */
292 sci_controller_release_frame(ihost, frame_index);
293 return SCI_FAILURE_INVALID_STATE;
294 case SCI_DEV_READY:
295 case SCI_STP_DEV_NCQ_ERROR:
296 case SCI_STP_DEV_AWAIT_RESET:
297 case SCI_DEV_STOPPING:
298 case SCI_DEV_FAILED:
299 case SCI_DEV_RESETTING: {
300 struct isci_request *ireq;
301 struct ssp_frame_hdr hdr;
302 void *frame_header;
303 ssize_t word_cnt;
304
305 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
306 frame_index,
307 &frame_header);
308 if (status != SCI_SUCCESS)
309 return status;
310
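	/* SSP frame headers arrive big-endian on the wire; sci_swab32_cpy()
	 * byte-swaps them into the local copy one u32 at a time.
	 */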
311 word_cnt = sizeof(hdr) / sizeof(u32);
312 sci_swab32_cpy(&hdr, frame_header, word_cnt);
313
314 ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
315 if (ireq && ireq->target_device == idev) {
316 /* The IO request is now in charge of releasing the frame */
317 status = sci_io_request_frame_handler(ireq, frame_index);
318 } else {
319 /* We could not map this tag to a valid IO
 320			 * request. Just toss the frame and continue.
321 */
322 sci_controller_release_frame(ihost, frame_index);
323 }
324 break;
325 }
326 case SCI_STP_DEV_NCQ: {
327 struct dev_to_host_fis *hdr;
328
329 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
330 frame_index,
331 (void **)&hdr);
332 if (status != SCI_SUCCESS)
333 return status;
334
335 if (hdr->fis_type == FIS_SETDEVBITS &&
336 (hdr->status & ATA_ERR)) {
337 idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
338
339 /* TODO Check sactive and complete associated IO if any. */
340 sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
341 } else if (hdr->fis_type == FIS_REGD2H &&
342 (hdr->status & ATA_ERR)) {
343 /*
 344			 * Some devices return a D2H FIS when an NCQ error is detected.
 345			 * Treat it like an SDB error FIS and record the same not-ready reason.
346 */
347 idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
348 sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
349 } else
350 status = SCI_FAILURE;
351
352 sci_controller_release_frame(ihost, frame_index);
353 break;
354 }
355 case SCI_STP_DEV_CMD:
356 case SCI_SMP_DEV_CMD:
357 /* The device does not process any UF received from the hardware while
358 * in this state. All unsolicited frames are forwarded to the io request
359 * object.
360 */
361 status = sci_io_request_frame_handler(idev->working_request, frame_index);
362 break;
363 }
364
365 return status;
366}
367
368static bool is_remote_device_ready(struct isci_remote_device *idev)
369{
370
371 struct sci_base_state_machine *sm = &idev->sm;
372 enum sci_remote_device_states state = sm->current_state_id;
373
374 switch (state) {
375 case SCI_DEV_READY:
376 case SCI_STP_DEV_IDLE:
377 case SCI_STP_DEV_CMD:
378 case SCI_STP_DEV_NCQ:
379 case SCI_STP_DEV_NCQ_ERROR:
380 case SCI_STP_DEV_AWAIT_RESET:
381 case SCI_SMP_DEV_IDLE:
382 case SCI_SMP_DEV_CMD:
383 return true;
384 default:
385 return false;
386 }
387}
388
389enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
390 u32 event_code)
391{
392 struct sci_base_state_machine *sm = &idev->sm;
393 enum sci_remote_device_states state = sm->current_state_id;
394 enum sci_status status;
395
396 switch (scu_get_event_type(event_code)) {
397 case SCU_EVENT_TYPE_RNC_OPS_MISC:
398 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
399 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
400 status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
401 break;
402 case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
403 if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
404 status = SCI_SUCCESS;
405
406 /* Suspend the associated RNC */
407 sci_remote_node_context_suspend(&idev->rnc,
408 SCI_SOFTWARE_SUSPENSION,
409 NULL, NULL);
410
411 dev_dbg(scirdev_to_dev(idev),
412 "%s: device: %p event code: %x: %s\n",
413 __func__, idev, event_code,
414 is_remote_device_ready(idev)
415 ? "I_T_Nexus_Timeout event"
416 : "I_T_Nexus_Timeout event in wrong state");
417
418 break;
419 }
420 /* Else, fall through and treat as unhandled... */
421 default:
422 dev_dbg(scirdev_to_dev(idev),
423 "%s: device: %p event code: %x: %s\n",
424 __func__, idev, event_code,
425 is_remote_device_ready(idev)
426 ? "unexpected event"
427 : "unexpected event in wrong state");
428 status = SCI_FAILURE_INVALID_STATE;
429 break;
430 }
431
432 if (status != SCI_SUCCESS)
433 return status;
434
435 if (state == SCI_STP_DEV_IDLE) {
436
 437		/* Suspension events are handled specifically in this
 438		 * state; we resume the RNC right away.
439 */
440 if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
441 scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
442 status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
443 }
444
445 return status;
446}
447
448static void sci_remote_device_start_request(struct isci_remote_device *idev,
449 struct isci_request *ireq,
450 enum sci_status status)
451{
452 struct isci_port *iport = idev->owning_port;
453
454 /* cleanup requests that failed after starting on the port */
455 if (status != SCI_SUCCESS)
456 sci_port_complete_io(iport, idev, ireq);
457 else {
458 kref_get(&idev->kref);
459 idev->started_request_count++;
460 }
461}
462
463enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
464 struct isci_remote_device *idev,
465 struct isci_request *ireq)
466{
467 struct sci_base_state_machine *sm = &idev->sm;
468 enum sci_remote_device_states state = sm->current_state_id;
469 struct isci_port *iport = idev->owning_port;
470 enum sci_status status;
471
472 switch (state) {
473 case SCI_DEV_INITIAL:
474 case SCI_DEV_STOPPED:
475 case SCI_DEV_STARTING:
476 case SCI_STP_DEV_NCQ_ERROR:
477 case SCI_DEV_STOPPING:
478 case SCI_DEV_FAILED:
479 case SCI_DEV_RESETTING:
480 case SCI_DEV_FINAL:
481 default:
482 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
483 __func__, state);
484 return SCI_FAILURE_INVALID_STATE;
485 case SCI_DEV_READY:
 486		/* Attempt to start an io request for this device object. The
 487		 * request is started on the port, then on the remote node context,
 488		 * and finally on the request itself; on success the device
 489		 * increments its own request count.
490 */
491 status = sci_port_start_io(iport, idev, ireq);
492 if (status != SCI_SUCCESS)
493 return status;
494
495 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
496 if (status != SCI_SUCCESS)
497 break;
498
499 status = sci_request_start(ireq);
500 break;
501 case SCI_STP_DEV_IDLE: {
 502		/* Handle the start io operation for a sata device that is in
 503		 * the command idle state: evaluate the type of IO request to
 504		 * be started; if it is an NCQ request change to the NCQ substate,
 505		 * and if it is any other command change to the CMD substate.
506 *
507 * If this is a softreset we may want to have a different
508 * substate.
509 */
510 enum sci_remote_device_states new_state;
511 struct sas_task *task = isci_request_access_task(ireq);
512
513 status = sci_port_start_io(iport, idev, ireq);
514 if (status != SCI_SUCCESS)
515 return status;
516
517 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
518 if (status != SCI_SUCCESS)
519 break;
520
521 status = sci_request_start(ireq);
522 if (status != SCI_SUCCESS)
523 break;
524
525 if (task->ata_task.use_ncq)
526 new_state = SCI_STP_DEV_NCQ;
527 else {
528 idev->working_request = ireq;
529 new_state = SCI_STP_DEV_CMD;
530 }
531 sci_change_state(sm, new_state);
532 break;
533 }
534 case SCI_STP_DEV_NCQ: {
535 struct sas_task *task = isci_request_access_task(ireq);
536
537 if (task->ata_task.use_ncq) {
538 status = sci_port_start_io(iport, idev, ireq);
539 if (status != SCI_SUCCESS)
540 return status;
541
542 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
543 if (status != SCI_SUCCESS)
544 break;
545
546 status = sci_request_start(ireq);
547 } else
548 return SCI_FAILURE_INVALID_STATE;
549 break;
550 }
551 case SCI_STP_DEV_AWAIT_RESET:
552 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
553 case SCI_SMP_DEV_IDLE:
554 status = sci_port_start_io(iport, idev, ireq);
555 if (status != SCI_SUCCESS)
556 return status;
557
558 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
559 if (status != SCI_SUCCESS)
560 break;
561
562 status = sci_request_start(ireq);
563 if (status != SCI_SUCCESS)
564 break;
565
566 idev->working_request = ireq;
567 sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
568 break;
569 case SCI_STP_DEV_CMD:
570 case SCI_SMP_DEV_CMD:
 571		/* The device is already handling a command; it cannot accept new
 572		 * commands until this one is complete.
573 */
574 return SCI_FAILURE_INVALID_STATE;
575 }
576
577 sci_remote_device_start_request(idev, ireq, status);
578 return status;
579}
580
581static enum sci_status common_complete_io(struct isci_port *iport,
582 struct isci_remote_device *idev,
583 struct isci_request *ireq)
584{
585 enum sci_status status;
586
587 status = sci_request_complete(ireq);
588 if (status != SCI_SUCCESS)
589 return status;
590
591 status = sci_port_complete_io(iport, idev, ireq);
592 if (status != SCI_SUCCESS)
593 return status;
594
595 sci_remote_device_decrement_request_count(idev);
596 return status;
597}
598
599enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
600 struct isci_remote_device *idev,
601 struct isci_request *ireq)
602{
603 struct sci_base_state_machine *sm = &idev->sm;
604 enum sci_remote_device_states state = sm->current_state_id;
605 struct isci_port *iport = idev->owning_port;
606 enum sci_status status;
607
608 switch (state) {
609 case SCI_DEV_INITIAL:
610 case SCI_DEV_STOPPED:
611 case SCI_DEV_STARTING:
612 case SCI_STP_DEV_IDLE:
613 case SCI_SMP_DEV_IDLE:
614 case SCI_DEV_FAILED:
615 case SCI_DEV_FINAL:
616 default:
617 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
618 __func__, state);
619 return SCI_FAILURE_INVALID_STATE;
620 case SCI_DEV_READY:
621 case SCI_STP_DEV_AWAIT_RESET:
622 case SCI_DEV_RESETTING:
623 status = common_complete_io(iport, idev, ireq);
624 break;
625 case SCI_STP_DEV_CMD:
626 case SCI_STP_DEV_NCQ:
627 case SCI_STP_DEV_NCQ_ERROR:
628 status = common_complete_io(iport, idev, ireq);
629 if (status != SCI_SUCCESS)
630 break;
631
632 if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
 633			/* This request caused a hardware error; the device needs a LUN reset.
 634			 * Force the state machine to the AWAIT_RESET state so the remaining
 635			 * IOs reach the RNC state handler and are completed by the RNC with a
 636			 * status of "DEVICE_RESET_REQUIRED" instead of "INVALID STATE".
637 */
638 sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
639 } else if (idev->started_request_count == 0)
640 sci_change_state(sm, SCI_STP_DEV_IDLE);
641 break;
642 case SCI_SMP_DEV_CMD:
643 status = common_complete_io(iport, idev, ireq);
644 if (status != SCI_SUCCESS)
645 break;
646 sci_change_state(sm, SCI_SMP_DEV_IDLE);
647 break;
648 case SCI_DEV_STOPPING:
649 status = common_complete_io(iport, idev, ireq);
650 if (status != SCI_SUCCESS)
651 break;
652
653 if (idev->started_request_count == 0)
654 sci_remote_node_context_destruct(&idev->rnc,
655 rnc_destruct_done,
656 idev);
657 break;
658 }
659
660 if (status != SCI_SUCCESS)
661 dev_err(scirdev_to_dev(idev),
662 "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
663 "could not complete\n", __func__, iport,
664 idev, ireq, status);
665 else
666 isci_put_device(idev);
667
668 return status;
669}
670
671static void sci_remote_device_continue_request(void *dev)
672{
673 struct isci_remote_device *idev = dev;
674
675 /* we need to check if this request is still valid to continue. */
676 if (idev->working_request)
677 sci_controller_continue_io(idev->working_request);
678}
679
680enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
681 struct isci_remote_device *idev,
682 struct isci_request *ireq)
683{
684 struct sci_base_state_machine *sm = &idev->sm;
685 enum sci_remote_device_states state = sm->current_state_id;
686 struct isci_port *iport = idev->owning_port;
687 enum sci_status status;
688
689 switch (state) {
690 case SCI_DEV_INITIAL:
691 case SCI_DEV_STOPPED:
692 case SCI_DEV_STARTING:
693 case SCI_SMP_DEV_IDLE:
694 case SCI_SMP_DEV_CMD:
695 case SCI_DEV_STOPPING:
696 case SCI_DEV_FAILED:
697 case SCI_DEV_RESETTING:
698 case SCI_DEV_FINAL:
699 default:
700 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
701 __func__, state);
702 return SCI_FAILURE_INVALID_STATE;
703 case SCI_STP_DEV_IDLE:
704 case SCI_STP_DEV_CMD:
705 case SCI_STP_DEV_NCQ:
706 case SCI_STP_DEV_NCQ_ERROR:
707 case SCI_STP_DEV_AWAIT_RESET:
708 status = sci_port_start_io(iport, idev, ireq);
709 if (status != SCI_SUCCESS)
710 return status;
711
712 status = sci_remote_node_context_start_task(&idev->rnc, ireq);
713 if (status != SCI_SUCCESS)
714 goto out;
715
716 status = sci_request_start(ireq);
717 if (status != SCI_SUCCESS)
718 goto out;
719
720 /* Note: If the remote device state is not IDLE this will
721 * replace the request that probably resulted in the task
722 * management request.
723 */
724 idev->working_request = ireq;
725 sci_change_state(sm, SCI_STP_DEV_CMD);
726
727 /* The remote node context must cleanup the TCi to NCQ mapping
728 * table. The only way to do this correctly is to either write
729 * to the TLCR register or to invalidate and repost the RNC. In
730 * either case the remote node context state machine will take
731 * the correct action when the remote node context is suspended
732 * and later resumed.
733 */
734 sci_remote_node_context_suspend(&idev->rnc,
735 SCI_SOFTWARE_SUSPENSION, NULL, NULL);
736 sci_remote_node_context_resume(&idev->rnc,
737 sci_remote_device_continue_request,
738 idev);
739
740 out:
741 sci_remote_device_start_request(idev, ireq, status);
742 /* We need to let the controller start request handler know that
743 * it can't post TC yet. We will provide a callback function to
 744		 * post the TC when the RNC is resumed.
745 */
746 return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
747 case SCI_DEV_READY:
748 status = sci_port_start_io(iport, idev, ireq);
749 if (status != SCI_SUCCESS)
750 return status;
751
752 status = sci_remote_node_context_start_task(&idev->rnc, ireq);
753 if (status != SCI_SUCCESS)
754 break;
755
756 status = sci_request_start(ireq);
757 break;
758 }
759 sci_remote_device_start_request(idev, ireq, status);
760
761 return status;
762}
763
764void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
765{
766 struct isci_port *iport = idev->owning_port;
767 u32 context;
768
769 context = request |
770 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
771 (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
772 idev->rnc.remote_node_index;
773
774 sci_controller_post_request(iport->owning_controller, context);
775}
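/* Worked example (editorial, illustrative values): with ISCI_PEG == 0, a
 * physical_port_index of 1 and a remote_node_index of 5, the value posted is
 *   request | (0 << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
 *             (1 << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 5
 * i.e. the request code with the PEG, logical port and RNC index packed in.
 */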
776
 777/* called once the remote node context has transitioned to a
778 * ready state. This is the indication that the remote device object can also
779 * transition to ready.
780 */
781static void remote_device_resume_done(void *_dev)
782{
783 struct isci_remote_device *idev = _dev;
784
785 if (is_remote_device_ready(idev))
786 return;
787
788 /* go 'ready' if we are not already in a ready state */
789 sci_change_state(&idev->sm, SCI_DEV_READY);
790}
791
792static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
793{
794 struct isci_remote_device *idev = _dev;
795 struct isci_host *ihost = idev->owning_port->owning_controller;
796
 797	/* For NCQ operation we do not issue an isci_remote_device_not_ready(),
 798	 * so avoid sending the matching ready notification.
799 */
800 if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
801 isci_remote_device_ready(ihost, idev);
802}
803
804static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
805{
806 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
807
808 /* Initial state is a transitional state to the stopped state */
809 sci_change_state(&idev->sm, SCI_DEV_STOPPED);
810}
811
812/**
813 * sci_remote_device_destruct() - free remote node context and destruct
 814 * @idev: This parameter specifies the remote device to be destructed.
815 *
816 * Remote device objects are a limited resource. As such, they must be
817 * protected. Thus calls to construct and destruct are mutually exclusive and
 818 * non-reentrant. Returns SCI_SUCCESS if the device was successfully
 819 * destructed, or SCI_FAILURE_INVALID_REMOTE_DEVICE if the supplied
 820 * device isn't valid (e.g. it has already been destroyed, the handle
 821 * isn't valid, etc.).
824 */
825static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
826{
827 struct sci_base_state_machine *sm = &idev->sm;
828 enum sci_remote_device_states state = sm->current_state_id;
829 struct isci_host *ihost;
830
831 if (state != SCI_DEV_STOPPED) {
832 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
833 __func__, state);
834 return SCI_FAILURE_INVALID_STATE;
835 }
836
837 ihost = idev->owning_port->owning_controller;
838 sci_controller_free_remote_node_context(ihost, idev,
839 idev->rnc.remote_node_index);
840 idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
841 sci_change_state(sm, SCI_DEV_FINAL);
842
843 return SCI_SUCCESS;
844}
845
846/**
847 * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
848 * @ihost: This parameter specifies the isci host object.
849 * @idev: This parameter specifies the remote device to be freed.
850 *
851 */
852static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
853{
854 dev_dbg(&ihost->pdev->dev,
855 "%s: isci_device = %p\n", __func__, idev);
856
 857	/* There should not be any outstanding IOs. All paths to
 858	 * here should go through isci_remote_device_nuke_requests.
 859	 * If we hit this condition, we will need a way to complete
 860	 * IO requests in process. */
861 BUG_ON(!list_empty(&idev->reqs_in_process));
862
863 sci_remote_device_destruct(idev);
864 list_del_init(&idev->node);
865 isci_put_device(idev);
866}
867
868static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
869{
870 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
871 struct isci_host *ihost = idev->owning_port->owning_controller;
872 u32 prev_state;
873
 874	/* If we are entering from the stopping state, let the SCI user know that
875 * the stop operation has completed.
876 */
877 prev_state = idev->sm.previous_state_id;
878 if (prev_state == SCI_DEV_STOPPING)
879 isci_remote_device_deconstruct(ihost, idev);
880
881 sci_controller_remote_device_stopped(ihost, idev);
882}
883
884static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
885{
886 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
887 struct isci_host *ihost = idev->owning_port->owning_controller;
888
889 isci_remote_device_not_ready(ihost, idev,
890 SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
891}
892
893static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
894{
895 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
896 struct isci_host *ihost = idev->owning_port->owning_controller;
897 struct domain_device *dev = idev->domain_dev;
898
899 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
900 sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
901 } else if (dev_is_expander(dev)) {
902 sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
903 } else
904 isci_remote_device_ready(ihost, idev);
905}
906
907static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
908{
909 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
910 struct domain_device *dev = idev->domain_dev;
911
912 if (dev->dev_type == SAS_END_DEV) {
913 struct isci_host *ihost = idev->owning_port->owning_controller;
914
915 isci_remote_device_not_ready(ihost, idev,
916 SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
917 }
918}
919
920static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
921{
922 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
923
924 sci_remote_node_context_suspend(
925 &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
926}
927
928static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
929{
930 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
931
932 sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
933}
934
935static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
936{
937 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
938
939 idev->working_request = NULL;
940 if (sci_remote_node_context_is_ready(&idev->rnc)) {
941 /*
942 * Since the RNC is ready, it's alright to finish completion
943 * processing (e.g. signal the remote device is ready). */
944 sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
945 } else {
946 sci_remote_node_context_resume(&idev->rnc,
947 sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
948 idev);
949 }
950}
951
952static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
953{
954 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
955 struct isci_host *ihost = idev->owning_port->owning_controller;
956
957 BUG_ON(idev->working_request == NULL);
958
959 isci_remote_device_not_ready(ihost, idev,
960 SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
961}
962
963static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
964{
965 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
966 struct isci_host *ihost = idev->owning_port->owning_controller;
967
968 if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
969 isci_remote_device_not_ready(ihost, idev,
970 idev->not_ready_reason);
971}
972
973static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
974{
975 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
976 struct isci_host *ihost = idev->owning_port->owning_controller;
977
978 isci_remote_device_ready(ihost, idev);
979}
980
981static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
982{
983 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
984 struct isci_host *ihost = idev->owning_port->owning_controller;
985
986 BUG_ON(idev->working_request == NULL);
987
988 isci_remote_device_not_ready(ihost, idev,
989 SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
990}
991
992static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
993{
994 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
995
996 idev->working_request = NULL;
997}
998
999static const struct sci_base_state sci_remote_device_state_table[] = {
1000 [SCI_DEV_INITIAL] = {
1001 .enter_state = sci_remote_device_initial_state_enter,
1002 },
1003 [SCI_DEV_STOPPED] = {
1004 .enter_state = sci_remote_device_stopped_state_enter,
1005 },
1006 [SCI_DEV_STARTING] = {
1007 .enter_state = sci_remote_device_starting_state_enter,
1008 },
1009 [SCI_DEV_READY] = {
1010 .enter_state = sci_remote_device_ready_state_enter,
1011 .exit_state = sci_remote_device_ready_state_exit
1012 },
1013 [SCI_STP_DEV_IDLE] = {
1014 .enter_state = sci_stp_remote_device_ready_idle_substate_enter,
1015 },
1016 [SCI_STP_DEV_CMD] = {
1017 .enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
1018 },
1019 [SCI_STP_DEV_NCQ] = { },
1020 [SCI_STP_DEV_NCQ_ERROR] = {
1021 .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
1022 },
1023 [SCI_STP_DEV_AWAIT_RESET] = { },
1024 [SCI_SMP_DEV_IDLE] = {
1025 .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
1026 },
1027 [SCI_SMP_DEV_CMD] = {
1028 .enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
1029 .exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
1030 },
1031 [SCI_DEV_STOPPING] = { },
1032 [SCI_DEV_FAILED] = { },
1033 [SCI_DEV_RESETTING] = {
1034 .enter_state = sci_remote_device_resetting_state_enter,
1035 .exit_state = sci_remote_device_resetting_state_exit
1036 },
1037 [SCI_DEV_FINAL] = { },
1038};
1039
1040/**
1041 * sci_remote_device_construct() - common construction
1042 * @iport: SAS/SATA port through which this device is accessed.
1043 * @idev: remote device to construct
1044 *
1045 * This routine just performs benign initialization and does not
1046 * allocate the remote_node_context which is left to
1047 * sci_remote_device_[de]a_construct(). sci_remote_device_destruct()
1048 * frees the remote_node_context(s) for the device.
1049 */
1050static void sci_remote_device_construct(struct isci_port *iport,
1051 struct isci_remote_device *idev)
1052{
1053 idev->owning_port = iport;
1054 idev->started_request_count = 0;
1055
1056 sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
1057
1058 sci_remote_node_context_construct(&idev->rnc,
1059 SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
1060}
1061
1062/**
1063 * sci_remote_device_da_construct() - construct direct attached device.
1064 *
1065 * The information (e.g. IAF, Signature FIS, etc.) necessary to build
1066 * the device is known to the SCI Core since it is contained in the
1067 * sci_phy object. Remote node context(s) is/are a global resource
1068 * allocated by this routine, freed by sci_remote_device_destruct().
1069 *
1070 * Returns:
1071 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
1072 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
1073 * sata-only controller instance.
1074 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
1075 */
1076static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1077 struct isci_remote_device *idev)
1078{
1079 enum sci_status status;
1080 struct domain_device *dev = idev->domain_dev;
1081
1082 sci_remote_device_construct(iport, idev);
1083
1084 /*
1085	 * This information is required to determine how many remote node context
1086 * entries will be needed to store the remote node.
1087 */
1088 idev->is_direct_attached = true;
1089 status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1090 idev,
1091 &idev->rnc.remote_node_index);
1092
1093 if (status != SCI_SUCCESS)
1094 return status;
1095
1096 if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
1097 (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
1098 /* pass */;
1099 else
1100 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
1101
1102 idev->connection_rate = sci_port_get_max_allowed_speed(iport);
1103
1104	/* @todo Should I assign the port width by reading all of the phys on the port? */
1105 idev->device_port_width = 1;
1106
1107 return SCI_SUCCESS;
1108}
1109
1110/**
1111 * sci_remote_device_ea_construct() - construct expander attached device
1112 *
1113 * Remote node context(s) is/are a global resource allocated by this
1114 * routine, freed by sci_remote_device_destruct().
1115 *
1116 * Returns:
1117 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
1118 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
1119 * sata-only controller instance.
1120 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
1121 */
1122static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
1123 struct isci_remote_device *idev)
1124{
1125 struct domain_device *dev = idev->domain_dev;
1126 enum sci_status status;
1127
1128 sci_remote_device_construct(iport, idev);
1129
1130 status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1131 idev,
1132 &idev->rnc.remote_node_index);
1133 if (status != SCI_SUCCESS)
1134 return status;
1135
1136 if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
1137 (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
1138 /* pass */;
1139 else
1140 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
1141
1142 /*
1143 * For SAS-2 the physical link rate is actually a logical link
1144 * rate that incorporates multiplexing. The SCU doesn't
1145 * incorporate multiplexing and for the purposes of the
1146	 * connection the logical link rate is the same as the
1147 * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
1148 * one another, so this code works for both situations. */
1149 idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
1150 dev->linkrate);
1151
1152	/* @todo Should I assign the port width by reading all of the phys on the port? */
1153 idev->device_port_width = 1;
1154
1155 return SCI_SUCCESS;
1156}
1157
1158/**
1159 * sci_remote_device_start() - This method will start the supplied remote
1160 * device. This method enables normal IO requests to flow through to the
1161 * remote device.
1162 * @idev: This parameter specifies the device to be started.
1163 * @timeout: This parameter specifies the number of milliseconds in which the
1164 * start operation should complete.
1165 *
1166 * Returns an indication of whether the device was successfully started:
1167 * SCI_SUCCESS if the device was successfully started, or
1168 * SCI_FAILURE_INVALID_PHY if the user attempts to start the device when
1169 * there have been no phys added to it.
1170 */
1171static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
1172 u32 timeout)
1173{
1174 struct sci_base_state_machine *sm = &idev->sm;
1175 enum sci_remote_device_states state = sm->current_state_id;
1176 enum sci_status status;
1177
1178 if (state != SCI_DEV_STOPPED) {
1179 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
1180 __func__, state);
1181 return SCI_FAILURE_INVALID_STATE;
1182 }
1183
1184 status = sci_remote_node_context_resume(&idev->rnc,
1185 remote_device_resume_done,
1186 idev);
1187 if (status != SCI_SUCCESS)
1188 return status;
1189
1190 sci_change_state(sm, SCI_DEV_STARTING);
1191
1192 return SCI_SUCCESS;
1193}
1194
1195static enum sci_status isci_remote_device_construct(struct isci_port *iport,
1196 struct isci_remote_device *idev)
1197{
1198 struct isci_host *ihost = iport->isci_host;
1199 struct domain_device *dev = idev->domain_dev;
1200 enum sci_status status;
1201
1202 if (dev->parent && dev_is_expander(dev->parent))
1203 status = sci_remote_device_ea_construct(iport, idev);
1204 else
1205 status = sci_remote_device_da_construct(iport, idev);
1206
1207 if (status != SCI_SUCCESS) {
1208 dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
1209 __func__, status);
1210
1211 return status;
1212 }
1213
1214 /* start the device. */
1215 status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
1216
1217 if (status != SCI_SUCCESS)
1218 dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
1219 status);
1220
1221 return status;
1222}
1223
1224void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
1225{
1226 DECLARE_COMPLETION_ONSTACK(aborted_task_completion);
1227
1228 dev_dbg(&ihost->pdev->dev,
1229 "%s: idev = %p\n", __func__, idev);
1230
1231 /* Cleanup all requests pending for this device. */
1232 isci_terminate_pending_requests(ihost, idev);
1233
1234 dev_dbg(&ihost->pdev->dev,
1235 "%s: idev = %p, done\n", __func__, idev);
1236}
1237
1238/**
1239 * This function builds the isci_remote_device when a libsas dev_found message
1240 * is received.
1241 * @ihost: This parameter specifies the isci host object.
1242 * @iport: This parameter specifies the isci_port connected to this device.
1243 *
1244 * Returns a pointer to the new isci_remote_device, or NULL on failure.
1245 */
1246static struct isci_remote_device *
1247isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
1248{
1249 struct isci_remote_device *idev;
1250 int i;
1251
1252 for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
1253 idev = &ihost->devices[i];
1254 if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
1255 break;
1256 }
1257
1258 if (i >= SCI_MAX_REMOTE_DEVICES) {
1259 dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
1260 return NULL;
1261 }
1262
1263 if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
1264 return NULL;
1265
1266 if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
1267 return NULL;
1268
1269 return idev;
1270}
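/* Editorial note: the allocator above claims a device slot with an atomic
 * test_and_set_bit() on IDEV_ALLOCATED, so no lock is required; the slot is
 * returned to the pool when isci_remote_device_release() clears the bit.
 */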
1271
1272void isci_remote_device_release(struct kref *kref)
1273{
1274 struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
1275 struct isci_host *ihost = idev->isci_port->isci_host;
1276
1277 idev->domain_dev = NULL;
1278 idev->isci_port = NULL;
1279 clear_bit(IDEV_START_PENDING, &idev->flags);
1280 clear_bit(IDEV_STOP_PENDING, &idev->flags);
1281 clear_bit(IDEV_IO_READY, &idev->flags);
1282 clear_bit(IDEV_GONE, &idev->flags);
1283 clear_bit(IDEV_EH, &idev->flags);
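	/* order the flag clears above against releasing IDEV_ALLOCATED below */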
1284 smp_mb__before_clear_bit();
1285 clear_bit(IDEV_ALLOCATED, &idev->flags);
1286 wake_up(&ihost->eventq);
1287}
1288
1289/**
1290 * isci_remote_device_stop() - This function is called internally to stop the
1291 * remote device.
1292 * @ihost: This parameter specifies the isci host object.
1293 * @idev: This parameter specifies the remote device.
1294 *
1295 * Returns the status of the request to stop.
1296 */
1297enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
1298{
1299 enum sci_status status;
1300 unsigned long flags;
1301
1302 dev_dbg(&ihost->pdev->dev,
1303 "%s: isci_device = %p\n", __func__, idev);
1304
1305 spin_lock_irqsave(&ihost->scic_lock, flags);
1306 idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
1307 set_bit(IDEV_GONE, &idev->flags);
1308 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1309
1310 /* Kill all outstanding requests. */
1311 isci_remote_device_nuke_requests(ihost, idev);
1312
1313 set_bit(IDEV_STOP_PENDING, &idev->flags);
1314
1315 spin_lock_irqsave(&ihost->scic_lock, flags);
1316 status = sci_remote_device_stop(idev, 50);
1317 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1318
1319 /* Wait for the stop complete callback. */
1320 if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
1321 /* nothing to wait for */;
1322 else
1323 wait_for_device_stop(ihost, idev);
1324
1325 return status;
1326}
1327
1328/**
1329 * isci_remote_device_gone() - This function is called by libsas when a domain
1330 * device is removed.
1331 * @domain_device: This parameter specifies the libsas domain device.
1332 *
1333 */
1334void isci_remote_device_gone(struct domain_device *dev)
1335{
1336 struct isci_host *ihost = dev_to_ihost(dev);
1337 struct isci_remote_device *idev = dev->lldd_dev;
1338
1339 dev_dbg(&ihost->pdev->dev,
1340 "%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
1341 __func__, dev, idev, idev->isci_port);
1342
1343 isci_remote_device_stop(ihost, idev);
1344}
1345
1346
1347/**
1348 * isci_remote_device_found() - This function is called by libsas when a remote
1349 * device is discovered. A remote device object is created and started. The
1350 * function then sleeps until the sci core device started message is
1351 * received.
1352 * @domain_device: This parameter specifies the libsas domain device.
1353 *
1354 * status, zero indicates success.
1355 */
1356int isci_remote_device_found(struct domain_device *domain_dev)
1357{
1358 struct isci_host *isci_host = dev_to_ihost(domain_dev);
1359 struct isci_port *isci_port;
1360 struct isci_phy *isci_phy;
1361 struct asd_sas_port *sas_port;
1362 struct asd_sas_phy *sas_phy;
1363 struct isci_remote_device *isci_device;
1364 enum sci_status status;
1365
1366 dev_dbg(&isci_host->pdev->dev,
1367 "%s: domain_device = %p\n", __func__, domain_dev);
1368
1369 wait_for_start(isci_host);
1370
1371 sas_port = domain_dev->port;
1372 sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
1373 port_phy_el);
1374 isci_phy = to_iphy(sas_phy);
1375 isci_port = isci_phy->isci_port;
1376
1377 /* we are being called for a device on this port,
1378 * so it has to come up eventually
1379 */
1380 wait_for_completion(&isci_port->start_complete);
1381
1382 if ((isci_stopping == isci_port_get_state(isci_port)) ||
1383 (isci_stopped == isci_port_get_state(isci_port)))
1384 return -ENODEV;
1385
1386 isci_device = isci_remote_device_alloc(isci_host, isci_port);
1387 if (!isci_device)
1388 return -ENODEV;
1389
1390 kref_init(&isci_device->kref);
1391 INIT_LIST_HEAD(&isci_device->node);
1392
1393 spin_lock_irq(&isci_host->scic_lock);
1394 isci_device->domain_dev = domain_dev;
1395 isci_device->isci_port = isci_port;
1396 list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
1397
1398 set_bit(IDEV_START_PENDING, &isci_device->flags);
1399 status = isci_remote_device_construct(isci_port, isci_device);
1400
1401 dev_dbg(&isci_host->pdev->dev,
1402 "%s: isci_device = %p\n",
1403 __func__, isci_device);
1404
1405 if (status == SCI_SUCCESS) {
1406 /* device came up, advertise it to the world */
1407 domain_dev->lldd_dev = isci_device;
1408 } else
1409 isci_put_device(isci_device);
1410 spin_unlock_irq(&isci_host->scic_lock);
1411
1412 /* wait for the device ready callback. */
1413 wait_for_device_start(isci_host, isci_device);
1414
1415 return status == SCI_SUCCESS ? 0 : -ENODEV;
1416}
1417/**
1418 * isci_device_is_reset_pending() - This function will check if there is any
1419 * pending reset condition on the device.
1420 * @isci_device: This parameter is the isci_device object to check.
1421 *
1422 * Returns true if there is a reset pending for the device.
1423 */
1424bool isci_device_is_reset_pending(
1425 struct isci_host *isci_host,
1426 struct isci_remote_device *isci_device)
1427{
1428 struct isci_request *isci_request;
1429 struct isci_request *tmp_req;
1430 bool reset_is_pending = false;
1431 unsigned long flags;
1432
1433 dev_dbg(&isci_host->pdev->dev,
1434 "%s: isci_device = %p\n", __func__, isci_device);
1435
1436 spin_lock_irqsave(&isci_host->scic_lock, flags);
1437
1438 /* Check for reset on all pending requests. */
1439 list_for_each_entry_safe(isci_request, tmp_req,
1440 &isci_device->reqs_in_process, dev_node) {
1441 dev_dbg(&isci_host->pdev->dev,
1442 "%s: isci_device = %p request = %p\n",
1443 __func__, isci_device, isci_request);
1444
1445 if (isci_request->ttype == io_task) {
1446 struct sas_task *task = isci_request_access_task(
1447 isci_request);
1448
1449 spin_lock(&task->task_state_lock);
1450 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
1451 reset_is_pending = true;
1452 spin_unlock(&task->task_state_lock);
1453 }
1454 }
1455
1456 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1457
1458 dev_dbg(&isci_host->pdev->dev,
1459 "%s: isci_device = %p reset_is_pending = %d\n",
1460 __func__, isci_device, reset_is_pending);
1461
1462 return reset_is_pending;
1463}
1464
1465/**
1466 * isci_device_clear_reset_pending() - This function will clear any pending
1467 *    reset condition flags on the device.
1468 * @idev: This parameter is the isci_device object.
1469 *
1470 * The SAS_TASK_NEED_DEV_RESET flag is cleared on each pending request.
1471 */
1472void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev)
1473{
1474 struct isci_request *isci_request;
1475 struct isci_request *tmp_req;
1476 unsigned long flags = 0;
1477
1478 dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n",
1479 __func__, idev, ihost);
1480
1481 spin_lock_irqsave(&ihost->scic_lock, flags);
1482
1483 /* Clear reset pending on all pending requests. */
1484 list_for_each_entry_safe(isci_request, tmp_req,
1485 &idev->reqs_in_process, dev_node) {
1486 dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n",
1487 __func__, idev, isci_request);
1488
1489 if (isci_request->ttype == io_task) {
1490
1491 unsigned long flags2;
1492 struct sas_task *task = isci_request_access_task(
1493 isci_request);
1494
1495 spin_lock_irqsave(&task->task_state_lock, flags2);
1496 task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
1497 spin_unlock_irqrestore(&task->task_state_lock, flags2);
1498 }
1499 }
1500 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1501}
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
new file mode 100644
index 000000000000..57ccfc3d6ad3
--- /dev/null
+++ b/drivers/scsi/isci/remote_device.h
@@ -0,0 +1,352 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _ISCI_REMOTE_DEVICE_H_
57#define _ISCI_REMOTE_DEVICE_H_
58#include <scsi/libsas.h>
59#include <linux/kref.h>
60#include "scu_remote_node_context.h"
61#include "remote_node_context.h"
62#include "port.h"
63
64enum sci_remote_device_not_ready_reason_code {
65 SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED,
66 SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED,
67 SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED,
68 SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED,
69 SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED,
70 SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX
71};
72
73/**
74 * isci_remote_device - isci representation of a sas expander / end point
75 * @device_port_width: hw setting for number of simultaneous connections
76 * @connection_rate: per-taskcontext connection rate for this device
77 * @working_request: SATA requests have no tag, so for unaccelerated
78 *		     protocols we need a method to associate unsolicited
79 *		     frames with a pending request
80 */
81struct isci_remote_device {
82 #define IDEV_START_PENDING 0
83 #define IDEV_STOP_PENDING 1
84 #define IDEV_ALLOCATED 2
85 #define IDEV_EH 3
86 #define IDEV_GONE 4
87 #define IDEV_IO_READY 5
88 #define IDEV_IO_NCQERROR 6
89 unsigned long flags;
90 struct kref kref;
91 struct isci_port *isci_port;
92 struct domain_device *domain_dev;
93 struct list_head node;
94 struct list_head reqs_in_process;
95 struct sci_base_state_machine sm;
96 u32 device_port_width;
97 enum sas_linkrate connection_rate;
98 bool is_direct_attached;
99 struct isci_port *owning_port;
100 struct sci_remote_node_context rnc;
101 /* XXX unify with device reference counting and delete */
102 u32 started_request_count;
103 struct isci_request *working_request;
104 u32 not_ready_reason;
105};
106
107#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
108
109/* device reference routines must be called under sci_lock */
110static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
111{
112 struct isci_remote_device *idev = dev->lldd_dev;
113
114 if (idev && !test_bit(IDEV_GONE, &idev->flags)) {
115 kref_get(&idev->kref);
116 return idev;
117 }
118
119 return NULL;
120}
121
122void isci_remote_device_release(struct kref *kref);
123static inline void isci_put_device(struct isci_remote_device *idev)
124{
125 if (idev)
126 kref_put(&idev->kref, isci_remote_device_release);
127}
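
   As a usage sketch only (not part of this patch): callers take the device
   reference under ihost->scic_lock and drop it when done. The wrapper below
   and its calling context are hypothetical.

	static void example_use_device(struct isci_host *ihost,
				       struct domain_device *dev)
	{
		struct isci_remote_device *idev;
		unsigned long flags;

		/* isci_lookup_device() takes a kref on success; per the
		 * comment above it must be called under scic_lock
		 */
		spin_lock_irqsave(&ihost->scic_lock, flags);
		idev = isci_lookup_device(dev);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		if (!idev)
			return;	/* device already gone or never attached */

		/* ... operate on idev ... */

		isci_put_device(idev);	/* drop the reference taken above */
	}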
128
129enum sci_status isci_remote_device_stop(struct isci_host *ihost,
130 struct isci_remote_device *idev);
131void isci_remote_device_nuke_requests(struct isci_host *ihost,
132 struct isci_remote_device *idev);
133void isci_remote_device_gone(struct domain_device *domain_dev);
134int isci_remote_device_found(struct domain_device *domain_dev);
135bool isci_device_is_reset_pending(struct isci_host *ihost,
136 struct isci_remote_device *idev);
137void isci_device_clear_reset_pending(struct isci_host *ihost,
138 struct isci_remote_device *idev);
139/**
140 * sci_remote_device_stop() - This method will stop both transmission and
141 * reception of link activity for the supplied remote device. This method
142 * disables normal IO requests from flowing through to the remote device.
143 * @remote_device: This parameter specifies the device to be stopped.
144 * @timeout: This parameter specifies the number of milliseconds in which the
145 * stop operation should complete.
146 *
147 * An indication of whether the device was successfully stopped. SCI_SUCCESS
148 * This value is returned if the transmission and reception for the device was
149 * successfully stopped.
150 */
151enum sci_status sci_remote_device_stop(
152 struct isci_remote_device *idev,
153 u32 timeout);
154
155/**
156 * sci_remote_device_reset() - This method will reset the device making it
157 * ready for operation. This method must be called anytime the device is
158 * reset either through a SMP phy control or a port hard reset request.
159 * @remote_device: This parameter specifies the device to be reset.
160 *
161 * This method does not actually cause the device hardware to be reset. This
162 * method resets the software object so that it will be operational after a
163 * device hardware reset completes. An indication of whether the device reset
164 * was accepted. SCI_SUCCESS This value is returned if the device reset is
165 * started.
166 */
167enum sci_status sci_remote_device_reset(
168 struct isci_remote_device *idev);
169
170/**
171 * sci_remote_device_reset_complete() - This method informs the device object
172 * that the reset operation is complete and the device can resume operation
173 * again.
174 * @remote_device: This parameter specifies the device which is to be informed
175 * of the reset complete operation.
176 *
177 * An indication that the device is resuming operation. SCI_SUCCESS the device
178 * is resuming operation.
179 */
180enum sci_status sci_remote_device_reset_complete(
181 struct isci_remote_device *idev);
182
183/**
184 * enum sci_remote_device_states - This enumeration depicts all the states
185 * for the common remote device state machine.
186 *
187 *
188 */
189enum sci_remote_device_states {
190 /**
191 * Simply the initial state for the base remote device state machine.
192 */
193 SCI_DEV_INITIAL,
194
195 /**
196 * This state indicates that the remote device has successfully been
197 * stopped. In this state no new IO operations are permitted.
198 * This state is entered from the INITIAL state.
199 * This state is entered from the STOPPING state.
200 */
201 SCI_DEV_STOPPED,
202
203 /**
204	 * This state indicates that the remote device is in the process of
205 * becoming ready (i.e. starting). In this state no new IO operations
206 * are permitted.
207 * This state is entered from the STOPPED state.
208 */
209 SCI_DEV_STARTING,
210
211 /**
212 * This state indicates the remote device is now ready. Thus, the user
213 * is able to perform IO operations on the remote device.
214 * This state is entered from the STARTING state.
215 */
216 SCI_DEV_READY,
217
218 /**
219 * This is the idle substate for the stp remote device. When there are no
220	 * active IO requests for the device it is in this state.
221 */
222 SCI_STP_DEV_IDLE,
223
224 /**
225	 * This is the command state for the STP remote device. This state is
226 * entered when the device is processing a non-NCQ command. The device object
227 * will fail any new start IO requests until this command is complete.
228 */
229 SCI_STP_DEV_CMD,
230
231 /**
232 * This is the NCQ state for the STP remote device. This state is entered
233	 * when the device is processing an NCQ request. It will remain in this state
234 * so long as there is one or more NCQ requests being processed.
235 */
236 SCI_STP_DEV_NCQ,
237
238 /**
239 * This is the NCQ error state for the STP remote device. This state is
240 * entered when an SDB error FIS is received by the device object while in the
241 * NCQ state. The device object will only accept a READ LOG command while in
242 * this state.
243 */
244 SCI_STP_DEV_NCQ_ERROR,
245
246 /**
247	 * This READY substate indicates the device is waiting for a RESET task
248	 * in order to recover from a hardware specific error.
249 */
250 SCI_STP_DEV_AWAIT_RESET,
251
252 /**
253	 * This is the idle substate for the SMP remote device. When there are no
254	 * active IO requests for the device it is in this state.
255 */
256 SCI_SMP_DEV_IDLE,
257
258 /**
259	 * This is the command substate for the SMP remote device. This is the
260	 * state the device is placed in while a single SMP request is processed.
261 */
262 SCI_SMP_DEV_CMD,
263
264 /**
265 * This state indicates that the remote device is in the process of
266 * stopping. In this state no new IO operations are permitted, but
267 * existing IO operations are allowed to complete.
268 * This state is entered from the READY state.
269 * This state is entered from the FAILED state.
270 */
271 SCI_DEV_STOPPING,
272
273 /**
274 * This state indicates that the remote device has failed.
275 * In this state no new IO operations are permitted.
276 * This state is entered from the INITIALIZING state.
277 * This state is entered from the READY state.
278 */
279 SCI_DEV_FAILED,
280
281 /**
282 * This state indicates the device is being reset.
283 * In this state no new IO operations are permitted.
284 * This state is entered from the READY state.
285 */
286 SCI_DEV_RESETTING,
287
288 /**
289 * Simply the final state for the base remote device state machine.
290 */
291 SCI_DEV_FINAL,
292};
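
   As an illustration of how these states are typically consumed (a hedged
   sketch; the driver does not define this helper), a predicate gating new IO
   on the device substates might look like:

	static bool example_dev_accepts_io(struct isci_remote_device *idev)
	{
		enum sci_remote_device_states state = idev->sm.current_state_id;

		/* READY plus the per-protocol idle substates permit new IO */
		return state == SCI_DEV_READY ||
		       state == SCI_STP_DEV_IDLE ||
		       state == SCI_SMP_DEV_IDLE;
	}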
293
294static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
295{
296 struct isci_remote_device *idev;
297
298 idev = container_of(rnc, typeof(*idev), rnc);
299
300 return idev;
301}
302
303static inline bool dev_is_expander(struct domain_device *dev)
304{
305 return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
306}
307
308static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
309{
310 /* XXX delete this voodoo when converting to the top-level device
311 * reference count
312 */
313 if (WARN_ONCE(idev->started_request_count == 0,
314 "%s: tried to decrement started_request_count past 0!?",
315 __func__))
316 /* pass */;
317 else
318 idev->started_request_count--;
319}
320
321enum sci_status sci_remote_device_frame_handler(
322 struct isci_remote_device *idev,
323 u32 frame_index);
324
325enum sci_status sci_remote_device_event_handler(
326 struct isci_remote_device *idev,
327 u32 event_code);
328
329enum sci_status sci_remote_device_start_io(
330 struct isci_host *ihost,
331 struct isci_remote_device *idev,
332 struct isci_request *ireq);
333
334enum sci_status sci_remote_device_start_task(
335 struct isci_host *ihost,
336 struct isci_remote_device *idev,
337 struct isci_request *ireq);
338
339enum sci_status sci_remote_device_complete_io(
340 struct isci_host *ihost,
341 struct isci_remote_device *idev,
342 struct isci_request *ireq);
343
344enum sci_status sci_remote_device_suspend(
345 struct isci_remote_device *idev,
346 u32 suspend_type);
347
348void sci_remote_device_post_request(
349 struct isci_remote_device *idev,
350 u32 request);
351
352#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
new file mode 100644
index 000000000000..748e8339d1ec
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -0,0 +1,627 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include "host.h"
57#include "isci.h"
58#include "remote_device.h"
59#include "remote_node_context.h"
60#include "scu_event_codes.h"
61#include "scu_task_context.h"
62
63
64/**
65 *
66 * @sci_rnc: The RNC for which the posted-request check is being made.
67 *
68 * This method will return true if the RNC is not in the initial state. In all
69 * other states the RNC is considered active and this will return true. The
70 * destroy request of the state machine drives the RNC back to the initial
71 * state. If the state machine changes then this routine will also have to be
72 * changed. Returns true if the state machine is not in the initial state,
73 * false if the state machine is in the initial state.
74 */
75
76/**
77 *
78 * @sci_rnc: The remote node context object whose state is to be checked.
79 *
80 * This method will return true if the remote node context is in the READY
81 * state, otherwise it will return false.
82 *
83 */
84bool sci_remote_node_context_is_ready(
85 struct sci_remote_node_context *sci_rnc)
86{
87 u32 current_state = sci_rnc->sm.current_state_id;
88
89 if (current_state == SCI_RNC_READY) {
90 return true;
91 }
92
93 return false;
94}
95
96static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
97{
98 if (id < ihost->remote_node_entries &&
99 ihost->device_table[id])
100 return &ihost->remote_node_context_table[id];
101
102 return NULL;
103}
104
105static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
106{
107 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
108 struct domain_device *dev = idev->domain_dev;
109 int rni = sci_rnc->remote_node_index;
110 union scu_remote_node_context *rnc;
111 struct isci_host *ihost;
112 __le64 sas_addr;
113
114 ihost = idev->owning_port->owning_controller;
115 rnc = sci_rnc_by_id(ihost, rni);
116
117 memset(rnc, 0, sizeof(union scu_remote_node_context)
118 * sci_remote_device_node_count(idev));
119
120 rnc->ssp.remote_node_index = rni;
121 rnc->ssp.remote_node_port_width = idev->device_port_width;
122 rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;
123
124 /* sas address is __be64, context ram format is __le64 */
125 sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
126 rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
127 rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);
128
129 rnc->ssp.nexus_loss_timer_enable = true;
130 rnc->ssp.check_bit = false;
131 rnc->ssp.is_valid = false;
132 rnc->ssp.is_remote_node_context = true;
133 rnc->ssp.function_number = 0;
134
135 rnc->ssp.arbitration_wait_time = 0;
136
137 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
138 rnc->ssp.connection_occupancy_timeout =
139 ihost->user_parameters.stp_max_occupancy_timeout;
140 rnc->ssp.connection_inactivity_timeout =
141 ihost->user_parameters.stp_inactivity_timeout;
142 } else {
143 rnc->ssp.connection_occupancy_timeout =
144 ihost->user_parameters.ssp_max_occupancy_timeout;
145 rnc->ssp.connection_inactivity_timeout =
146 ihost->user_parameters.ssp_inactivity_timeout;
147 }
148
149 rnc->ssp.initial_arbitration_wait_time = 0;
150
151 /* Open Address Frame Parameters */
152 rnc->ssp.oaf_connection_rate = idev->connection_rate;
153 rnc->ssp.oaf_features = 0;
154 rnc->ssp.oaf_source_zone_group = 0;
155 rnc->ssp.oaf_more_compatibility_features = 0;
156}
157
158/**
159 *
160 * @sci_rnc:
161 * @callback:
162 * @callback_parameter:
163 *
164 * This method will set up the remote node context object so it will
165 * transition to its ready state. If the remote node context is already set
166 * up to transition to its final state then this function does nothing.
167 */
168static void sci_remote_node_context_setup_to_resume(
169 struct sci_remote_node_context *sci_rnc,
170 scics_sds_remote_node_context_callback callback,
171 void *callback_parameter)
172{
173 if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) {
174 sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY;
175 sci_rnc->user_callback = callback;
176 sci_rnc->user_cookie = callback_parameter;
177 }
178}
179
180static void sci_remote_node_context_setup_to_destroy(
181 struct sci_remote_node_context *sci_rnc,
182 scics_sds_remote_node_context_callback callback,
183 void *callback_parameter)
184{
185 sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL;
186 sci_rnc->user_callback = callback;
187 sci_rnc->user_cookie = callback_parameter;
188}
189
190/**
191 *
192 * @rnc: The remote node context whose user callback is to be invoked.
193 * This method just calls the user callback function and then resets the
194 * callback.
195 */
196static void sci_remote_node_context_notify_user(
197 struct sci_remote_node_context *rnc)
198{
199 if (rnc->user_callback != NULL) {
200 (*rnc->user_callback)(rnc->user_cookie);
201
202 rnc->user_callback = NULL;
203 rnc->user_cookie = NULL;
204 }
205}
206
207static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
208{
209 if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
210 sci_remote_node_context_resume(rnc, rnc->user_callback,
211 rnc->user_cookie);
212}
213
214static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
215{
216 union scu_remote_node_context *rnc_buffer;
217 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
218 struct domain_device *dev = idev->domain_dev;
219 struct isci_host *ihost = idev->owning_port->owning_controller;
220
221 rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
222
223 rnc_buffer->ssp.is_valid = true;
224
225 if (!idev->is_direct_attached &&
226 (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
227 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
228 } else {
229 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
230
231 if (idev->is_direct_attached)
232 sci_port_setup_transports(idev->owning_port,
233 sci_rnc->remote_node_index);
234 }
235}
236
237static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
238{
239 union scu_remote_node_context *rnc_buffer;
240 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
241 struct isci_host *ihost = idev->owning_port->owning_controller;
242
243 rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
244
245 rnc_buffer->ssp.is_valid = false;
246
247 sci_remote_device_post_request(rnc_to_dev(sci_rnc),
248 SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
249}
250
251static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
252{
253 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
254
255 /* Check to see if we have gotten back to the initial state because
256 * someone requested to destroy the remote node context object.
257 */
258 if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
259 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
260 sci_remote_node_context_notify_user(rnc);
261 }
262}
263
264static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
265{
266 struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
267
268 sci_remote_node_context_validate_context_buffer(sci_rnc);
269}
270
271static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
272{
273 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
274
275 sci_remote_node_context_invalidate_context_buffer(rnc);
276}
277
278static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
279{
280 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
281 struct isci_remote_device *idev;
282 struct domain_device *dev;
283
284 idev = rnc_to_dev(rnc);
285 dev = idev->domain_dev;
286
287 /*
288 * For direct attached SATA devices we need to clear the TLCR
289 * NCQ to TCi tag mapping on the phy and in cases where we
290 * resume because of a target reset we also need to update
291 * the STPTLDARNI register with the RNi of the device
292 */
293 if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
294 idev->is_direct_attached)
295 sci_port_setup_transports(idev->owning_port,
296 rnc->remote_node_index);
297
298 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
299}
300
301static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
302{
303 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
304
305 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
306
307 if (rnc->user_callback)
308 sci_remote_node_context_notify_user(rnc);
309}
310
311static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
312{
313 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
314
315 sci_remote_node_context_continue_state_transitions(rnc);
316}
317
318static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
319{
320 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
321
322 sci_remote_node_context_continue_state_transitions(rnc);
323}
324
325static const struct sci_base_state sci_remote_node_context_state_table[] = {
326 [SCI_RNC_INITIAL] = {
327 .enter_state = sci_remote_node_context_initial_state_enter,
328 },
329 [SCI_RNC_POSTING] = {
330 .enter_state = sci_remote_node_context_posting_state_enter,
331 },
332 [SCI_RNC_INVALIDATING] = {
333 .enter_state = sci_remote_node_context_invalidating_state_enter,
334 },
335 [SCI_RNC_RESUMING] = {
336 .enter_state = sci_remote_node_context_resuming_state_enter,
337 },
338 [SCI_RNC_READY] = {
339 .enter_state = sci_remote_node_context_ready_state_enter,
340 },
341 [SCI_RNC_TX_SUSPENDED] = {
342 .enter_state = sci_remote_node_context_tx_suspended_state_enter,
343 },
344 [SCI_RNC_TX_RX_SUSPENDED] = {
345 .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
346 },
347 [SCI_RNC_AWAIT_SUSPENSION] = { },
348};
349
350void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
351 u16 remote_node_index)
352{
353 memset(rnc, 0, sizeof(struct sci_remote_node_context));
354
355 rnc->remote_node_index = remote_node_index;
356 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
357
358 sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
359}
360
361enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
362 u32 event_code)
363{
364 enum scis_sds_remote_node_context_states state;
365
366 state = sci_rnc->sm.current_state_id;
367 switch (state) {
368 case SCI_RNC_POSTING:
369 switch (scu_get_event_code(event_code)) {
370 case SCU_EVENT_POST_RNC_COMPLETE:
371 sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
372 break;
373 default:
374 goto out;
375 }
376 break;
377 case SCI_RNC_INVALIDATING:
378 if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
379 if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL)
380 state = SCI_RNC_INITIAL;
381 else
382 state = SCI_RNC_POSTING;
383 sci_change_state(&sci_rnc->sm, state);
384 } else {
385 switch (scu_get_event_type(event_code)) {
386 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
387				/* We really don't care if the hardware is going to suspend
388				 * the device since it's being invalidated anyway */
389 * the device since it's being invalidated anyway */
390 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
391 "%s: SCIC Remote Node Context 0x%p was "
392					 "suspended by hardware while being "
393 "invalidated.\n", __func__, sci_rnc);
394 break;
395 default:
396 goto out;
397 }
398 }
399 break;
400 case SCI_RNC_RESUMING:
401 if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
402 sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
403 } else {
404 switch (scu_get_event_type(event_code)) {
405 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
406 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
407				/* We really don't care if the hardware is going to suspend
408 * the device since it's being resumed anyway */
409 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
410 "%s: SCIC Remote Node Context 0x%p was "
411					 "suspended by hardware while being resumed.\n",
412 __func__, sci_rnc);
413 break;
414 default:
415 goto out;
416 }
417 }
418 break;
419 case SCI_RNC_READY:
420 switch (scu_get_event_type(event_code)) {
421 case SCU_EVENT_TL_RNC_SUSPEND_TX:
422 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
423 sci_rnc->suspension_code = scu_get_event_specifier(event_code);
424 break;
425 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
426 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
427 sci_rnc->suspension_code = scu_get_event_specifier(event_code);
428 break;
429 default:
430 goto out;
431 }
432 break;
433 case SCI_RNC_AWAIT_SUSPENSION:
434 switch (scu_get_event_type(event_code)) {
435 case SCU_EVENT_TL_RNC_SUSPEND_TX:
436 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
437 sci_rnc->suspension_code = scu_get_event_specifier(event_code);
438 break;
439 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
440 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
441 sci_rnc->suspension_code = scu_get_event_specifier(event_code);
442 break;
443 default:
444 goto out;
445 }
446 break;
447 default:
448 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
449 "%s: invalid state %d\n", __func__, state);
450 return SCI_FAILURE_INVALID_STATE;
451 }
452 return SCI_SUCCESS;
453
454 out:
455 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
456 "%s: code: %#x state: %d\n", __func__, event_code, state);
457 return SCI_FAILURE;
458
459}
460
461enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
462 scics_sds_remote_node_context_callback cb_fn,
463 void *cb_p)
464{
465 enum scis_sds_remote_node_context_states state;
466
467 state = sci_rnc->sm.current_state_id;
468 switch (state) {
469 case SCI_RNC_INVALIDATING:
470		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
471 return SCI_SUCCESS;
472 case SCI_RNC_POSTING:
473 case SCI_RNC_RESUMING:
474 case SCI_RNC_READY:
475 case SCI_RNC_TX_SUSPENDED:
476 case SCI_RNC_TX_RX_SUSPENDED:
477 case SCI_RNC_AWAIT_SUSPENSION:
478		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
479 sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
480 return SCI_SUCCESS;
481 case SCI_RNC_INITIAL:
482 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
483 "%s: invalid state %d\n", __func__, state);
484 /* We have decided that the destruct request on the remote node context
485		 * cannot fail since it is either in the initial/destroyed state
486		 * or can otherwise be destroyed.
487 */
488 return SCI_SUCCESS;
489 default:
490 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
491 "%s: invalid state %d\n", __func__, state);
492 return SCI_FAILURE_INVALID_STATE;
493 }
494}
495
496enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
497 u32 suspend_type,
498 scics_sds_remote_node_context_callback cb_fn,
499 void *cb_p)
500{
501 enum scis_sds_remote_node_context_states state;
502
503 state = sci_rnc->sm.current_state_id;
504 if (state != SCI_RNC_READY) {
505 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
506 "%s: invalid state %d\n", __func__, state);
507 return SCI_FAILURE_INVALID_STATE;
508 }
509
510 sci_rnc->user_callback = cb_fn;
511 sci_rnc->user_cookie = cb_p;
512 sci_rnc->suspension_code = suspend_type;
513
514 if (suspend_type == SCI_SOFTWARE_SUSPENSION) {
515 sci_remote_device_post_request(rnc_to_dev(sci_rnc),
516 SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
517 }
518
519 sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
520 return SCI_SUCCESS;
521}
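
   A hedged usage sketch (not in this patch): requesting a software
   suspension with a completion callback. The callback, its cookie, and the
   calling context (ihost->scic_lock held) are hypothetical; per the
   user_callback documentation in the header, the RNC state machine invokes
   the callback when the requested transition completes.

	static void example_rnc_suspended(void *cookie)
	{
		struct isci_remote_device *idev = cookie;

		/* hypothetical: the RNC is now suspended, idev can be quiesced */
	}

	static enum sci_status example_suspend(struct isci_remote_device *idev)
	{
		/* posts SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX and moves the
		 * RNC to SCI_RNC_AWAIT_SUSPENSION
		 */
		return sci_remote_node_context_suspend(&idev->rnc,
						       SCI_SOFTWARE_SUSPENSION,
						       example_rnc_suspended,
						       idev);
	}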
522
523enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
524 scics_sds_remote_node_context_callback cb_fn,
525 void *cb_p)
526{
527 enum scis_sds_remote_node_context_states state;
528
529 state = sci_rnc->sm.current_state_id;
530 switch (state) {
531 case SCI_RNC_INITIAL:
532 if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
533 return SCI_FAILURE_INVALID_STATE;
534
535 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
536 sci_remote_node_context_construct_buffer(sci_rnc);
537 sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
538 return SCI_SUCCESS;
539 case SCI_RNC_POSTING:
540 case SCI_RNC_INVALIDATING:
541 case SCI_RNC_RESUMING:
542 if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
543 return SCI_FAILURE_INVALID_STATE;
544
545 sci_rnc->user_callback = cb_fn;
546 sci_rnc->user_cookie = cb_p;
547 return SCI_SUCCESS;
548 case SCI_RNC_TX_SUSPENDED: {
549 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
550 struct domain_device *dev = idev->domain_dev;
551
552 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
553
554 /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */
555 if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev))
556 sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
557 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
558 if (idev->is_direct_attached) {
559 /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */
560 sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
561 } else {
562 sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
563 }
564 } else
565 return SCI_FAILURE;
566 return SCI_SUCCESS;
567 }
568 case SCI_RNC_TX_RX_SUSPENDED:
569 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
570 sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
571		return SCI_SUCCESS;
572 case SCI_RNC_AWAIT_SUSPENSION:
573 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
574 return SCI_SUCCESS;
575 default:
576 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
577 "%s: invalid state %d\n", __func__, state);
578 return SCI_FAILURE_INVALID_STATE;
579 }
580}
581
582enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
583 struct isci_request *ireq)
584{
585 enum scis_sds_remote_node_context_states state;
586
587 state = sci_rnc->sm.current_state_id;
588
589 switch (state) {
590 case SCI_RNC_READY:
591 return SCI_SUCCESS;
592 case SCI_RNC_TX_SUSPENDED:
593 case SCI_RNC_TX_RX_SUSPENDED:
594 case SCI_RNC_AWAIT_SUSPENSION:
595 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
596 "%s: invalid state %d\n", __func__, state);
597 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
598 default:
599 break;
600 }
601 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
602 "%s: requested to start IO while still resuming, %d\n",
603 __func__, state);
604 return SCI_FAILURE_INVALID_STATE;
605}
606
607enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
608 struct isci_request *ireq)
609{
610 enum scis_sds_remote_node_context_states state;
611
612 state = sci_rnc->sm.current_state_id;
613 switch (state) {
614 case SCI_RNC_RESUMING:
615 case SCI_RNC_READY:
616 case SCI_RNC_AWAIT_SUSPENSION:
617 return SCI_SUCCESS;
618 case SCI_RNC_TX_SUSPENDED:
619 case SCI_RNC_TX_RX_SUSPENDED:
620 sci_remote_node_context_resume(sci_rnc, NULL, NULL);
621 return SCI_SUCCESS;
622 default:
623 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
624 "%s: invalid state %d\n", __func__, state);
625 return SCI_FAILURE_INVALID_STATE;
626 }
627}
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
new file mode 100644
index 000000000000..41580ad12520
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -0,0 +1,224 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
57#define _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
58
59/**
60 * This file contains the structures, constants, and prototypes associated with
61 * the remote node context in the silicon. It exists to model and manage
62 * the remote node context in the silicon.
63 *
64 *
65 */
66
67#include "isci.h"
68
69/**
70 *
71 *
72 * This constant represents an invalid remote device id, it is used to program
73 * the STPTLDARNI register so the driver knows when it has received a SIGNATURE
74 * FIS from the SCU.
75 */
76#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF
77
78#define SCU_HARDWARE_SUSPENSION (0)
79#define SCI_SOFTWARE_SUSPENSION (1)
80
81struct isci_request;
82struct isci_remote_device;
83struct sci_remote_node_context;
84
85typedef void (*scics_sds_remote_node_context_callback)(void *);
86
87/**
88 * This is the enumeration of the remote node context states.
89 */
90enum scis_sds_remote_node_context_states {
91 /**
92 * This state is the initial state for a remote node context. On a resume
93 * request the remote node context will transition to the posting state.
94 */
95 SCI_RNC_INITIAL,
96
97 /**
98 * This is a transition state that posts the RNi to the hardware. Once the RNC
99 * is posted the remote node context will be made ready.
100 */
101 SCI_RNC_POSTING,
102
103 /**
104 * This is a transition state that will post an RNC invalidate to the
105 * hardware. Once the invalidate is complete the remote node context will
106 * transition to the posting state.
107 */
108 SCI_RNC_INVALIDATING,
109
110 /**
111 * This is a transition state that will post an RNC resume to the hardware.
112 * Once the event notification of resume complete is received the remote node
113 * context will transition to the ready state.
114 */
115 SCI_RNC_RESUMING,
116
117 /**
118 * This is the state that the remote node context must be in to accept io
119 * request operations.
120 */
121 SCI_RNC_READY,
122
123 /**
124 * This is the state that the remote node context transitions to when it gets
125 * a TX suspend notification from the hardware.
126 */
127 SCI_RNC_TX_SUSPENDED,
128
129 /**
130 * This is the state that the remote node context transitions to when it gets
131 * a TX RX suspend notification from the hardware.
132 */
133 SCI_RNC_TX_RX_SUSPENDED,
134
135 /**
136 * This state is a wait state for the remote node context that waits for a
137 * suspend notification from the hardware. This state is entered when either
138 * there is a request to suspend the remote node context or when there is a TC
139 * completion where the remote node will be suspended by the hardware.
140 */
141 SCI_RNC_AWAIT_SUSPENSION
142};
143
144/**
145 *
146 *
147 * This enumeration is used to define the end destination state for the remote
148 * node context.
149 */
150enum sci_remote_node_context_destination_state {
151 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED,
152 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY,
153 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL
154};
155
156/**
157 * struct sci_remote_node_context - This structure contains the data
158 * associated with the remote node context object. The remote node context
159 * (RNC) object models the remote device information necessary to manage
160 * the silicon RNC.
161 */
162struct sci_remote_node_context {
163 /**
164 * This field indicates the remote node index (RNI) associated with
165 * this RNC.
166 */
167 u16 remote_node_index;
168
169 /**
170	 * This field is the recorded suspension code, i.e. the reason for the
171	 * remote node context suspension.
172 */
173 u32 suspension_code;
174
175 /**
176	 * This field indicates the destination state the remote node context
177	 * should transition to. This can cause an automatic resume on receiving
178	 * a suspension notification.
179 */
180 enum sci_remote_node_context_destination_state destination_state;
181
182 /**
183 * This field contains the callback function that the user requested to be
184 * called when the requested state transition is complete.
185 */
186 scics_sds_remote_node_context_callback user_callback;
187
188 /**
189	 * This field contains the parameter that is passed to the callback when
190	 * the user requested state transition is completed.
191 */
192 void *user_cookie;
193
194 /**
195 * This field contains the data for the object's state machine.
196 */
197 struct sci_base_state_machine sm;
198};
199
200void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
201 u16 remote_node_index);
202
203
204bool sci_remote_node_context_is_ready(
205 struct sci_remote_node_context *sci_rnc);
206
207enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
208 u32 event_code);
209enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
210 scics_sds_remote_node_context_callback callback,
211 void *callback_parameter);
212enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
213 u32 suspend_type,
214 scics_sds_remote_node_context_callback cb_fn,
215 void *cb_p);
216enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
217 scics_sds_remote_node_context_callback cb_fn,
218 void *cb_p);
219enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
220 struct isci_request *ireq);
221enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
222 struct isci_request *ireq);
223
224#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c
new file mode 100644
index 000000000000..301b3141945e
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_table.c
@@ -0,0 +1,598 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56/**
57 * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
58 * public, protected, and private methods.
59 *
60 *
61 */
62#include "remote_node_table.h"
63#include "remote_node_context.h"
64
65/**
66 *
67 * @remote_node_table: This is the remote node index table from which the
68 * selection will be made.
69 * @group_table_index: This is the index to the group table from which to
70 * search for an available selection.
71 *
72 * This routine will find, in absolute bit terms across the group table, the
73 * bit position of the next available group. If there are available bits in
74 * the first u32 then it is simply that bit position. Returns the absolute
75 * bit position for an available group.
76 */
77static u32 sci_remote_node_table_get_group_index(
78 struct sci_remote_node_table *remote_node_table,
79 u32 group_table_index)
80{
81 u32 dword_index;
82 u32 *group_table;
83 u32 bit_index;
84
85 group_table = remote_node_table->remote_node_groups[group_table_index];
86
87 for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) {
88 if (group_table[dword_index] != 0) {
89 for (bit_index = 0; bit_index < 32; bit_index++) {
90 if ((group_table[dword_index] & (1 << bit_index)) != 0) {
91 return (dword_index * 32) + bit_index;
92 }
93 }
94 }
95 }
96
97 return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX;
98}
99
100/**
101 *
102 * @remote_node_table: [out] This is the remote node table in which to clear
103 *    the selector.
104 * @group_table_index: This is the remote node selector in which the change
105 *    will be made.
106 * @group_index: This is the bit index in the table to be modified.
107 *
108 * This method will clear the group index entry in the specified group index
109 * table.
110 */
111static void sci_remote_node_table_clear_group_index(
112 struct sci_remote_node_table *remote_node_table,
113 u32 group_table_index,
114 u32 group_index)
115{
116 u32 dword_index;
117 u32 bit_index;
118 u32 *group_table;
119
120 BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
121 BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
122
123 dword_index = group_index / 32;
124 bit_index = group_index % 32;
125 group_table = remote_node_table->remote_node_groups[group_table_index];
126
127 group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index);
128}
129
130/**
131 *
132 * @remote_node_table: [out] This is the remote node table in which to set
133 *    the selector.
134 * @group_table_index: This is the remote node selector in which the change
135 *    will be made.
136 * @group_index: This is the bit position in the table to be modified.
137 *
138 * This method will set the group index bit entry in the specified group index
139 * table.
140 */
141static void sci_remote_node_table_set_group_index(
142 struct sci_remote_node_table *remote_node_table,
143 u32 group_table_index,
144 u32 group_index)
145{
146 u32 dword_index;
147 u32 bit_index;
148 u32 *group_table;
149
150 BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
151 BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
152
153 dword_index = group_index / 32;
154 bit_index = group_index % 32;
155 group_table = remote_node_table->remote_node_groups[group_table_index];
156
157 group_table[dword_index] = group_table[dword_index] | (1 << bit_index);
158}
159
160/**
161 *
162 * @remote_node_table: [out] This is the remote node table in which to modify
163 *    the remote node availability.
164 * @remote_node_index: This is the remote node index that is being returned to
165 *    the table.
166 *
167 * This method will set the remote node to available in the remote node
168 * allocation table.
169 */
170static void sci_remote_node_table_set_node_index(
171 struct sci_remote_node_table *remote_node_table,
172 u32 remote_node_index)
173{
174 u32 dword_location;
175 u32 dword_remainder;
176 u32 slot_normalized;
177 u32 slot_position;
178
179 BUG_ON(
180 (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
181 <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
182 );
183
184 dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
185 dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
186 slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
187 slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
188
189 remote_node_table->available_remote_nodes[dword_location] |=
190 1 << (slot_normalized + slot_position);
191}
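
   Worked through as a standalone sketch (not in the patch): the math above
   packs SCU_STP_REMOTE_NODE_COUNT remote nodes into each 4-bit slot of a
   dword. The constant values below are assumptions inferred from that
   layout, not taken verbatim from the headers.

	#include <stdio.h>

	#define STP_NODE_COUNT	3	/* assumed SCU_STP_REMOTE_NODE_COUNT */
	#define NODES_PER_DWORD	24	/* assumed SCIC_SDS_REMOTE_NODES_PER_DWORD */

	/* mirrors the dword/slot arithmetic in
	 * sci_remote_node_table_set_node_index()
	 */
	static unsigned int rni_to_bit(unsigned int rni, unsigned int *dword)
	{
		unsigned int rem = rni % NODES_PER_DWORD;

		*dword = rni / NODES_PER_DWORD;
		/* each 3-node slot is a 4-bit nibble; sizeof(u32) == 4 is
		 * exactly the nibble width used by slot_normalized
		 */
		return (rem / STP_NODE_COUNT) * 4 + rni % STP_NODE_COUNT;
	}

	int main(void)
	{
		unsigned int rni;

		for (rni = 0; rni < 8; rni++) {
			unsigned int dword;
			unsigned int bit = rni_to_bit(rni, &dword);

			/* e.g. rni 7 -> dword 0, bit 9: second bit of the
			 * third nibble
			 */
			printf("rni %u -> dword %u, bit %u\n", rni, dword, bit);
		}
		return 0;
	}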
192
193/**
194 *
195 * @remote_node_table: [out] This is the remote node table from which to
196 *    clear the available remote node bit.
197 * @remote_node_index: This is the remote node index which is to be cleared
198 *    from the table.
199 *
200 * This method clears the remote node index from the table of available remote
201 * nodes.
202 */
203static void sci_remote_node_table_clear_node_index(
204 struct sci_remote_node_table *remote_node_table,
205 u32 remote_node_index)
206{
207 u32 dword_location;
208 u32 dword_remainder;
209 u32 slot_position;
210 u32 slot_normalized;
211
212 BUG_ON(
213 (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
214 <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
215 );
216
217 dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
218 dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
219 slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
220 slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
221
222 remote_node_table->available_remote_nodes[dword_location] &=
223 ~(1 << (slot_normalized + slot_position));
224}
225
226/**
227 *
228 * @remote_node_table: [out] The remote node table from which the slot will
229 *    be cleared.
230 * @group_index: The index for the slot that is to be cleared.
231 *
232 * This method clears the entire table slot at the specified slot index.
233 */
234static void sci_remote_node_table_clear_group(
235 struct sci_remote_node_table *remote_node_table,
236 u32 group_index)
237{
238 u32 dword_location;
239 u32 dword_remainder;
240 u32 dword_value;
241
242 BUG_ON(
243 (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
244 <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
245 );
246
247 dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
248 dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
249
250 dword_value = remote_node_table->available_remote_nodes[dword_location];
251 dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
252 remote_node_table->available_remote_nodes[dword_location] = dword_value;
253}
254
255/**
256 *
257 * @remote_node_table: The remote node table to be modified.
258 * @group_index: The index for the group that is to be set.
259 * This method sets an entire remote node group in the remote node table.
260 */
261static void sci_remote_node_table_set_group(
262 struct sci_remote_node_table *remote_node_table,
263 u32 group_index)
264{
265 u32 dword_location;
266 u32 dword_remainder;
267 u32 dword_value;
268
269 BUG_ON(
270 (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
271 <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
272 );
273
274 dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
275 dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
276
277 dword_value = remote_node_table->available_remote_nodes[dword_location];
278 dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
279 remote_node_table->available_remote_nodes[dword_location] = dword_value;
280}
281
282/**
283 *
284 * @remote_node_table: This is the remote node table that for which the group
285 * value is to be returned.
286 * @group_index: This is the group index to use to find the group value.
287 *
288 * This method will return the group value for the specified group index,
289 * i.e. the bit values at the specified remote node group index.
290 */
291static u8 sci_remote_node_table_get_group_value(
292 struct sci_remote_node_table *remote_node_table,
293 u32 group_index)
294{
295 u32 dword_location;
296 u32 dword_remainder;
297 u32 dword_value;
298
299 dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
300 dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
301
302 dword_value = remote_node_table->available_remote_nodes[dword_location];
303 dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
304 dword_value = dword_value >> (dword_remainder * 4);
305
306 return (u8)dword_value;
307}
308
309/**
310 *
311 * @remote_node_table: [out] The remote node table which is to be initialized.
312 * @remote_node_entries: The number of entries to put in the table.
313 *
314 * This method will initialize the remote node table for use.
315 */
316void sci_remote_node_table_initialize(
317 struct sci_remote_node_table *remote_node_table,
318 u32 remote_node_entries)
319{
320 u32 index;
321
322 /*
323 * Initialize the raw data. We could improve the speed by only
324 * initializing those entries that are actually going to be used */
325 memset(
326 remote_node_table->available_remote_nodes,
327 0x00,
328 sizeof(remote_node_table->available_remote_nodes)
329 );
330
331 memset(
332 remote_node_table->remote_node_groups,
333 0x00,
334 sizeof(remote_node_table->remote_node_groups)
335 );
336
337 /* Initialize the available remote node sets */
338 remote_node_table->available_nodes_array_size = (u16)
339 (remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD)
340 + ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0);
341
342
343 /* Initialize each full DWORD to a FULL SET of remote nodes */
344 for (index = 0; index < remote_node_entries; index++) {
345 sci_remote_node_table_set_node_index(remote_node_table, index);
346 }
347
348 remote_node_table->group_array_size = (u16)
349 (remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32))
350 + ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0);
351
352 for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) {
353 /*
354 * These are all guaranteed to be full slot values so fill them in the
355 * available sets of 3 remote nodes */
356 sci_remote_node_table_set_group_index(remote_node_table, 2, index);
357 }
358
359 /* Now fill in any remainders that we may find */
360 if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
361 sci_remote_node_table_set_group_index(remote_node_table, 1, index);
362 } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
363 sci_remote_node_table_set_group_index(remote_node_table, 0, index);
364 }
365}
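
A worked instance of the sizing and seeding arithmetic above, with an illustrative entry count (the real value comes from the controller configuration):

/* sketch: initialization arithmetic for remote_node_entries == 16 */
#include <stdio.h>

int main(void)
{
	unsigned entries = 16;
	unsigned per_dword = 24;	/* SCIC_SDS_REMOTE_NODES_PER_DWORD */
	unsigned per_group = 3;		/* SCU_STP_REMOTE_NODE_COUNT */

	unsigned avail_dwords = entries / per_dword + ((entries % per_dword) != 0);
	unsigned group_dwords = entries / (per_group * 32) +
				((entries % (per_group * 32)) != 0);
	unsigned full_groups = entries / per_group;	/* seeded into selector 2 */
	unsigned leftover = entries % per_group;	/* 1 -> selector 0, 2 -> selector 1 */

	/* prints: 1 available dword(s), 1 group dword(s), 5 full group(s), 1 leftover */
	printf("%u available dword(s), %u group dword(s), %u full group(s), %u leftover\n",
	       avail_dwords, group_dwords, full_groups, leftover);
	return 0;
}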
366
367/**
368 *
369 * @remote_node_table: [out] The remote node table from which to allocate a
370 * remote node.
371 * @group_table_index: The group index that is to be used for the search.
372 *
373 * This method will allocate a single RNi from the remote node table. The
374 * table index will determine from which remote node group table to search.
375 * This search may fail and another group node table can be specified. The
376 * function is designed to allow a search of the available single remote node
377 * group up to the triple remote node group. If an entry is found in the
378 * specified table the remote node is removed and the remote node groups are
379 * updated. Returns the RNi value, or an invalid remote node context if an
380 * RNi cannot be found.
381 */
382static u16 sci_remote_node_table_allocate_single_remote_node(
383 struct sci_remote_node_table *remote_node_table,
384 u32 group_table_index)
385{
386 u8 index;
387 u8 group_value;
388 u32 group_index;
389 u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
390
391 group_index = sci_remote_node_table_get_group_index(
392 remote_node_table, group_table_index);
393
394 /* We could not find an available slot in the table selector 0 */
395 if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
396 group_value = sci_remote_node_table_get_group_value(
397 remote_node_table, group_index);
398
399 for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
400 if (((1 << index) & group_value) != 0) {
401 /* We have selected a bit now clear it */
402 remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
403 + index);
404
405 sci_remote_node_table_clear_group_index(
406 remote_node_table, group_table_index, group_index
407 );
408
409 sci_remote_node_table_clear_node_index(
410 remote_node_table, remote_node_index
411 );
412
413 if (group_table_index > 0) {
414 sci_remote_node_table_set_group_index(
415 remote_node_table, group_table_index - 1, group_index
416 );
417 }
418
419 break;
420 }
421 }
422 }
423
424 return remote_node_index;
425}
426
427/**
428 *
429 * @remote_node_table: This is the remote node table from which to allocate the
430 * remote node entries.
431 * @group_table_index: This is the group table index which must equal two (2)
432 * for this operation.
433 *
434 * This method will allocate three consecutive remote node context entries. If
435 * there are no remaining triple entries the function will return a failure.
436 * Returns the remote node index that represents three consecutive remote node
437 * entries, or an invalid remote node context if none can be found.
438 */
439static u16 sci_remote_node_table_allocate_triple_remote_node(
440 struct sci_remote_node_table *remote_node_table,
441 u32 group_table_index)
442{
443 u32 group_index;
444 u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
445
446 group_index = sci_remote_node_table_get_group_index(
447 remote_node_table, group_table_index);
448
449 if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
450 remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;
451
452 sci_remote_node_table_clear_group_index(
453 remote_node_table, group_table_index, group_index
454 );
455
456 sci_remote_node_table_clear_group(
457 remote_node_table, group_index
458 );
459 }
460
461 return remote_node_index;
462}
463
464/**
465 *
466 * @remote_node_table: This is the remote node table from which the remote node
467 * allocation is to take place.
468 * @remote_node_count: This is the remote node count which is one of
469 * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).
470 *
471 * This method will allocate a remote node that matches the remote node count
472 * specified by the caller. Valid values for the remote node count are
473 * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). Returns the
474 * u16 remote node index that was allocated, or an invalid remote node context.
475 */
476u16 sci_remote_node_table_allocate_remote_node(
477 struct sci_remote_node_table *remote_node_table,
478 u32 remote_node_count)
479{
480 u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
481
482 if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
483 remote_node_index =
484 sci_remote_node_table_allocate_single_remote_node(
485 remote_node_table, 0);
486
487 if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
488 remote_node_index =
489 sci_remote_node_table_allocate_single_remote_node(
490 remote_node_table, 1);
491 }
492
493 if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
494 remote_node_index =
495 sci_remote_node_table_allocate_single_remote_node(
496 remote_node_table, 2);
497 }
498 } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
499 remote_node_index =
500 sci_remote_node_table_allocate_triple_remote_node(
501 remote_node_table, 2);
502 }
503
504 return remote_node_index;
505}
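
A hedged usage sketch of the allocate/release pair (locking and remote node context programming omitted; SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX is the failure sentinel used throughout this file):

#include "remote_node_table.h"

/* sketch: allocate an RNi for an SSP device, then return it to the pool */
static int rni_example(struct sci_remote_node_table *rnt)
{
	u16 rni = sci_remote_node_table_allocate_remote_node(rnt,
					SCU_SSP_REMOTE_NODE_COUNT);

	if (rni == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
		return -1;	/* no free remote node contexts */

	/* ... program the remote node context and run I/O here ... */

	sci_remote_node_table_release_remote_node_index(rnt,
					SCU_SSP_REMOTE_NODE_COUNT, rni);
	return 0;
}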
506
507/**
508 *
509 * @remote_node_table: The remote node table to which the remote node index
510 * is to be freed.
511 * This method will free a single remote node index back to the remote node
512 * table. This routine will update the remote node groups.
513 */
514static void sci_remote_node_table_release_single_remote_node(
515 struct sci_remote_node_table *remote_node_table,
516 u16 remote_node_index)
517{
518 u32 group_index;
519 u8 group_value;
520
521 group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
522
523 group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);
524
525 /*
526 * Assert that we are not trying to add an entry to a slot that is already
527 * full. */
528 BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE);
529
530 if (group_value == 0x00) {
531 /*
532 * There are no entries in this slot so it must be added to the single
533 * slot table. */
534 sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
535 } else if ((group_value & (group_value - 1)) == 0) {
536 /*
537 * There is only one entry in this slot so it must be moved from the
538 * single slot table to the dual slot table */
539 sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
540 sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
541 } else {
542 /*
543 * There are two entries in the slot so it must be moved from the dual
544 * slot table to the triple slot table. */
545 sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
546 sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
547 }
548
549 sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
550}
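
The (group_value & (group_value - 1)) test above is the usual single-bit check: for a non-zero nibble it is zero exactly when one bit is set. A tiny sketch:

#include <assert.h>

static int exactly_one_bit(unsigned v)
{
	return v != 0 && (v & (v - 1)) == 0;
}

int main(void)
{
	assert(exactly_one_bit(0x4));	/* one node free: slot moves to the dual table */
	assert(!exactly_one_bit(0x5));	/* two nodes free: slot moves to the triple table */
	return 0;
}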
551
552/**
553 *
554 * @remote_node_table: This is the remote node table to which the remote node
555 * index is to be freed.
556 *
557 * This method will release a group of three consecutive remote nodes back to
558 * the free remote nodes.
559 */
560static void sci_remote_node_table_release_triple_remote_node(
561 struct sci_remote_node_table *remote_node_table,
562 u16 remote_node_index)
563{
564 u32 group_index;
565
566 group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
567
568 sci_remote_node_table_set_group_index(
569 remote_node_table, 2, group_index
570 );
571
572 sci_remote_node_table_set_group(remote_node_table, group_index);
573}
574
575/**
576 *
577 * @remote_node_table: The remote node table to which the remote node index is
578 * to be freed.
579 * @remote_node_count: This is the count of consecutive remote nodes that are
580 * to be freed.
581 *
582 * This method will release the remote node index back into the remote node
583 * table free pool.
584 */
585void sci_remote_node_table_release_remote_node_index(
586 struct sci_remote_node_table *remote_node_table,
587 u32 remote_node_count,
588 u16 remote_node_index)
589{
590 if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
591 sci_remote_node_table_release_single_remote_node(
592 remote_node_table, remote_node_index);
593 } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
594 sci_remote_node_table_release_triple_remote_node(
595 remote_node_table, remote_node_index);
596 }
597}
598
diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h
new file mode 100644
index 000000000000..721ab982d2ac
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_table.h
@@ -0,0 +1,188 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _SCIC_SDS_REMOTE_NODE_TABLE_H_
57#define _SCIC_SDS_REMOTE_NODE_TABLE_H_
58
59#include "isci.h"
60
61/**
62 *
63 *
64 * Remote node sets are sets of remote node indices in the remote node table.
65 * The SCU hardware requires that STP remote node entries take three
66 * consecutive remote node indices so the table is arranged in sets of three.
67 * The bits are used as 0111 0111 to make a byte and the bits define the set
68 * of three remote nodes to use as a sequence.
69 */
70#define SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE 2
71
72/**
73 *
74 *
75 * Since the remote node table is organized as DWORDs, take the remote node
76 * sets in bytes and represent them in DWORDs. The lowest ordered bits are the
77 * ones used in case the full DWORD is not being used, i.e. 0000 0000 0000 0000
78 * 0111 0111 0111 0111 if only a single WORD is in use in the DWORD.
79 */
80#define SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD \
81 (sizeof(u32) * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
82/**
83 *
84 *
85 * This is a count of the number of remote nodes that can be represented in a
86 * byte
87 */
88#define SCIC_SDS_REMOTE_NODES_PER_BYTE \
89 (SCU_STP_REMOTE_NODE_COUNT * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
90
91/**
92 *
93 *
94 * This is a count of the number of remote nodes that can be represented in a
95 * DWORD
96 */
97#define SCIC_SDS_REMOTE_NODES_PER_DWORD \
98 (sizeof(u32) * SCIC_SDS_REMOTE_NODES_PER_BYTE)
99
100/**
101 *
102 *
103 * This is the number of bits in a remote node group
104 */
105#define SCIC_SDS_REMOTE_NODES_BITS_PER_GROUP 4
106
107#define SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX (0xFFFFFFFF)
108#define SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE (0x07)
109#define SCIC_SDS_REMOTE_NODE_TABLE_EMPTY_SLOT_VALUE (0x00)
110
111/**
112 *
113 *
114 * Expander-attached SATA remote node count
115 */
116#define SCU_STP_REMOTE_NODE_COUNT 3
117
118/**
119 *
120 *
121 * Expander or direct-attached SSP remote node count
122 */
123#define SCU_SSP_REMOTE_NODE_COUNT 1
124
125/**
126 *
127 *
128 * Direct attached STP remote node count
129 */
130#define SCU_SATA_REMOTE_NODE_COUNT 1
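
Assuming a four-byte u32, the derived constants above work out as in this short sketch:

#include <assert.h>

int main(void)
{
	unsigned sets_per_byte = 2, nodes_per_set = 3, bytes_per_dword = 4;

	assert(sets_per_byte * bytes_per_dword == 8);	/* SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD */
	assert(nodes_per_set * sets_per_byte == 6);	/* SCIC_SDS_REMOTE_NODES_PER_BYTE */
	assert(nodes_per_set * sets_per_byte * bytes_per_dword == 24);	/* SCIC_SDS_REMOTE_NODES_PER_DWORD */
	return 0;
}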
131
132/**
133 * struct sci_remote_node_table - free remote node bookkeeping
134 *
135 *
136 */
137struct sci_remote_node_table {
138 /**
139 * This field contains the array size in dwords
140 */
141 u16 available_nodes_array_size;
142
143 /**
144 * This field contains the array size of the remote node groups array.
145 */
146 u16 group_array_size;
147
148 /**
149 * This field is the array of available remote node entries in bits.
150 * Because of the way STP remote node data is allocated on the SCU hardware
151 * the remote nodes must occupy three consecutive remote node context
152 * entries. For ease of allocation and de-allocation we have broken the
153 * sets of three into a single nibble. When the STP RNi is allocated all
154 * of the bits in the nibble are cleared. This math results in a table size
155 * of MAX_REMOTE_NODES / CONSECUTIVE RNi ENTRIES for STP / 2 entries per byte.
156 */
157 u32 available_remote_nodes[
158 (SCI_MAX_REMOTE_DEVICES / SCIC_SDS_REMOTE_NODES_PER_DWORD)
159 + ((SCI_MAX_REMOTE_DEVICES % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0)];
160
161 /**
162 * This field is the nibble selector for the above table. There are three
163 * possible selectors each for fast lookup when trying to find one, two or
164 * three remote node entries.
165 */
166 u32 remote_node_groups[
167 SCU_STP_REMOTE_NODE_COUNT][
168 (SCI_MAX_REMOTE_DEVICES / (32 * SCU_STP_REMOTE_NODE_COUNT))
169 + ((SCI_MAX_REMOTE_DEVICES % (32 * SCU_STP_REMOTE_NODE_COUNT)) != 0)];
170
171};
172
173/* --------------------------------------------------------------------------- */
174
175void sci_remote_node_table_initialize(
176 struct sci_remote_node_table *remote_node_table,
177 u32 remote_node_entries);
178
179u16 sci_remote_node_table_allocate_remote_node(
180 struct sci_remote_node_table *remote_node_table,
181 u32 remote_node_count);
182
183void sci_remote_node_table_release_remote_node_index(
184 struct sci_remote_node_table *remote_node_table,
185 u32 remote_node_count,
186 u16 remote_node_index);
187
188#endif /* _SCIC_SDS_REMOTE_NODE_TABLE_H_ */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
new file mode 100644
index 000000000000..a46e07ac789f
--- /dev/null
+++ b/drivers/scsi/isci/request.c
@@ -0,0 +1,3391 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include "isci.h"
57#include "task.h"
58#include "request.h"
59#include "scu_completion_codes.h"
60#include "scu_event_codes.h"
61#include "sas.h"
62
63static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
64 int idx)
65{
66 if (idx == 0)
67 return &ireq->tc->sgl_pair_ab;
68 else if (idx == 1)
69 return &ireq->tc->sgl_pair_cd;
70 else if (idx < 0)
71 return NULL;
72 else
73 return &ireq->sg_table[idx - 2];
74}
75
76static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
77 struct isci_request *ireq, u32 idx)
78{
79 u32 offset;
80
81 if (idx == 0) {
82 offset = (void *) &ireq->tc->sgl_pair_ab -
83 (void *) &ihost->task_context_table[0];
84 return ihost->task_context_dma + offset;
85 } else if (idx == 1) {
86 offset = (void *) &ireq->tc->sgl_pair_cd -
87 (void *) &ihost->task_context_table[0];
88 return ihost->task_context_dma + offset;
89 }
90
91 return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
92}
93
94static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
95{
96 e->length = sg_dma_len(sg);
97 e->address_upper = upper_32_bits(sg_dma_address(sg));
98 e->address_lower = lower_32_bits(sg_dma_address(sg));
99 e->address_modifier = 0;
100}
101
102static void sci_request_build_sgl(struct isci_request *ireq)
103{
104 struct isci_host *ihost = ireq->isci_host;
105 struct sas_task *task = isci_request_access_task(ireq);
106 struct scatterlist *sg = NULL;
107 dma_addr_t dma_addr;
108 u32 sg_idx = 0;
109 struct scu_sgl_element_pair *scu_sg = NULL;
110 struct scu_sgl_element_pair *prev_sg = NULL;
111
112 if (task->num_scatter > 0) {
113 sg = task->scatter;
114
115 while (sg) {
116 scu_sg = to_sgl_element_pair(ireq, sg_idx);
117 init_sgl_element(&scu_sg->A, sg);
118 sg = sg_next(sg);
119 if (sg) {
120 init_sgl_element(&scu_sg->B, sg);
121 sg = sg_next(sg);
122 } else
123 memset(&scu_sg->B, 0, sizeof(scu_sg->B));
124
125 if (prev_sg) {
126 dma_addr = to_sgl_element_pair_dma(ihost,
127 ireq,
128 sg_idx);
129
130 prev_sg->next_pair_upper =
131 upper_32_bits(dma_addr);
132 prev_sg->next_pair_lower =
133 lower_32_bits(dma_addr);
134 }
135
136 prev_sg = scu_sg;
137 sg_idx++;
138 }
139 } else { /* handle when no sg */
140 scu_sg = to_sgl_element_pair(ireq, sg_idx);
141
142 dma_addr = dma_map_single(&ihost->pdev->dev,
143 task->scatter,
144 task->total_xfer_len,
145 task->data_dir);
146
147 ireq->zero_scatter_daddr = dma_addr;
148
149 scu_sg->A.length = task->total_xfer_len;
150 scu_sg->A.address_upper = upper_32_bits(dma_addr);
151 scu_sg->A.address_lower = lower_32_bits(dma_addr);
152 }
153
154 if (scu_sg) {
155 scu_sg->next_pair_upper = 0;
156 scu_sg->next_pair_lower = 0;
157 }
158}
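
A standalone model of the pairing walk above (simplified stand-in types, not the driver's structures): scatterlist entries are consumed two at a time into the A and B slots of each pair, and every pair records the location of its successor; in the driver this link is the next pair's DMA address, computed by to_sgl_element_pair_dma() above.

#include <stddef.h>
#include <stdint.h>

struct model_pair { uint64_t a, b, next; };

static void model_build(struct model_pair *pairs, const uint64_t *dma, size_t n)
{
	size_t idx = 0;
	size_t i;

	for (i = 0; i < n; i += 2, idx++) {
		pairs[idx].a = dma[i];
		pairs[idx].b = (i + 1 < n) ? dma[i + 1] : 0;	/* unused B slot zeroed */
		if (idx > 0)	/* link the previous pair to this one */
			pairs[idx - 1].next = (uint64_t)(uintptr_t)&pairs[idx];
	}
	if (idx)
		pairs[idx - 1].next = 0;	/* terminate the chain */
}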
159
160static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
161{
162 struct ssp_cmd_iu *cmd_iu;
163 struct sas_task *task = isci_request_access_task(ireq);
164
165 cmd_iu = &ireq->ssp.cmd;
166
167 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
168 cmd_iu->add_cdb_len = 0;
169 cmd_iu->_r_a = 0;
170 cmd_iu->_r_b = 0;
171 cmd_iu->en_fburst = 0; /* unsupported */
172 cmd_iu->task_prio = task->ssp_task.task_prio;
173 cmd_iu->task_attr = task->ssp_task.task_attr;
174 cmd_iu->_r_c = 0;
175
176 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
177 sizeof(task->ssp_task.cdb) / sizeof(u32));
178}
179
180static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
181{
182 struct ssp_task_iu *task_iu;
183 struct sas_task *task = isci_request_access_task(ireq);
184 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
185
186 task_iu = &ireq->ssp.tmf;
187
188 memset(task_iu, 0, sizeof(struct ssp_task_iu));
189
190 memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
191
192 task_iu->task_func = isci_tmf->tmf_code;
193 task_iu->task_tag =
194 (ireq->ttype == tmf_task) ?
195 isci_tmf->io_tag :
196 SCI_CONTROLLER_INVALID_IO_TAG;
197}
198
199/**
200 * This method will fill in the SCU Task Context for any type of SSP request.
201 * @ireq: The isci request for which the task context is constructed.
202 * @task_context: The SCU task context buffer to be filled in.
203 *
204 */
205static void scu_ssp_reqeust_construct_task_context(
206 struct isci_request *ireq,
207 struct scu_task_context *task_context)
208{
209 dma_addr_t dma_addr;
210 struct isci_remote_device *idev;
211 struct isci_port *iport;
212
213 idev = ireq->target_device;
214 iport = idev->owning_port;
215
216	/* Fill in the TC with its required data */
217 task_context->abort = 0;
218 task_context->priority = 0;
219 task_context->initiator_request = 1;
220 task_context->connection_rate = idev->connection_rate;
221 task_context->protocol_engine_index = ISCI_PEG;
222 task_context->logical_port_index = iport->physical_port_index;
223 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
224 task_context->valid = SCU_TASK_CONTEXT_VALID;
225 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
226
227 task_context->remote_node_index = idev->rnc.remote_node_index;
228 task_context->command_code = 0;
229
230 task_context->link_layer_control = 0;
231 task_context->do_not_dma_ssp_good_response = 1;
232 task_context->strict_ordering = 0;
233 task_context->control_frame = 0;
234 task_context->timeout_enable = 0;
235 task_context->block_guard_enable = 0;
236
237 task_context->address_modifier = 0;
238
239 /* task_context->type.ssp.tag = ireq->io_tag; */
240 task_context->task_phase = 0x01;
241
242 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
243 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
244 (iport->physical_port_index <<
245 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
246 ISCI_TAG_TCI(ireq->io_tag));
247
248 /*
249 * Copy the physical address for the command buffer to the
250 * SCU Task Context
251 */
252 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
253
254 task_context->command_iu_upper = upper_32_bits(dma_addr);
255 task_context->command_iu_lower = lower_32_bits(dma_addr);
256
257 /*
258 * Copy the physical address for the response buffer to the
259 * SCU Task Context
260 */
261 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
262
263 task_context->response_iu_upper = upper_32_bits(dma_addr);
264 task_context->response_iu_lower = lower_32_bits(dma_addr);
265}
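
The post_context word built above packs the post-TC request type, protocol engine group, logical port, and task context index into a single u32. A sketch with illustrative field positions (the real SCU_CONTEXT_COMMAND_* shifts live in the hardware headers and may differ):

#include <stdint.h>

/* illustrative field positions, not the real register layout */
#define SKETCH_POST_TC		(1u << 28)
#define SKETCH_PEG_SHIFT	26
#define SKETCH_PORT_SHIFT	24

static uint32_t sketch_post_context(uint32_t peg, uint32_t port, uint32_t tci)
{
	return SKETCH_POST_TC |
	       (peg << SKETCH_PEG_SHIFT) |
	       (port << SKETCH_PORT_SHIFT) |
	       tci;	/* ISCI_TAG_TCI(io_tag) in the driver */
}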
266
267/**
268 * This method will fill in the SCU Task Context for an SSP IO request.
269 * @ireq: The isci request for which the task context is constructed.
270 *
271 */
272static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
273 enum dma_data_direction dir,
274 u32 len)
275{
276 struct scu_task_context *task_context = ireq->tc;
277
278 scu_ssp_reqeust_construct_task_context(ireq, task_context);
279
280 task_context->ssp_command_iu_length =
281 sizeof(struct ssp_cmd_iu) / sizeof(u32);
282 task_context->type.ssp.frame_type = SSP_COMMAND;
283
284 switch (dir) {
285 case DMA_FROM_DEVICE:
286 case DMA_NONE:
287 default:
288 task_context->task_type = SCU_TASK_TYPE_IOREAD;
289 break;
290 case DMA_TO_DEVICE:
291 task_context->task_type = SCU_TASK_TYPE_IOWRITE;
292 break;
293 }
294
295 task_context->transfer_length_bytes = len;
296
297 if (task_context->transfer_length_bytes > 0)
298 sci_request_build_sgl(ireq);
299}
300
301/**
302 * This method will fill in the SCU Task Context for an SSP Task request.
303 * The following important settings are utilized:
304 *   -# priority == SCU_TASK_PRIORITY_HIGH. This ensures that the task
305 *      request is issued ahead of other tasks destined for the same
306 *      Remote Node.
307 *   -# task_type == SCU_TASK_TYPE_RAW_FRAME. A raw frame is utilized to
308 *      perform the task management, matching the type set below.
309 *   -# control_frame == 1. This ensures that the proper endianness is set
310 *      so that the bytes are transmitted in the right order for a task frame.
311 * @ireq: This parameter specifies the task request object being constructed.
312 *
313 */
314static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
315{
316 struct scu_task_context *task_context = ireq->tc;
317
318 scu_ssp_reqeust_construct_task_context(ireq, task_context);
319
320 task_context->control_frame = 1;
321 task_context->priority = SCU_TASK_PRIORITY_HIGH;
322 task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
323 task_context->transfer_length_bytes = 0;
324 task_context->type.ssp.frame_type = SSP_TASK;
325 task_context->ssp_command_iu_length =
326 sizeof(struct ssp_task_iu) / sizeof(u32);
327}
328
329/**
330 * This method will fill in the SCU Task Context for any type of SATA
331 * request. This is called from the various SATA constructors.
332 * @ireq: The general IO request object which is to be used in
333 * constructing the SCU task context.
334 * @task_context: The buffer pointer for the SCU task context which is being
335 * constructed.
336 *
337 * On return, the general io request construction is complete and the buffer
338 * assignment for the command buffer is complete. TODO: Revisit task context
339 * construction to determine what is common for SSP/SMP/STP task contexts.
340 */
341static void scu_sata_reqeust_construct_task_context(
342 struct isci_request *ireq,
343 struct scu_task_context *task_context)
344{
345 dma_addr_t dma_addr;
346 struct isci_remote_device *idev;
347 struct isci_port *iport;
348
349 idev = ireq->target_device;
350 iport = idev->owning_port;
351
352	/* Fill in the TC with its required data */
353 task_context->abort = 0;
354 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
355 task_context->initiator_request = 1;
356 task_context->connection_rate = idev->connection_rate;
357 task_context->protocol_engine_index = ISCI_PEG;
358 task_context->logical_port_index = iport->physical_port_index;
359 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
360 task_context->valid = SCU_TASK_CONTEXT_VALID;
361 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
362
363 task_context->remote_node_index = idev->rnc.remote_node_index;
364 task_context->command_code = 0;
365
366 task_context->link_layer_control = 0;
367 task_context->do_not_dma_ssp_good_response = 1;
368 task_context->strict_ordering = 0;
369 task_context->control_frame = 0;
370 task_context->timeout_enable = 0;
371 task_context->block_guard_enable = 0;
372
373 task_context->address_modifier = 0;
374 task_context->task_phase = 0x01;
375
376 task_context->ssp_command_iu_length =
377 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
378
379 /* Set the first word of the H2D REG FIS */
380 task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
381
382 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
383 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
384 (iport->physical_port_index <<
385 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
386 ISCI_TAG_TCI(ireq->io_tag));
387 /*
388 * Copy the physical address for the command buffer to the SCU Task
389 * Context. We must offset the command buffer by 4 bytes because the
390	 * first 4 bytes are transferred in the body of the TC.
391 */
392 dma_addr = sci_io_request_get_dma_addr(ireq,
393 ((char *) &ireq->stp.cmd) +
394 sizeof(u32));
395
396 task_context->command_iu_upper = upper_32_bits(dma_addr);
397 task_context->command_iu_lower = lower_32_bits(dma_addr);
398
399 /* SATA Requests do not have a response buffer */
400 task_context->response_iu_upper = 0;
401 task_context->response_iu_lower = 0;
402}
403
404static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
405{
406 struct scu_task_context *task_context = ireq->tc;
407
408 scu_sata_reqeust_construct_task_context(ireq, task_context);
409
410 task_context->control_frame = 0;
411 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
412 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
413 task_context->type.stp.fis_type = FIS_REGH2D;
414 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
415}
416
417static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
418 bool copy_rx_frame)
419{
420 struct isci_stp_request *stp_req = &ireq->stp.req;
421
422 scu_stp_raw_request_construct_task_context(ireq);
423
424 stp_req->status = 0;
425 stp_req->sgl.offset = 0;
426 stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
427
428 if (copy_rx_frame) {
429 sci_request_build_sgl(ireq);
430 stp_req->sgl.index = 0;
431 } else {
432 /* The user does not want the data copied to the SGL buffer location */
433 stp_req->sgl.index = -1;
434 }
435
436 return SCI_SUCCESS;
437}
438
439/**
440 *
441 * @ireq: This parameter specifies the request to be constructed as an
442 * optimized request.
443 * @optimized_task_type: This parameter specifies whether the request is to be
444 * a UDMA request or an NCQ request. A value of 0 indicates UDMA; a
445 * value of 1 indicates NCQ.
446 *
447 * This method will perform request construction common to all types of STP
448 * requests that are optimized by the silicon (i.e. UDMA, NCQ). Construction
449 * results are written directly into the supplied task context.
450 */
451static void sci_stp_optimized_request_construct(struct isci_request *ireq,
452 u8 optimized_task_type,
453 u32 len,
454 enum dma_data_direction dir)
455{
456 struct scu_task_context *task_context = ireq->tc;
457
458 /* Build the STP task context structure */
459 scu_sata_reqeust_construct_task_context(ireq, task_context);
460
461 /* Copy over the SGL elements */
462 sci_request_build_sgl(ireq);
463
464 /* Copy over the number of bytes to be transfered */
465 task_context->transfer_length_bytes = len;
466
467 if (dir == DMA_TO_DEVICE) {
468 /*
469 * The difference between the DMA IN and DMA OUT request task type
470 * values are consistent with the difference between FPDMA READ
471 * and FPDMA WRITE values. Add the supplied task type parameter
472 * to this difference to set the task type properly for this
473 * DATA OUT (WRITE) case. */
474 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
475 - SCU_TASK_TYPE_DMA_IN);
476 } else {
477 /*
478 * For the DATA IN (READ) case, simply save the supplied
479 * optimized task type. */
480 task_context->task_type = optimized_task_type;
481 }
482}
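
The offset trick in the write path above relies on the read and write task type codes being equally spaced, so adding (DMA_OUT - DMA_IN) turns either read type into its write counterpart. A sketch with illustrative values (the real codes are defined in scu_task_context.h; only the spacing matters):

enum sketch_task_type {	/* illustrative values only */
	SKETCH_DMA_IN = 4,
	SKETCH_DMA_OUT = 5,
	SKETCH_FPDMAQ_READ = 6,
	SKETCH_FPDMAQ_WRITE = 7,
};

static int sketch_to_write_type(int read_type)
{
	/* SKETCH_DMA_IN -> SKETCH_DMA_OUT; SKETCH_FPDMAQ_READ -> SKETCH_FPDMAQ_WRITE */
	return read_type + (SKETCH_DMA_OUT - SKETCH_DMA_IN);
}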
483
484
485
486static enum sci_status
487sci_io_request_construct_sata(struct isci_request *ireq,
488 u32 len,
489 enum dma_data_direction dir,
490 bool copy)
491{
492 enum sci_status status = SCI_SUCCESS;
493 struct sas_task *task = isci_request_access_task(ireq);
494
495 /* check for management protocols */
496 if (ireq->ttype == tmf_task) {
497 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
498
499 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
500 tmf->tmf_code == isci_tmf_sata_srst_low) {
501 scu_stp_raw_request_construct_task_context(ireq);
502 return SCI_SUCCESS;
503 } else {
504 dev_err(&ireq->owning_controller->pdev->dev,
505 "%s: Request 0x%p received un-handled SAT "
506 "management protocol 0x%x.\n",
507 __func__, ireq, tmf->tmf_code);
508
509 return SCI_FAILURE;
510 }
511 }
512
513 if (!sas_protocol_ata(task->task_proto)) {
514 dev_err(&ireq->owning_controller->pdev->dev,
515 "%s: Non-ATA protocol in SATA path: 0x%x\n",
516 __func__,
517 task->task_proto);
518 return SCI_FAILURE;
519
520 }
521
522 /* non data */
523 if (task->data_dir == DMA_NONE) {
524 scu_stp_raw_request_construct_task_context(ireq);
525 return SCI_SUCCESS;
526 }
527
528 /* NCQ */
529 if (task->ata_task.use_ncq) {
530 sci_stp_optimized_request_construct(ireq,
531 SCU_TASK_TYPE_FPDMAQ_READ,
532 len, dir);
533 return SCI_SUCCESS;
534 }
535
536 /* DMA */
537 if (task->ata_task.dma_xfer) {
538 sci_stp_optimized_request_construct(ireq,
539 SCU_TASK_TYPE_DMA_IN,
540 len, dir);
541 return SCI_SUCCESS;
542 } else /* PIO */
543 return sci_stp_pio_request_construct(ireq, copy);
544
545 return status;
546}
547
548static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
549{
550 struct sas_task *task = isci_request_access_task(ireq);
551
552 ireq->protocol = SCIC_SSP_PROTOCOL;
553
554 scu_ssp_io_request_construct_task_context(ireq,
555 task->data_dir,
556 task->total_xfer_len);
557
558 sci_io_request_build_ssp_command_iu(ireq);
559
560 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
561
562 return SCI_SUCCESS;
563}
564
565enum sci_status sci_task_request_construct_ssp(
566 struct isci_request *ireq)
567{
568 /* Construct the SSP Task SCU Task Context */
569 scu_ssp_task_request_construct_task_context(ireq);
570
571 /* Fill in the SSP Task IU */
572 sci_task_request_build_ssp_task_iu(ireq);
573
574 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
575
576 return SCI_SUCCESS;
577}
578
579static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
580{
581 enum sci_status status;
582 bool copy = false;
583 struct sas_task *task = isci_request_access_task(ireq);
584
585 ireq->protocol = SCIC_STP_PROTOCOL;
586
587 copy = (task->data_dir == DMA_NONE) ? false : true;
588
589 status = sci_io_request_construct_sata(ireq,
590 task->total_xfer_len,
591 task->data_dir,
592 copy);
593
594 if (status == SCI_SUCCESS)
595 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
596
597 return status;
598}
599
600enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
601{
602 enum sci_status status = SCI_SUCCESS;
603
604 /* check for management protocols */
605 if (ireq->ttype == tmf_task) {
606 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
607
608 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
609 tmf->tmf_code == isci_tmf_sata_srst_low) {
610 scu_stp_raw_request_construct_task_context(ireq);
611 } else {
612 dev_err(&ireq->owning_controller->pdev->dev,
613 "%s: Request 0x%p received un-handled SAT "
614 "Protocol 0x%x.\n",
615 __func__, ireq, tmf->tmf_code);
616
617 return SCI_FAILURE;
618 }
619 }
620
621 if (status != SCI_SUCCESS)
622 return status;
623 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
624
625 return status;
626}
627
628/**
629 * sci_req_tx_bytes - bytes transferred when reply underruns request
630 * @ireq: request that was terminated early
631 */
632#define SCU_TASK_CONTEXT_SRAM 0x200000
633static u32 sci_req_tx_bytes(struct isci_request *ireq)
634{
635 struct isci_host *ihost = ireq->owning_controller;
636 u32 ret_val = 0;
637
638 if (readl(&ihost->smu_registers->address_modifier) == 0) {
639 void __iomem *scu_reg_base = ihost->scu_registers;
640
641 /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
642 * BAR1 is the scu_registers
643 * 0x20002C = 0x200000 + 0x2c
644 * = start of task context SRAM + offset of (type.ssp.data_offset)
645 * TCi is the io_tag of struct sci_request
646 */
647 ret_val = readl(scu_reg_base +
648 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
649 ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
650 }
651
652 return ret_val;
653}
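
A worked instance of the SRAM address computed above, assuming a 256-byte struct scu_task_context and the 0x2c data_offset byte offset described in the comment (both are assumptions for illustration):

#include <stdio.h>

int main(void)
{
	unsigned sram = 0x200000;	/* SCU_TASK_CONTEXT_SRAM */
	unsigned data_off = 0x2c;	/* offset of type.ssp.data_offset, per the comment */
	unsigned tc_size = 256;		/* assumed sizeof(struct scu_task_context) */
	unsigned tci = 3;		/* hypothetical tag */

	/* prints: readl at BAR1 + 0x20032c */
	printf("readl at BAR1 + 0x%x\n", sram + data_off + tc_size * tci);
	return 0;
}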
654
655enum sci_status sci_request_start(struct isci_request *ireq)
656{
657 enum sci_base_request_states state;
658 struct scu_task_context *tc = ireq->tc;
659 struct isci_host *ihost = ireq->owning_controller;
660
661 state = ireq->sm.current_state_id;
662 if (state != SCI_REQ_CONSTRUCTED) {
663 dev_warn(&ihost->pdev->dev,
664 "%s: SCIC IO Request requested to start while in wrong "
665 "state %d\n", __func__, state);
666 return SCI_FAILURE_INVALID_STATE;
667 }
668
669 tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
670
671 switch (tc->protocol_type) {
672 case SCU_TASK_CONTEXT_PROTOCOL_SMP:
673 case SCU_TASK_CONTEXT_PROTOCOL_SSP:
674 /* SSP/SMP Frame */
675 tc->type.ssp.tag = ireq->io_tag;
676 tc->type.ssp.target_port_transfer_tag = 0xFFFF;
677 break;
678
679 case SCU_TASK_CONTEXT_PROTOCOL_STP:
680 /* STP/SATA Frame
681 * tc->type.stp.ncq_tag = ireq->ncq_tag;
682 */
683 break;
684
685 case SCU_TASK_CONTEXT_PROTOCOL_NONE:
686		/* TODO: When do we set no protocol type? */
687 break;
688
689 default:
690 /* This should never happen since we build the IO
691 * requests */
692 break;
693 }
694
695 /* Add to the post_context the io tag value */
696 ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);
697
698 /* Everything is good go ahead and change state */
699 sci_change_state(&ireq->sm, SCI_REQ_STARTED);
700
701 return SCI_SUCCESS;
702}
703
704enum sci_status
705sci_io_request_terminate(struct isci_request *ireq)
706{
707 enum sci_base_request_states state;
708
709 state = ireq->sm.current_state_id;
710
711 switch (state) {
712 case SCI_REQ_CONSTRUCTED:
713 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
714 ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
715 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
716 return SCI_SUCCESS;
717 case SCI_REQ_STARTED:
718 case SCI_REQ_TASK_WAIT_TC_COMP:
719 case SCI_REQ_SMP_WAIT_RESP:
720 case SCI_REQ_SMP_WAIT_TC_COMP:
721 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
722 case SCI_REQ_STP_UDMA_WAIT_D2H:
723 case SCI_REQ_STP_NON_DATA_WAIT_H2D:
724 case SCI_REQ_STP_NON_DATA_WAIT_D2H:
725 case SCI_REQ_STP_PIO_WAIT_H2D:
726 case SCI_REQ_STP_PIO_WAIT_FRAME:
727 case SCI_REQ_STP_PIO_DATA_IN:
728 case SCI_REQ_STP_PIO_DATA_OUT:
729 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
730 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
731 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
732 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
733 return SCI_SUCCESS;
734 case SCI_REQ_TASK_WAIT_TC_RESP:
735 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
736 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
737 return SCI_SUCCESS;
738 case SCI_REQ_ABORTING:
739 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
740 return SCI_SUCCESS;
741 case SCI_REQ_COMPLETED:
742 default:
743 dev_warn(&ireq->owning_controller->pdev->dev,
744 "%s: SCIC IO Request requested to abort while in wrong "
745 "state %d\n",
746 __func__,
747 ireq->sm.current_state_id);
748 break;
749 }
750
751 return SCI_FAILURE_INVALID_STATE;
752}
753
754enum sci_status sci_request_complete(struct isci_request *ireq)
755{
756 enum sci_base_request_states state;
757 struct isci_host *ihost = ireq->owning_controller;
758
759 state = ireq->sm.current_state_id;
760 if (WARN_ONCE(state != SCI_REQ_COMPLETED,
761 "isci: request completion from wrong state (%d)\n", state))
762 return SCI_FAILURE_INVALID_STATE;
763
764 if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
765 sci_controller_release_frame(ihost,
766 ireq->saved_rx_frame_index);
767
768 /* XXX can we just stop the machine and remove the 'final' state? */
769 sci_change_state(&ireq->sm, SCI_REQ_FINAL);
770 return SCI_SUCCESS;
771}
772
773enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
774 u32 event_code)
775{
776 enum sci_base_request_states state;
777 struct isci_host *ihost = ireq->owning_controller;
778
779 state = ireq->sm.current_state_id;
780
781 if (state != SCI_REQ_STP_PIO_DATA_IN) {
782 dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
783 __func__, event_code, state);
784
785 return SCI_FAILURE_INVALID_STATE;
786 }
787
788 switch (scu_get_event_specifier(event_code)) {
789 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
790 /* We are waiting for data and the SCU has R_ERR the data frame.
791 * Go back to waiting for the D2H Register FIS
792 */
793 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
794 return SCI_SUCCESS;
795 default:
796 dev_err(&ihost->pdev->dev,
797 "%s: pio request unexpected event %#x\n",
798 __func__, event_code);
799
800 /* TODO Should we fail the PIO request when we get an
801 * unexpected event?
802 */
803 return SCI_FAILURE;
804 }
805}
806
807/*
808 * This function copies response data for requests returning response data
809 * instead of sense data.
810 * @ireq: This parameter specifies the request object for which to copy
811 * the response data.
812 */
813static void sci_io_request_copy_response(struct isci_request *ireq)
814{
815 void *resp_buf;
816 u32 len;
817 struct ssp_response_iu *ssp_response;
818 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
819
820 ssp_response = &ireq->ssp.rsp;
821
822 resp_buf = &isci_tmf->resp.resp_iu;
823
824 len = min_t(u32,
825 SSP_RESP_IU_MAX_SIZE,
826 be32_to_cpu(ssp_response->response_data_len));
827
828 memcpy(resp_buf, ssp_response->resp_data, len);
829}
830
831static enum sci_status
832request_started_state_tc_event(struct isci_request *ireq,
833 u32 completion_code)
834{
835 struct ssp_response_iu *resp_iu;
836 u8 datapres;
837
838	/* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
839	 * to determine the SDMA status
840 */
841 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
842 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
843 ireq->scu_status = SCU_TASK_DONE_GOOD;
844 ireq->sci_status = SCI_SUCCESS;
845 break;
846 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
847 /* There are times when the SCU hardware will return an early
848 * response because the io request specified more data than is
849 * returned by the target device (mode pages, inquiry data,
850 * etc.). We must check the response stats to see if this is
851 * truly a failed request or a good request that just got
852 * completed early.
853 */
854 struct ssp_response_iu *resp = &ireq->ssp.rsp;
855 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
856
857 sci_swab32_cpy(&ireq->ssp.rsp,
858 &ireq->ssp.rsp,
859 word_cnt);
860
861 if (resp->status == 0) {
862 ireq->scu_status = SCU_TASK_DONE_GOOD;
863 ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
864 } else {
865 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
866 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
867 }
868 break;
869 }
870 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
871 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
872
873 sci_swab32_cpy(&ireq->ssp.rsp,
874 &ireq->ssp.rsp,
875 word_cnt);
876
877 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
878 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
879 break;
880 }
881
882 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
883 /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
884 * guaranteed to be received before this completion status is
885 * posted?
886 */
887 resp_iu = &ireq->ssp.rsp;
888 datapres = resp_iu->datapres;
889
890 if (datapres == 1 || datapres == 2) {
891 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
892 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
893 } else {
894 ireq->scu_status = SCU_TASK_DONE_GOOD;
895 ireq->sci_status = SCI_SUCCESS;
896 }
897 break;
898 /* only stp device gets suspended. */
899 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
900 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
901 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
902 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
903 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
904 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
905 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
906 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
907 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
908 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
909 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
910 if (ireq->protocol == SCIC_STP_PROTOCOL) {
911 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
912 SCU_COMPLETION_TL_STATUS_SHIFT;
913 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
914 } else {
915 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
916 SCU_COMPLETION_TL_STATUS_SHIFT;
917 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
918 }
919 break;
920
921 /* both stp/ssp device gets suspended */
922 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
923 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
924 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
925 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
926 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
927 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
928 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
929 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
930 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
931 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
932 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
933 SCU_COMPLETION_TL_STATUS_SHIFT;
934 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
935 break;
936
937 /* neither ssp nor stp gets suspended. */
938 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
939 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
940 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
941 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
942 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
943 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
944 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
945 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
946 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
947 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
948 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
949 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
950 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
951 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
952 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
953 default:
954 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
955 SCU_COMPLETION_TL_STATUS_SHIFT;
956 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
957 break;
958 }
959
960 /*
961 * TODO: This is probably wrong for ACK/NAK timeout conditions
962 */
963
964 /* In all cases we will treat this as the completion of the IO req. */
965 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
966 return SCI_SUCCESS;
967}
968
969static enum sci_status
970request_aborting_state_tc_event(struct isci_request *ireq,
971 u32 completion_code)
972{
973 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
974 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
975 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
976 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
977 ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
978 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
979 break;
980
981 default:
982		/* Unless we get some strange error, wait for the task abort to complete
983 * TODO: Should there be a state change for this completion?
984 */
985 break;
986 }
987
988 return SCI_SUCCESS;
989}
990
991static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
992 u32 completion_code)
993{
994 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
995 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
996 ireq->scu_status = SCU_TASK_DONE_GOOD;
997 ireq->sci_status = SCI_SUCCESS;
998 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
999 break;
1000 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
1001 /* Currently, the decision is to simply allow the task request
1002 * to timeout if the task IU wasn't received successfully.
1003 * There is a potential for receiving multiple task responses if
1004 * we decide to send the task IU again.
1005 */
1006 dev_warn(&ireq->owning_controller->pdev->dev,
1007 "%s: TaskRequest:0x%p CompletionCode:%x - "
1008 "ACK/NAK timeout\n", __func__, ireq,
1009 completion_code);
1010
1011 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
1012 break;
1013 default:
1014 /*
1015 * All other completion status cause the IO to be complete.
1016 * If a NAK was received, then it is up to the user to retry
1017 * the request.
1018 */
1019 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1020 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1021 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1022 break;
1023 }
1024
1025 return SCI_SUCCESS;
1026}
1027
1028static enum sci_status
1029smp_request_await_response_tc_event(struct isci_request *ireq,
1030 u32 completion_code)
1031{
1032 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1033 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1034 /* In the AWAIT RESPONSE state, any TC completion is
1035		 * unexpected, but if the TC has success status, we
1036 * complete the IO anyway.
1037 */
1038 ireq->scu_status = SCU_TASK_DONE_GOOD;
1039 ireq->sci_status = SCI_SUCCESS;
1040 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1041 break;
1042 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
1043 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
1044 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
1045 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
1046		/* These statuses have been seen in a specific LSI
1047		 * expander, which sometimes is not able to send an smp
1048		 * response within 2 ms. This causes our hardware to break
1049		 * the connection and set the TC completion with one of
1050		 * these SMP_XXX_XX_ERR statuses. For this type of error,
1051		 * we ask the ihost user to retry the request.
1052 */
1053 ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
1054 ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
1055 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1056 break;
1057 default:
1058 /* All other completion status cause the IO to be complete. If a NAK
1059 * was received, then it is up to the user to retry the request
1060 */
1061 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1062 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1063 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1064 break;
1065 }
1066
1067 return SCI_SUCCESS;
1068}
1069
1070static enum sci_status
1071smp_request_await_tc_event(struct isci_request *ireq,
1072 u32 completion_code)
1073{
1074 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1075 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1076 ireq->scu_status = SCU_TASK_DONE_GOOD;
1077 ireq->sci_status = SCI_SUCCESS;
1078 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1079 break;
1080 default:
1081 /* All other completion status cause the IO to be
1082 * complete. If a NAK was received, then it is up to
1083 * the user to retry the request.
1084 */
1085 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1086 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1087 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1088 break;
1089 }
1090
1091 return SCI_SUCCESS;
1092}
1093
1094static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
1095{
1096 struct scu_sgl_element *sgl;
1097 struct scu_sgl_element_pair *sgl_pair;
1098 struct isci_request *ireq = to_ireq(stp_req);
1099 struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
1100
1101 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
1102 if (!sgl_pair)
1103 sgl = NULL;
1104 else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
1105 if (sgl_pair->B.address_lower == 0 &&
1106 sgl_pair->B.address_upper == 0) {
1107 sgl = NULL;
1108 } else {
1109 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
1110 sgl = &sgl_pair->B;
1111 }
1112 } else {
1113 if (sgl_pair->next_pair_lower == 0 &&
1114 sgl_pair->next_pair_upper == 0) {
1115 sgl = NULL;
1116 } else {
1117 pio_sgl->index++;
1118 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
1119 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
1120 sgl = &sgl_pair->A;
1121 }
1122 }
1123
1124 return sgl;
1125}
1126
1127static enum sci_status
1128stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
1129 u32 completion_code)
1130{
1131 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1132 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1133 ireq->scu_status = SCU_TASK_DONE_GOOD;
1134 ireq->sci_status = SCI_SUCCESS;
1135 sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
1136 break;
1137
1138 default:
1139		/* All other completion statuses cause the IO to be
1140		 * completed. If a NAK was received, then it is up to
1141 * the user to retry the request.
1142 */
1143 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1144 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1145 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1146 break;
1147 }
1148
1149 return SCI_SUCCESS;
1150}
1151
1152#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
1153
1154/* Transmit a DATA_FIS from (current sgl + offset) for the input
1155 * parameter length. The current sgl and offset are already stored in the IO request.
1156 */
1157static enum sci_status sci_stp_request_pio_data_out_transmit_data_frame(
1158 struct isci_request *ireq,
1159 u32 length)
1160{
1161 struct isci_stp_request *stp_req = &ireq->stp.req;
1162 struct scu_task_context *task_context = ireq->tc;
1163 struct scu_sgl_element_pair *sgl_pair;
1164 struct scu_sgl_element *current_sgl;
1165
1166	/* Recycle the TC and reconstruct it to send out a DATA FIS
1167	 * containing the data from current_sgl+offset for the input length
1168 */
1169 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
1170 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
1171 current_sgl = &sgl_pair->A;
1172 else
1173 current_sgl = &sgl_pair->B;
1174
1175 /* update the TC */
1176 task_context->command_iu_upper = current_sgl->address_upper;
1177 task_context->command_iu_lower = current_sgl->address_lower;
1178 task_context->transfer_length_bytes = length;
1179 task_context->type.stp.fis_type = FIS_DATA;
1180
1181 /* send the new TC out. */
1182 return sci_controller_continue_io(ireq);
1183}
1184
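/* Send the next chunk of PIO data-out: at most the bytes remaining in the
 * current SGL element are sent per DATA FIS, and the saved sgl/offset
 * state is advanced so the next invocation resumes where this one stopped.
 */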
1185static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
1186{
1187 struct isci_stp_request *stp_req = &ireq->stp.req;
1188 struct scu_sgl_element_pair *sgl_pair;
1189 struct scu_sgl_element *sgl;
1190 enum sci_status status;
1191 u32 offset;
1192 u32 len = 0;
1193
1194 offset = stp_req->sgl.offset;
1195 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
1196 if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
1197 return SCI_FAILURE;
1198
1199 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
1200 sgl = &sgl_pair->A;
1201 len = sgl_pair->A.length - offset;
1202 } else {
1203 sgl = &sgl_pair->B;
1204 len = sgl_pair->B.length - offset;
1205 }
1206
1207 if (stp_req->pio_len == 0)
1208 return SCI_SUCCESS;
1209
1210 if (stp_req->pio_len >= len) {
1211		status = sci_stp_request_pio_data_out_transmit_data_frame(ireq, len);
1212 if (status != SCI_SUCCESS)
1213 return status;
1214 stp_req->pio_len -= len;
1215
1216		/* update the current sgl and offset, and save them for future use */
1217 sgl = pio_sgl_next(stp_req);
1218 offset = 0;
1219 } else if (stp_req->pio_len < len) {
1220		status = sci_stp_request_pio_data_out_transmit_data_frame(ireq, stp_req->pio_len);
1221
1222		/* Sgl offset will be adjusted and saved for future use */
1223 offset += stp_req->pio_len;
1224 sgl->address_lower += stp_req->pio_len;
1225 stp_req->pio_len = 0;
1226 }
1227
1228 stp_req->sgl.offset = offset;
1229
1230 return status;
1231}
1232
1233/**
1234 *
1235 * @stp_req: The request that is used for the SGL processing.
1236 * @data_buf: The buffer of data to be copied.
1237 * @len: The length of the data transfer.
1238 *
1239 * Copy the data from the buffer for the length specified to the IO request SGL
1240 * specified data region.
1241 */
1242static enum sci_status
1243sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
1244 u8 *data_buf, u32 len)
1245{
1246 struct isci_request *ireq;
1247 u8 *src_addr;
1248 int copy_len;
1249 struct sas_task *task;
1250 struct scatterlist *sg;
1251 void *kaddr;
1252 int total_len = len;
1253
1254 ireq = to_ireq(stp_req);
1255 task = isci_request_access_task(ireq);
1256 src_addr = data_buf;
1257
1258 if (task->num_scatter > 0) {
1259 sg = task->scatter;
1260
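		/* Walk the scatterlist, mapping each page atomically (this
		 * path may run in interrupt context) and copying as much of
		 * the frame data as the current element will hold.
		 */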
1261 while (total_len > 0) {
1262 struct page *page = sg_page(sg);
1263
1264 copy_len = min_t(int, total_len, sg_dma_len(sg));
1265 kaddr = kmap_atomic(page, KM_IRQ0);
1266 memcpy(kaddr + sg->offset, src_addr, copy_len);
1267 kunmap_atomic(kaddr, KM_IRQ0);
1268 total_len -= copy_len;
1269 src_addr += copy_len;
1270 sg = sg_next(sg);
1271 }
1272 } else {
1273 BUG_ON(task->total_xfer_len < total_len);
1274 memcpy(task->scatter, src_addr, total_len);
1275 }
1276
1277 return SCI_SUCCESS;
1278}
1279
1280/**
1281 *
1282 * @stp_req: The PIO DATA IN request that is to receive the data.
1283 * @data_buffer: The buffer to copy from.
1284 *
1285 * Copy the data buffer to the io request data region.
1286 */
1287static enum sci_status sci_stp_request_pio_data_in_copy_data(
1288 struct isci_stp_request *stp_req,
1289 u8 *data_buffer)
1290{
1291 enum sci_status status;
1292
1293 /*
1294	 * If there is less than 1K remaining in the transfer request,
1295 * copy just the data for the transfer */
1296 if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
1297 status = sci_stp_request_pio_data_in_copy_data_buffer(
1298 stp_req, data_buffer, stp_req->pio_len);
1299
1300 if (status == SCI_SUCCESS)
1301 stp_req->pio_len = 0;
1302 } else {
1303		/* We are transferring the whole frame so copy */
1304 status = sci_stp_request_pio_data_in_copy_data_buffer(
1305 stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1306
1307 if (status == SCI_SUCCESS)
1308 stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
1309 }
1310
1311 return status;
1312}
1313
1314static enum sci_status
1315stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
1316 u32 completion_code)
1317{
1318 enum sci_status status = SCI_SUCCESS;
1319
1320 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1321 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1322 ireq->scu_status = SCU_TASK_DONE_GOOD;
1323 ireq->sci_status = SCI_SUCCESS;
1324 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1325 break;
1326
1327 default:
1328		/* All other completion statuses cause the IO to be
1329		 * completed. If a NAK was received, then it is up to
1330 * the user to retry the request.
1331 */
1332 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1333 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1334 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1335 break;
1336 }
1337
1338 return status;
1339}
1340
1341static enum sci_status
1342pio_data_out_tx_done_tc_event(struct isci_request *ireq,
1343 u32 completion_code)
1344{
1345 enum sci_status status = SCI_SUCCESS;
1346 bool all_frames_transferred = false;
1347 struct isci_stp_request *stp_req = &ireq->stp.req;
1348
1349 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1350 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1351 /* Transmit data */
1352 if (stp_req->pio_len != 0) {
1353 status = sci_stp_request_pio_data_out_transmit_data(ireq);
1354 if (status == SCI_SUCCESS) {
1355 if (stp_req->pio_len == 0)
1356 all_frames_transferred = true;
1357 }
1358 } else if (stp_req->pio_len == 0) {
1359 /*
1360			 * This happens when all of the data is written the
1361			 * first time after the PIO Setup FIS is received.
1362 */
1363 all_frames_transferred = true;
1364 }
1365
1366 /* all data transferred. */
1367 if (all_frames_transferred) {
1368 /*
1369			 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
1370			 * and wait for a PIO_SETUP or D2H Register FIS. */
1371 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1372 }
1373 break;
1374
1375 default:
1376 /*
1377		 * All other completion statuses cause the IO to be completed.
1378 * If a NAK was received, then it is up to the user to retry
1379 * the request.
1380 */
1381 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1382 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1383 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1384 break;
1385 }
1386
1387 return status;
1388}
1389
1390static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
1391 u32 frame_index)
1392{
1393 struct isci_host *ihost = ireq->owning_controller;
1394 struct dev_to_host_fis *frame_header;
1395 enum sci_status status;
1396 u32 *frame_buffer;
1397
1398 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1399 frame_index,
1400 (void **)&frame_header);
1401
1402 if ((status == SCI_SUCCESS) &&
1403 (frame_header->fis_type == FIS_REGD2H)) {
1404 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1405 frame_index,
1406 (void **)&frame_buffer);
1407
1408 sci_controller_copy_sata_response(&ireq->stp.rsp,
1409 frame_header,
1410 frame_buffer);
1411 }
1412
1413 sci_controller_release_frame(ihost, frame_index);
1414
1415 return status;
1416}
1417
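/**
 * sci_io_request_frame_handler() - Dispatch an unsolicited frame to the
 *    handler for the request's current state.
 * @ireq: The request that the unsolicited frame is bound to.
 * @frame_index: The index of the received unsolicited frame.
 *
 * Most paths release the frame back to the controller once it has been
 * decoded; the PIO data-in path may instead save the frame index for
 * later processing.
 */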
1418enum sci_status
1419sci_io_request_frame_handler(struct isci_request *ireq,
1420 u32 frame_index)
1421{
1422 struct isci_host *ihost = ireq->owning_controller;
1423 struct isci_stp_request *stp_req = &ireq->stp.req;
1424 enum sci_base_request_states state;
1425 enum sci_status status;
1426 ssize_t word_cnt;
1427
1428 state = ireq->sm.current_state_id;
1429 switch (state) {
1430 case SCI_REQ_STARTED: {
1431 struct ssp_frame_hdr ssp_hdr;
1432 void *frame_header;
1433
1434 sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1435 frame_index,
1436 &frame_header);
1437
1438 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1439 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1440
1441 if (ssp_hdr.frame_type == SSP_RESPONSE) {
1442 struct ssp_response_iu *resp_iu;
1443 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1444
1445 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1446 frame_index,
1447 (void **)&resp_iu);
1448
1449 sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
1450
1451 resp_iu = &ireq->ssp.rsp;
1452
1453 if (resp_iu->datapres == 0x01 ||
1454 resp_iu->datapres == 0x02) {
1455 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1456 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1457 } else {
1458 ireq->scu_status = SCU_TASK_DONE_GOOD;
1459 ireq->sci_status = SCI_SUCCESS;
1460 }
1461 } else {
1462 /* not a response frame, why did it get forwarded? */
1463 dev_err(&ihost->pdev->dev,
1464 "%s: SCIC IO Request 0x%p received unexpected "
1465 "frame %d type 0x%02x\n", __func__, ireq,
1466 frame_index, ssp_hdr.frame_type);
1467 }
1468
1469 /*
1470		 * In any case we are done with this frame buffer; return it to
1471 * the controller
1472 */
1473 sci_controller_release_frame(ihost, frame_index);
1474
1475 return SCI_SUCCESS;
1476 }
1477
1478 case SCI_REQ_TASK_WAIT_TC_RESP:
1479 sci_io_request_copy_response(ireq);
1480 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1481 sci_controller_release_frame(ihost, frame_index);
1482 return SCI_SUCCESS;
1483
1484 case SCI_REQ_SMP_WAIT_RESP: {
1485 struct smp_resp *rsp_hdr = &ireq->smp.rsp;
1486 void *frame_header;
1487
1488 sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1489 frame_index,
1490 &frame_header);
1491
1492 /* byte swap the header. */
1493 word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
1494 sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
1495
1496 if (rsp_hdr->frame_type == SMP_RESPONSE) {
1497 void *smp_resp;
1498
1499 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1500 frame_index,
1501 &smp_resp);
1502
1503 word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
1504 sizeof(u32);
1505
1506 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
1507 smp_resp, word_cnt);
1508
1509 ireq->scu_status = SCU_TASK_DONE_GOOD;
1510 ireq->sci_status = SCI_SUCCESS;
1511 sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1512 } else {
1513 /*
1514 * This was not a response frame why did it get
1515 * forwarded?
1516 */
1517 dev_err(&ihost->pdev->dev,
1518 "%s: SCIC SMP Request 0x%p received unexpected "
1519 "frame %d type 0x%02x\n",
1520 __func__,
1521 ireq,
1522 frame_index,
1523 rsp_hdr->frame_type);
1524
1525 ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
1526 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1527 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1528 }
1529
1530 sci_controller_release_frame(ihost, frame_index);
1531
1532 return SCI_SUCCESS;
1533 }
1534
1535 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1536 return sci_stp_request_udma_general_frame_handler(ireq,
1537 frame_index);
1538
1539 case SCI_REQ_STP_UDMA_WAIT_D2H:
1540		/* Use the general frame handler to copy the response data */
1541 status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
1542
1543 if (status != SCI_SUCCESS)
1544 return status;
1545
1546 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1547 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1548 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1549 return SCI_SUCCESS;
1550
1551 case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
1552 struct dev_to_host_fis *frame_header;
1553 u32 *frame_buffer;
1554
1555 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1556 frame_index,
1557 (void **)&frame_header);
1558
1559 if (status != SCI_SUCCESS) {
1560 dev_err(&ihost->pdev->dev,
1561 "%s: SCIC IO Request 0x%p could not get frame "
1562 "header for frame index %d, status %x\n",
1563 __func__,
1564 stp_req,
1565 frame_index,
1566 status);
1567
1568 return status;
1569 }
1570
1571 switch (frame_header->fis_type) {
1572 case FIS_REGD2H:
1573 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1574 frame_index,
1575 (void **)&frame_buffer);
1576
1577 sci_controller_copy_sata_response(&ireq->stp.rsp,
1578 frame_header,
1579 frame_buffer);
1580
1581 /* The command has completed with error */
1582 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1583 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1584 break;
1585
1586 default:
1587 dev_warn(&ihost->pdev->dev,
1588 "%s: IO Request:0x%p Frame Id:%d protocol "
1589 "violation occurred\n", __func__, stp_req,
1590 frame_index);
1591
1592 ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
1593 ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1594 break;
1595 }
1596
1597 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1598
1599		/* Frame has been decoded; return it to the controller */
1600 sci_controller_release_frame(ihost, frame_index);
1601
1602 return status;
1603 }
1604
1605 case SCI_REQ_STP_PIO_WAIT_FRAME: {
1606 struct sas_task *task = isci_request_access_task(ireq);
1607 struct dev_to_host_fis *frame_header;
1608 u32 *frame_buffer;
1609
1610 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1611 frame_index,
1612 (void **)&frame_header);
1613
1614 if (status != SCI_SUCCESS) {
1615 dev_err(&ihost->pdev->dev,
1616 "%s: SCIC IO Request 0x%p could not get frame "
1617 "header for frame index %d, status %x\n",
1618 __func__, stp_req, frame_index, status);
1619 return status;
1620 }
1621
1622 switch (frame_header->fis_type) {
1623 case FIS_PIO_SETUP:
1624 /* Get from the frame buffer the PIO Setup Data */
1625 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1626 frame_index,
1627 (void **)&frame_buffer);
1628
1629			/* Get the data from the PIO Setup. The SCU hardware
1630			 * returns the first word in the frame_header and the
1631			 * rest of the data is in the frame buffer, so we need
1632			 * to back up one dword.
1633 */
1634
1635			/* transfer_count: first 16 bits in the 4th dword */
1636 stp_req->pio_len = frame_buffer[3] & 0xffff;
1637
1638 /* status: 4th byte in the 3rd dword */
1639 stp_req->status = (frame_buffer[2] >> 24) & 0xff;
1640
1641 sci_controller_copy_sata_response(&ireq->stp.rsp,
1642 frame_header,
1643 frame_buffer);
1644
1645 ireq->stp.rsp.status = stp_req->status;
1646
1647 /* The next state is dependent on whether the
1648			 * request was PIO Data-in or Data-out.
1649 */
1650 if (task->data_dir == DMA_FROM_DEVICE) {
1651 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
1652 } else if (task->data_dir == DMA_TO_DEVICE) {
1653 /* Transmit data */
1654 status = sci_stp_request_pio_data_out_transmit_data(ireq);
1655 if (status != SCI_SUCCESS)
1656 break;
1657 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
1658 }
1659 break;
1660
1661 case FIS_SETDEVBITS:
1662 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1663 break;
1664
1665 case FIS_REGD2H:
1666 if (frame_header->status & ATA_BUSY) {
1667 /*
1668 * Now why is the drive sending a D2H Register
1669 * FIS when it is still busy? Do nothing since
1670 * we are still in the right state.
1671 */
1672 dev_dbg(&ihost->pdev->dev,
1673 "%s: SCIC PIO Request 0x%p received "
1674 "D2H Register FIS with BSY status "
1675 "0x%x\n",
1676 __func__,
1677 stp_req,
1678 frame_header->status);
1679 break;
1680 }
1681
1682 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1683 frame_index,
1684 (void **)&frame_buffer);
1685
1686			sci_controller_copy_sata_response(&ireq->stp.rsp,
1687 frame_header,
1688 frame_buffer);
1689
1690 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1691 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1692 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1693 break;
1694
1695 default:
1696 /* FIXME: what do we do here? */
1697 break;
1698 }
1699
1700		/* Frame is decoded; return it to the controller */
1701 sci_controller_release_frame(ihost, frame_index);
1702
1703 return status;
1704 }
1705
1706 case SCI_REQ_STP_PIO_DATA_IN: {
1707 struct dev_to_host_fis *frame_header;
1708 struct sata_fis_data *frame_buffer;
1709
1710 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1711 frame_index,
1712 (void **)&frame_header);
1713
1714 if (status != SCI_SUCCESS) {
1715 dev_err(&ihost->pdev->dev,
1716 "%s: SCIC IO Request 0x%p could not get frame "
1717 "header for frame index %d, status %x\n",
1718 __func__,
1719 stp_req,
1720 frame_index,
1721 status);
1722 return status;
1723 }
1724
1725 if (frame_header->fis_type != FIS_DATA) {
1726 dev_err(&ihost->pdev->dev,
1727 "%s: SCIC PIO Request 0x%p received frame %d "
1728 "with fis type 0x%02x when expecting a data "
1729 "fis.\n",
1730 __func__,
1731 stp_req,
1732 frame_index,
1733 frame_header->fis_type);
1734
1735 ireq->scu_status = SCU_TASK_DONE_GOOD;
1736 ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
1737 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1738
1739			/* Frame is decoded; return it to the controller */
1740 sci_controller_release_frame(ihost, frame_index);
1741 return status;
1742 }
1743
1744 if (stp_req->sgl.index < 0) {
1745 ireq->saved_rx_frame_index = frame_index;
1746 stp_req->pio_len = 0;
1747 } else {
1748 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1749 frame_index,
1750 (void **)&frame_buffer);
1751
1752 status = sci_stp_request_pio_data_in_copy_data(stp_req,
1753 (u8 *)frame_buffer);
1754
1755			/* Frame is decoded; return it to the controller */
1756 sci_controller_release_frame(ihost, frame_index);
1757 }
1758
1759		/* Check for the end of the transfer: are there more
1760		 * bytes remaining for this data transfer?
1761 */
1762 if (status != SCI_SUCCESS || stp_req->pio_len != 0)
1763 return status;
1764
1765 if ((stp_req->status & ATA_BUSY) == 0) {
1766 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1767 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1768 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1769 } else {
1770 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1771 }
1772 return status;
1773 }
1774
1775 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
1776 struct dev_to_host_fis *frame_header;
1777 u32 *frame_buffer;
1778
1779 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1780 frame_index,
1781 (void **)&frame_header);
1782 if (status != SCI_SUCCESS) {
1783 dev_err(&ihost->pdev->dev,
1784 "%s: SCIC IO Request 0x%p could not get frame "
1785 "header for frame index %d, status %x\n",
1786 __func__,
1787 stp_req,
1788 frame_index,
1789 status);
1790 return status;
1791 }
1792
1793 switch (frame_header->fis_type) {
1794 case FIS_REGD2H:
1795 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1796 frame_index,
1797 (void **)&frame_buffer);
1798
1799 sci_controller_copy_sata_response(&ireq->stp.rsp,
1800 frame_header,
1801 frame_buffer);
1802
1803 /* The command has completed with error */
1804 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1805 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1806 break;
1807
1808 default:
1809 dev_warn(&ihost->pdev->dev,
1810 "%s: IO Request:0x%p Frame Id:%d protocol "
1811 "violation occurred\n",
1812 __func__,
1813 stp_req,
1814 frame_index);
1815
1816 ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
1817 ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1818 break;
1819 }
1820
1821 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1822
1823		/* Frame has been decoded; return it to the controller */
1824 sci_controller_release_frame(ihost, frame_index);
1825
1826 return status;
1827 }
1828 case SCI_REQ_ABORTING:
1829 /*
1830 * TODO: Is it even possible to get an unsolicited frame in the
1831 * aborting state?
1832 */
1833 sci_controller_release_frame(ihost, frame_index);
1834 return SCI_SUCCESS;
1835
1836 default:
1837 dev_warn(&ihost->pdev->dev,
1838 "%s: SCIC IO Request given unexpected frame %x while "
1839 "in state %d\n",
1840 __func__,
1841 frame_index,
1842 state);
1843
1844 sci_controller_release_frame(ihost, frame_index);
1845 return SCI_FAILURE_INVALID_STATE;
1846 }
1847}
1848
1849static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
1850 u32 completion_code)
1851{
1852 enum sci_status status = SCI_SUCCESS;
1853
1854 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1855 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1856 ireq->scu_status = SCU_TASK_DONE_GOOD;
1857 ireq->sci_status = SCI_SUCCESS;
1858 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1859 break;
1860 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
1861 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
1862		/* We must check the response buffer to see if the D2H
1863 * Register FIS was received before we got the TC
1864 * completion.
1865 */
1866 if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
1867 sci_remote_device_suspend(ireq->target_device,
1868 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1869
1870 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1871 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1872 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1873 } else {
1874 /* If we have an error completion status for the
1875 * TC then we can expect a D2H register FIS from
1876 * the device so we must change state to wait
1877 * for it
1878 */
1879 sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
1880 }
1881 break;
1882
1883	/* TODO Check to see if any of these completion statuses need to
1884	 * wait for the device-to-host register FIS.
1885 */
1886 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
1887 * - this comes only for B0
1888 */
1889 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
1890 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
1891 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
1892 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
1893 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
1894 sci_remote_device_suspend(ireq->target_device,
1895 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1896 /* Fall through to the default case */
1897 default:
1898		/* All other completion statuses cause the IO to be completed. */
1899 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1900 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1901 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1902 break;
1903 }
1904
1905 return status;
1906}
1907
1908static enum sci_status
1909stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
1910 u32 completion_code)
1911{
1912 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1913 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1914 ireq->scu_status = SCU_TASK_DONE_GOOD;
1915 ireq->sci_status = SCI_SUCCESS;
1916 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
1917 break;
1918
1919 default:
1920 /*
1921		 * All other completion statuses cause the IO to be completed.
1922 * If a NAK was received, then it is up to the user to retry
1923 * the request.
1924 */
1925 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1926 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1927 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1928 break;
1929 }
1930
1931 return SCI_SUCCESS;
1932}
1933
1934static enum sci_status
1935stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
1936 u32 completion_code)
1937{
1938 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1939 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1940 ireq->scu_status = SCU_TASK_DONE_GOOD;
1941 ireq->sci_status = SCI_SUCCESS;
1942 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
1943 break;
1944
1945 default:
1946		/* All other completion statuses cause the IO to be completed. If
1947 * a NAK was received, then it is up to the user to retry the
1948 * request.
1949 */
1950 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1951 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1952 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1953 break;
1954 }
1955
1956 return SCI_SUCCESS;
1957}
1958
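/**
 * sci_io_request_tc_completion() - Dispatch a task context completion to
 *    the handler for the request's current state.
 * @ireq: The request that received the task context completion.
 * @completion_code: The raw completion code reported by the SCU.
 */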
1959enum sci_status
1960sci_io_request_tc_completion(struct isci_request *ireq,
1961 u32 completion_code)
1962{
1963 enum sci_base_request_states state;
1964 struct isci_host *ihost = ireq->owning_controller;
1965
1966 state = ireq->sm.current_state_id;
1967
1968 switch (state) {
1969 case SCI_REQ_STARTED:
1970 return request_started_state_tc_event(ireq, completion_code);
1971
1972 case SCI_REQ_TASK_WAIT_TC_COMP:
1973 return ssp_task_request_await_tc_event(ireq,
1974 completion_code);
1975
1976 case SCI_REQ_SMP_WAIT_RESP:
1977 return smp_request_await_response_tc_event(ireq,
1978 completion_code);
1979
1980 case SCI_REQ_SMP_WAIT_TC_COMP:
1981 return smp_request_await_tc_event(ireq, completion_code);
1982
1983 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1984 return stp_request_udma_await_tc_event(ireq,
1985 completion_code);
1986
1987 case SCI_REQ_STP_NON_DATA_WAIT_H2D:
1988 return stp_request_non_data_await_h2d_tc_event(ireq,
1989 completion_code);
1990
1991 case SCI_REQ_STP_PIO_WAIT_H2D:
1992 return stp_request_pio_await_h2d_completion_tc_event(ireq,
1993 completion_code);
1994
1995 case SCI_REQ_STP_PIO_DATA_OUT:
1996 return pio_data_out_tx_done_tc_event(ireq, completion_code);
1997
1998 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
1999 return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
2000 completion_code);
2001
2002 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
2003 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
2004 completion_code);
2005
2006 case SCI_REQ_ABORTING:
2007 return request_aborting_state_tc_event(ireq,
2008 completion_code);
2009
2010 default:
2011 dev_warn(&ihost->pdev->dev,
2012 "%s: SCIC IO Request given task completion "
2013 "notification %x while in wrong state %d\n",
2014 __func__,
2015 completion_code,
2016 state);
2017 return SCI_FAILURE_INVALID_STATE;
2018 }
2019}
2020
2021/**
2022 * isci_request_process_response_iu() - This function sets the status and
2023 * response iu, in the task struct, from the request object for the upper
2024 * layer driver.
2025 * @task: This parameter is the task struct from the upper layer driver.
2026 * @resp_iu: This parameter points to the response iu of the completed request.
2027 * @dev: This parameter specifies the linux device struct.
2028 *
2029 * none.
2030 */
2031static void isci_request_process_response_iu(
2032 struct sas_task *task,
2033 struct ssp_response_iu *resp_iu,
2034 struct device *dev)
2035{
2036 dev_dbg(dev,
2037 "%s: resp_iu = %p "
2038 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2039 "resp_iu->response_data_len = %x, "
2040		"resp_iu->sense_data_len = %x\nresponse data: ",
2041 __func__,
2042 resp_iu,
2043 resp_iu->status,
2044 resp_iu->datapres,
2045 resp_iu->response_data_len,
2046 resp_iu->sense_data_len);
2047
2048 task->task_status.stat = resp_iu->status;
2049
2050 /* libsas updates the task status fields based on the response iu. */
2051 sas_ssp_task_response(dev, task, resp_iu);
2052}
2053
2054/**
2055 * isci_request_set_open_reject_status() - This function prepares the I/O
2056 * completion for OPEN_REJECT conditions.
2057 * @request: This parameter is the completed isci_request object.
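 * @task: This parameter is the task struct from the upper layer driver.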
2058 * @response_ptr: This parameter specifies the service response for the I/O.
2059 * @status_ptr: This parameter specifies the exec status for the I/O.
2060 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2061 * the LLDD with respect to completing this request or forcing an abort
2062 * condition on the I/O.
2063 * @open_rej_reason: This parameter specifies the encoded reason for the
2064 * abandon-class reject.
2065 *
2066 * none.
2067 */
2068static void isci_request_set_open_reject_status(
2069 struct isci_request *request,
2070 struct sas_task *task,
2071 enum service_response *response_ptr,
2072 enum exec_status *status_ptr,
2073 enum isci_completion_selection *complete_to_host_ptr,
2074 enum sas_open_rej_reason open_rej_reason)
2075{
2076 /* Task in the target is done. */
2077 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2078 *response_ptr = SAS_TASK_UNDELIVERED;
2079 *status_ptr = SAS_OPEN_REJECT;
2080 *complete_to_host_ptr = isci_perform_normal_io_completion;
2081 task->task_status.open_rej_reason = open_rej_reason;
2082}
2083
2084/**
2085 * isci_request_handle_controller_specific_errors() - This function decodes
2086 * controller-specific I/O completion error conditions.
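 * @idev: This parameter is the remote device, if any, associated with the request.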
2087 * @request: This parameter is the completed isci_request object.
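 * @task: This parameter is the task struct from the upper layer driver.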
2088 * @response_ptr: This parameter specifies the service response for the I/O.
2089 * @status_ptr: This parameter specifies the exec status for the I/O.
2090 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2091 * the LLDD with respect to completing this request or forcing an abort
2092 * condition on the I/O.
2093 *
2094 * none.
2095 */
2096static void isci_request_handle_controller_specific_errors(
2097 struct isci_remote_device *idev,
2098 struct isci_request *request,
2099 struct sas_task *task,
2100 enum service_response *response_ptr,
2101 enum exec_status *status_ptr,
2102 enum isci_completion_selection *complete_to_host_ptr)
2103{
2104 unsigned int cstatus;
2105
2106 cstatus = request->scu_status;
2107
2108 dev_dbg(&request->isci_host->pdev->dev,
2109 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2110 "- controller status = 0x%x\n",
2111 __func__, request, cstatus);
2112
2113 /* Decode the controller-specific errors; most
2114 * important is to recognize those conditions in which
2115 * the target may still have a task outstanding that
2116 * must be aborted.
2117 *
2118 * Note that there are SCU completion codes being
2119 * named in the decode below for which SCIC has already
2120 * done work to handle them in a way other than as
2121 * a controller-specific completion code; these are left
2122 * in the decode below for completeness sake.
2123 */
2124 switch (cstatus) {
2125 case SCU_TASK_DONE_DMASETUP_DIRERR:
2126 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2127 case SCU_TASK_DONE_XFERCNT_ERR:
2128 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2129 if (task->task_proto == SAS_PROTOCOL_SMP) {
2130 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2131 *response_ptr = SAS_TASK_COMPLETE;
2132
2133 /* See if the device has been/is being stopped. Note
2134 * that we ignore the quiesce state, since we are
2135 * concerned about the actual device state.
2136 */
2137 if (!idev)
2138 *status_ptr = SAS_DEVICE_UNKNOWN;
2139 else
2140 *status_ptr = SAS_ABORTED_TASK;
2141
2142 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2143
2144 *complete_to_host_ptr =
2145 isci_perform_normal_io_completion;
2146 } else {
2147 /* Task in the target is not done. */
2148 *response_ptr = SAS_TASK_UNDELIVERED;
2149
2150 if (!idev)
2151 *status_ptr = SAS_DEVICE_UNKNOWN;
2152 else
2153 *status_ptr = SAM_STAT_TASK_ABORTED;
2154
2155 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2156
2157 *complete_to_host_ptr =
2158 isci_perform_error_io_completion;
2159 }
2160
2161 break;
2162
2163 case SCU_TASK_DONE_CRC_ERR:
2164 case SCU_TASK_DONE_NAK_CMD_ERR:
2165 case SCU_TASK_DONE_EXCESS_DATA:
2166 case SCU_TASK_DONE_UNEXP_FIS:
2167 /* Also SCU_TASK_DONE_UNEXP_RESP: */
2168 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
2169 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
2170 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
2171 /* These are conditions in which the target
2172 * has completed the task, so that no cleanup
2173 * is necessary.
2174 */
2175 *response_ptr = SAS_TASK_COMPLETE;
2176
2177 /* See if the device has been/is being stopped. Note
2178 * that we ignore the quiesce state, since we are
2179 * concerned about the actual device state.
2180 */
2181 if (!idev)
2182 *status_ptr = SAS_DEVICE_UNKNOWN;
2183 else
2184 *status_ptr = SAS_ABORTED_TASK;
2185
2186 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2187
2188 *complete_to_host_ptr = isci_perform_normal_io_completion;
2189 break;
2190
2191
2192 /* Note that the only open reject completion codes seen here will be
2193 * abandon-class codes; all others are automatically retried in the SCU.
2194 */
2195 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2196
2197 isci_request_set_open_reject_status(
2198 request, task, response_ptr, status_ptr,
2199 complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
2200 break;
2201
2202 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2203
2204 /* Note - the return of AB0 will change when
2205 * libsas implements detection of zone violations.
2206 */
2207 isci_request_set_open_reject_status(
2208 request, task, response_ptr, status_ptr,
2209 complete_to_host_ptr, SAS_OREJ_RESV_AB0);
2210 break;
2211
2212 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2213
2214 isci_request_set_open_reject_status(
2215 request, task, response_ptr, status_ptr,
2216 complete_to_host_ptr, SAS_OREJ_RESV_AB1);
2217 break;
2218
2219 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2220
2221 isci_request_set_open_reject_status(
2222 request, task, response_ptr, status_ptr,
2223 complete_to_host_ptr, SAS_OREJ_RESV_AB2);
2224 break;
2225
2226 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2227
2228 isci_request_set_open_reject_status(
2229 request, task, response_ptr, status_ptr,
2230 complete_to_host_ptr, SAS_OREJ_RESV_AB3);
2231 break;
2232
2233 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2234
2235 isci_request_set_open_reject_status(
2236 request, task, response_ptr, status_ptr,
2237 complete_to_host_ptr, SAS_OREJ_BAD_DEST);
2238 break;
2239
2240 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2241
2242 isci_request_set_open_reject_status(
2243 request, task, response_ptr, status_ptr,
2244 complete_to_host_ptr, SAS_OREJ_STP_NORES);
2245 break;
2246
2247 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2248
2249 isci_request_set_open_reject_status(
2250 request, task, response_ptr, status_ptr,
2251 complete_to_host_ptr, SAS_OREJ_EPROTO);
2252 break;
2253
2254 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2255
2256 isci_request_set_open_reject_status(
2257 request, task, response_ptr, status_ptr,
2258 complete_to_host_ptr, SAS_OREJ_CONN_RATE);
2259 break;
2260
2261 case SCU_TASK_DONE_LL_R_ERR:
2262 /* Also SCU_TASK_DONE_ACK_NAK_TO: */
2263 case SCU_TASK_DONE_LL_PERR:
2264 case SCU_TASK_DONE_LL_SY_TERM:
2265 /* Also SCU_TASK_DONE_NAK_ERR:*/
2266 case SCU_TASK_DONE_LL_LF_TERM:
2267 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2268 case SCU_TASK_DONE_LL_ABORT_ERR:
2269 case SCU_TASK_DONE_SEQ_INV_TYPE:
2270 /* Also SCU_TASK_DONE_UNEXP_XR: */
2271 case SCU_TASK_DONE_XR_IU_LEN_ERR:
2272 case SCU_TASK_DONE_INV_FIS_LEN:
2273 /* Also SCU_TASK_DONE_XR_WD_LEN: */
2274 case SCU_TASK_DONE_SDMA_ERR:
2275 case SCU_TASK_DONE_OFFSET_ERR:
2276 case SCU_TASK_DONE_MAX_PLD_ERR:
2277 case SCU_TASK_DONE_LF_ERR:
2278 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
2279 case SCU_TASK_DONE_SMP_LL_RX_ERR:
2280 case SCU_TASK_DONE_UNEXP_DATA:
2281 case SCU_TASK_DONE_UNEXP_SDBFIS:
2282 case SCU_TASK_DONE_REG_ERR:
2283 case SCU_TASK_DONE_SDB_ERR:
2284 case SCU_TASK_DONE_TASK_ABORT:
2285 default:
2286 /* Task in the target is not done. */
2287 *response_ptr = SAS_TASK_UNDELIVERED;
2288 *status_ptr = SAM_STAT_TASK_ABORTED;
2289
2290 if (task->task_proto == SAS_PROTOCOL_SMP) {
2291 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2292
2293 *complete_to_host_ptr = isci_perform_normal_io_completion;
2294 } else {
2295 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2296
2297 *complete_to_host_ptr = isci_perform_error_io_completion;
2298 }
2299 break;
2300 }
2301}
2302
2303/**
2304 * isci_task_save_for_upper_layer_completion() - This function saves the
2305 * request for later completion to the upper layer driver.
2306 * @host: This parameter is a pointer to the host on which the request
2307 * should be queued (either as an error or success).
2308 * @request: This parameter is the completed request.
2309 * @response: This parameter is the response code for the completed task.
2310 * @status: This parameter is the status code for the completed task.
2311 *
2312 * none.
2313 */
2314static void isci_task_save_for_upper_layer_completion(
2315 struct isci_host *host,
2316 struct isci_request *request,
2317 enum service_response response,
2318 enum exec_status status,
2319 enum isci_completion_selection task_notification_selection)
2320{
2321 struct sas_task *task = isci_request_access_task(request);
2322
2323 task_notification_selection
2324 = isci_task_set_completion_status(task, response, status,
2325 task_notification_selection);
2326
2327 /* Tasks aborted specifically by a call to the lldd_abort_task
2328 * function should not be completed to the host in the regular path.
2329 */
2330 switch (task_notification_selection) {
2331
2332 case isci_perform_normal_io_completion:
2333
2334 /* Normal notification (task_done) */
2335 dev_dbg(&host->pdev->dev,
2336 "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
2337 __func__,
2338 task,
2339 task->task_status.resp, response,
2340 task->task_status.stat, status);
2341 /* Add to the completed list. */
2342 list_add(&request->completed_node,
2343 &host->requests_to_complete);
2344
2345 /* Take the request off the device's pending request list. */
2346 list_del_init(&request->dev_node);
2347 break;
2348
2349 case isci_perform_aborted_io_completion:
2350 /* No notification to libsas because this request is
2351 * already in the abort path.
2352 */
2353 dev_dbg(&host->pdev->dev,
2354 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
2355 __func__,
2356 task,
2357 task->task_status.resp, response,
2358 task->task_status.stat, status);
2359
2360 /* Wake up whatever process was waiting for this
2361 * request to complete.
2362 */
2363 WARN_ON(request->io_request_completion == NULL);
2364
2365 if (request->io_request_completion != NULL) {
2366
2367 /* Signal whoever is waiting that this
2368 * request is complete.
2369 */
2370 complete(request->io_request_completion);
2371 }
2372 break;
2373
2374 case isci_perform_error_io_completion:
2375 /* Use sas_task_abort */
2376 dev_dbg(&host->pdev->dev,
2377 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
2378 __func__,
2379 task,
2380 task->task_status.resp, response,
2381 task->task_status.stat, status);
2382 /* Add to the aborted list. */
2383 list_add(&request->completed_node,
2384 &host->requests_to_errorback);
2385 break;
2386
2387 default:
2388 dev_dbg(&host->pdev->dev,
2389 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
2390 __func__,
2391 task,
2392 task->task_status.resp, response,
2393 task->task_status.stat, status);
2394
2395		/* Add to the list of requests to error back to libsas. */
2396 list_add(&request->completed_node,
2397 &host->requests_to_errorback);
2398 break;
2399 }
2400}
2401
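/* Decode a D2H register FIS response: copy the ending FIS into the libsas
 * ata_task_resp area of the task status, and flag a protocol response when
 * the device fault bit is set.
 */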
2402static void isci_request_process_stp_response(struct sas_task *task,
2403 void *response_buffer)
2404{
2405 struct dev_to_host_fis *d2h_reg_fis = response_buffer;
2406 struct task_status_struct *ts = &task->task_status;
2407 struct ata_task_resp *resp = (void *)&ts->buf[0];
2408
2409 resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6));
2410 memcpy(&resp->ending_fis[0], response_buffer + 16, 24);
2411 ts->buf_valid_size = sizeof(*resp);
2412
2413	/*
2414 * If the device fault bit is set in the status register, then
2415 * set the sense data and return.
2416 */
2417 if (d2h_reg_fis->status & ATA_DF)
2418 ts->stat = SAS_PROTO_RESPONSE;
2419 else
2420 ts->stat = SAM_STAT_GOOD;
2421
2422 ts->resp = SAS_TASK_COMPLETE;
2423}
2424
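/* Translate the core's completion of an I/O request into libsas terms:
 * decode the isci request status (aborted, aborting, terminating, dead or
 * active), derive the sas_task response and status codes, and route the
 * request to the normal, aborted or error-back completion path.
 */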
2425static void isci_request_io_request_complete(struct isci_host *ihost,
2426 struct isci_request *request,
2427 enum sci_io_status completion_status)
2428{
2429 struct sas_task *task = isci_request_access_task(request);
2430 struct ssp_response_iu *resp_iu;
2431 void *resp_buf;
2432 unsigned long task_flags;
2433 struct isci_remote_device *idev = isci_lookup_device(task->dev);
2434 enum service_response response = SAS_TASK_UNDELIVERED;
2435 enum exec_status status = SAS_ABORTED_TASK;
2436 enum isci_request_status request_status;
2437 enum isci_completion_selection complete_to_host
2438 = isci_perform_normal_io_completion;
2439
2440 dev_dbg(&ihost->pdev->dev,
2441 "%s: request = %p, task = %p,\n"
2442 "task->data_dir = %d completion_status = 0x%x\n",
2443 __func__,
2444 request,
2445 task,
2446 task->data_dir,
2447 completion_status);
2448
2449 spin_lock(&request->state_lock);
2450 request_status = request->status;
2451
2452 /* Decode the request status. Note that if the request has been
2453 * aborted by a task management function, we don't care
2454 * what the status is.
2455 */
2456 switch (request_status) {
2457
2458 case aborted:
2459 /* "aborted" indicates that the request was aborted by a task
2460 * management function, since once a task management request is
2461	 * performed by the device, the request only completes because
2462 * of the subsequent driver terminate.
2463 *
2464 * Aborted also means an external thread is explicitly managing
2465 * this request, so that we do not complete it up the stack.
2466 *
2467 * The target is still there (since the TMF was successful).
2468 */
2469 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2470 response = SAS_TASK_COMPLETE;
2471
2472 /* See if the device has been/is being stopped. Note
2473 * that we ignore the quiesce state, since we are
2474 * concerned about the actual device state.
2475 */
2476 if (!idev)
2477 status = SAS_DEVICE_UNKNOWN;
2478 else
2479 status = SAS_ABORTED_TASK;
2480
2481 complete_to_host = isci_perform_aborted_io_completion;
2482 /* This was an aborted request. */
2483
2484 spin_unlock(&request->state_lock);
2485 break;
2486
2487 case aborting:
2488 /* aborting means that the task management function tried and
2489 * failed to abort the request. We need to note the request
2490 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
2491 * target as down.
2492 *
2493 * Aborting also means an external thread is explicitly managing
2494 * this request, so that we do not complete it up the stack.
2495 */
2496 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2497 response = SAS_TASK_UNDELIVERED;
2498
2499 if (!idev)
2500 /* The device has been /is being stopped. Note that
2501 * we ignore the quiesce state, since we are
2502 * concerned about the actual device state.
2503 */
2504 status = SAS_DEVICE_UNKNOWN;
2505 else
2506 status = SAS_PHY_DOWN;
2507
2508 complete_to_host = isci_perform_aborted_io_completion;
2509
2510 /* This was an aborted request. */
2511
2512 spin_unlock(&request->state_lock);
2513 break;
2514
2515 case terminating:
2516
2517		/* This was a terminated request. This happens when
2518 * the I/O is being terminated because of an action on
2519 * the device (reset, tear down, etc.), and the I/O needs
2520 * to be completed up the stack.
2521 */
2522 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2523 response = SAS_TASK_UNDELIVERED;
2524
2525 /* See if the device has been/is being stopped. Note
2526 * that we ignore the quiesce state, since we are
2527 * concerned about the actual device state.
2528 */
2529 if (!idev)
2530 status = SAS_DEVICE_UNKNOWN;
2531 else
2532 status = SAS_ABORTED_TASK;
2533
2534 complete_to_host = isci_perform_aborted_io_completion;
2535
2536 /* This was a terminated request. */
2537
2538 spin_unlock(&request->state_lock);
2539 break;
2540
2541 case dead:
2542		/* This was a terminated request that timed out during the
2543 * termination process. There is no task to complete to
2544 * libsas.
2545 */
2546 complete_to_host = isci_perform_normal_io_completion;
2547 spin_unlock(&request->state_lock);
2548 break;
2549
2550 default:
2551
2552 /* The request is done from an SCU HW perspective. */
2553 request->status = completed;
2554
2555 spin_unlock(&request->state_lock);
2556
2557 /* This is an active request being completed from the core. */
2558 switch (completion_status) {
2559
2560 case SCI_IO_FAILURE_RESPONSE_VALID:
2561 dev_dbg(&ihost->pdev->dev,
2562 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2563 __func__,
2564 request,
2565 task);
2566
2567 if (sas_protocol_ata(task->task_proto)) {
2568 resp_buf = &request->stp.rsp;
2569 isci_request_process_stp_response(task,
2570 resp_buf);
2571 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2572
2573 /* crack the iu response buffer. */
2574 resp_iu = &request->ssp.rsp;
2575 isci_request_process_response_iu(task, resp_iu,
2576 &ihost->pdev->dev);
2577
2578 } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2579
2580 dev_err(&ihost->pdev->dev,
2581 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2582 "SAS_PROTOCOL_SMP protocol\n",
2583 __func__);
2584
2585 } else
2586 dev_err(&ihost->pdev->dev,
2587 "%s: unknown protocol\n", __func__);
2588
2589 /* use the task status set in the task struct by the
2590 * isci_request_process_response_iu call.
2591 */
2592 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2593 response = task->task_status.resp;
2594 status = task->task_status.stat;
2595 break;
2596
2597 case SCI_IO_SUCCESS:
2598 case SCI_IO_SUCCESS_IO_DONE_EARLY:
2599
2600 response = SAS_TASK_COMPLETE;
2601 status = SAM_STAT_GOOD;
2602 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2603
2604 if (task->task_proto == SAS_PROTOCOL_SMP) {
2605 void *rsp = &request->smp.rsp;
2606
2607 dev_dbg(&ihost->pdev->dev,
2608 "%s: SMP protocol completion\n",
2609 __func__);
2610
2611 sg_copy_from_buffer(
2612 &task->smp_task.smp_resp, 1,
2613 rsp, sizeof(struct smp_resp));
2614 } else if (completion_status
2615 == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2616
2617 /* This was an SSP / STP / SATA transfer.
2618 * There is a possibility that less data than
2619 * the maximum was transferred.
2620 */
2621 u32 transferred_length = sci_req_tx_bytes(request);
2622
2623 task->task_status.residual
2624 = task->total_xfer_len - transferred_length;
2625
2626 /* If there were residual bytes, call this an
2627 * underrun.
2628 */
2629 if (task->task_status.residual != 0)
2630 status = SAS_DATA_UNDERRUN;
2631
2632 dev_dbg(&ihost->pdev->dev,
2633 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2634 __func__,
2635 status);
2636
2637 } else
2638 dev_dbg(&ihost->pdev->dev,
2639 "%s: SCI_IO_SUCCESS\n",
2640 __func__);
2641
2642 break;
2643
2644 case SCI_IO_FAILURE_TERMINATED:
2645 dev_dbg(&ihost->pdev->dev,
2646 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2647 __func__,
2648 request,
2649 task);
2650
2651 /* The request was terminated explicitly. No handling
2652 * is needed in the SCSI error handler path.
2653 */
2654 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2655 response = SAS_TASK_UNDELIVERED;
2656
2657 /* See if the device has been/is being stopped. Note
2658 * that we ignore the quiesce state, since we are
2659 * concerned about the actual device state.
2660 */
2661 if (!idev)
2662 status = SAS_DEVICE_UNKNOWN;
2663 else
2664 status = SAS_ABORTED_TASK;
2665
2666 complete_to_host = isci_perform_normal_io_completion;
2667 break;
2668
2669 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2670
2671 isci_request_handle_controller_specific_errors(
2672 idev, request, task, &response, &status,
2673 &complete_to_host);
2674
2675 break;
2676
2677 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2678 /* This is a special case, in that the I/O completion
2679 * is telling us that the device needs a reset.
2680 * In order for the device reset condition to be
2681 * noticed, the I/O has to be handled in the error
2682 * handler. Set the reset flag and cause the
2683 * SCSI error thread to be scheduled.
2684 */
2685 spin_lock_irqsave(&task->task_state_lock, task_flags);
2686 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2687 spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2688
2689 /* Fail the I/O. */
2690 response = SAS_TASK_UNDELIVERED;
2691 status = SAM_STAT_TASK_ABORTED;
2692
2693 complete_to_host = isci_perform_error_io_completion;
2694 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2695 break;
2696
2697 case SCI_FAILURE_RETRY_REQUIRED:
2698
2699 /* Fail the I/O so it can be retried. */
2700 response = SAS_TASK_UNDELIVERED;
2701 if (!idev)
2702 status = SAS_DEVICE_UNKNOWN;
2703 else
2704 status = SAS_ABORTED_TASK;
2705
2706 complete_to_host = isci_perform_normal_io_completion;
2707 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2708 break;
2709
2710
2711 default:
2712 /* Catch any otherwise unhandled error codes here. */
2713 dev_dbg(&ihost->pdev->dev,
2714 "%s: invalid completion code: 0x%x - "
2715 "isci_request = %p\n",
2716 __func__, completion_status, request);
2717
2718 response = SAS_TASK_UNDELIVERED;
2719
2720 /* See if the device has been/is being stopped. Note
2721 * that we ignore the quiesce state, since we are
2722 * concerned about the actual device state.
2723 */
2724 if (!idev)
2725 status = SAS_DEVICE_UNKNOWN;
2726 else
2727 status = SAS_ABORTED_TASK;
2728
2729 if (SAS_PROTOCOL_SMP == task->task_proto) {
2730 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2731 complete_to_host = isci_perform_normal_io_completion;
2732 } else {
2733 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2734 complete_to_host = isci_perform_error_io_completion;
2735 }
2736 break;
2737 }
2738 break;
2739 }
2740
2741 switch (task->task_proto) {
2742 case SAS_PROTOCOL_SSP:
2743 if (task->data_dir == DMA_NONE)
2744 break;
2745 if (task->num_scatter == 0)
2746 /* 0 indicates a single dma address */
2747 dma_unmap_single(&ihost->pdev->dev,
2748 request->zero_scatter_daddr,
2749 task->total_xfer_len, task->data_dir);
2750 else /* unmap the sgl dma addresses */
2751 dma_unmap_sg(&ihost->pdev->dev, task->scatter,
2752 request->num_sg_entries, task->data_dir);
2753 break;
2754 case SAS_PROTOCOL_SMP: {
2755 struct scatterlist *sg = &task->smp_task.smp_req;
2756 struct smp_req *smp_req;
2757 void *kaddr;
2758
2759 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
2760
2761 /* need to swab it back in case the command buffer is re-used */
2762 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
2763 smp_req = kaddr + sg->offset;
2764 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
2765 kunmap_atomic(kaddr, KM_IRQ0);
2766 break;
2767 }
2768 default:
2769 break;
2770 }
2771
2772 /* Put the completed request on the correct list */
2773 isci_task_save_for_upper_layer_completion(ihost, request, response,
2774 status, complete_to_host
2775 );
2776
2777 /* complete the io request to the core. */
2778 sci_controller_complete_io(ihost, request->target_device, request);
2779 isci_put_device(idev);
2780
2781	/* Set the terminated flag so the request cannot be completed or
2782	 * terminated again, and so any calls into abort
2783	 * task recognize the already completed case.
2784 */
2785 set_bit(IREQ_TERMINATED, &request->flags);
2786}
2787
2788static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
2789{
2790 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2791 struct domain_device *dev = ireq->target_device->domain_dev;
2792 struct sas_task *task;
2793
2794 /* XXX as hch said always creating an internal sas_task for tmf
2795 * requests would simplify the driver
2796 */
2797 task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
2798
2799	/* all unaccelerated request types (non-SSP, non-NCQ) are handled
2800	 * with substates
2801 */
2802 if (!task && dev->dev_type == SAS_END_DEV) {
2803 sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
2804 } else if (!task &&
2805 (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
2806 isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
2807 sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
2808 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2809 sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
2810 } else if (task && sas_protocol_ata(task->task_proto) &&
2811 !task->ata_task.use_ncq) {
2812 u32 state;
2813
2814 if (task->data_dir == DMA_NONE)
2815 state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
2816 else if (task->ata_task.dma_xfer)
2817 state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
2818 else /* PIO */
2819 state = SCI_REQ_STP_PIO_WAIT_H2D;
2820
2821 sci_change_state(sm, state);
2822 }
2823}
2824
2825static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
2826{
2827 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2828 struct isci_host *ihost = ireq->owning_controller;
2829
2830 /* Tell the SCI_USER that the IO request is complete */
2831 if (!test_bit(IREQ_TMF, &ireq->flags))
2832 isci_request_io_request_complete(ihost, ireq,
2833 ireq->sci_status);
2834 else
2835 isci_task_request_complete(ihost, ireq, ireq->sci_status);
2836}
2837
2838static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
2839{
2840 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2841
2842 /* Setting the abort bit in the Task Context is required by the silicon. */
2843 ireq->tc->abort = 1;
2844}
2845
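/* The STP non-data, PIO and soft-reset substate entries below record the
 * request as the remote device's working request; these unaccelerated
 * protocols are processed one request at a time.
 */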
2846static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2847{
2848 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2849
2850 ireq->target_device->working_request = ireq;
2851}
2852
2853static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2854{
2855 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2856
2857 ireq->target_device->working_request = ireq;
2858}
2859
2860static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
2861{
2862 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2863
2864 ireq->target_device->working_request = ireq;
2865}
2866
2867static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
2868{
2869 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2870 struct scu_task_context *tc = ireq->tc;
2871 struct host_to_dev_fis *h2d_fis;
2872 enum sci_status status;
2873
2874 /* Clear the SRST bit */
2875 h2d_fis = &ireq->stp.cmd;
2876 h2d_fis->control = 0;
2877
2878 /* Clear the TC control bit */
2879 tc->control_frame = 0;
2880
2881 status = sci_controller_continue_io(ireq);
2882 WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
2883}
2884
2885static const struct sci_base_state sci_request_state_table[] = {
2886 [SCI_REQ_INIT] = { },
2887 [SCI_REQ_CONSTRUCTED] = { },
2888 [SCI_REQ_STARTED] = {
2889 .enter_state = sci_request_started_state_enter,
2890 },
2891 [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
2892 .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
2893 },
2894 [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
2895 [SCI_REQ_STP_PIO_WAIT_H2D] = {
2896 .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
2897 },
2898 [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
2899 [SCI_REQ_STP_PIO_DATA_IN] = { },
2900 [SCI_REQ_STP_PIO_DATA_OUT] = { },
2901 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
2902 [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
2903 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
2904 .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
2905 },
2906 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
2907 .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
2908 },
2909 [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
2910 [SCI_REQ_TASK_WAIT_TC_COMP] = { },
2911 [SCI_REQ_TASK_WAIT_TC_RESP] = { },
2912 [SCI_REQ_SMP_WAIT_RESP] = { },
2913 [SCI_REQ_SMP_WAIT_TC_COMP] = { },
2914 [SCI_REQ_COMPLETED] = {
2915 .enter_state = sci_request_completed_state_enter,
2916 },
2917 [SCI_REQ_ABORTING] = {
2918 .enter_state = sci_request_aborting_state_enter,
2919 },
2920 [SCI_REQ_FINAL] = { },
2921};
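/*
 * A minimal sketch (assumed, simplified from the driver's
 * sci_base_state_machine support in isci.h) of how the table above is
 * consumed: sci_change_state() runs the old state's optional exit handler,
 * records the transition, then runs the new state's enter handler, e.g.
 * sci_request_completed_state_enter().
 */
static inline void example_change_state(struct sci_base_state_machine *sm,
					u32 next_state)
{
	const struct sci_base_state *table = sm->state_table;

	if (table[sm->current_state_id].exit_state)
		table[sm->current_state_id].exit_state(sm); /* leave old state */

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next_state;

	if (table[next_state].enter_state)
		table[next_state].enter_state(sm);	    /* enter new state */
}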
2922
2923static void
2924sci_general_request_construct(struct isci_host *ihost,
2925 struct isci_remote_device *idev,
2926 struct isci_request *ireq)
2927{
2928 sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
2929
2930 ireq->target_device = idev;
2931 ireq->protocol = SCIC_NO_PROTOCOL;
2932 ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
2933
2934 ireq->sci_status = SCI_SUCCESS;
2935 ireq->scu_status = 0;
2936 ireq->post_context = 0xFFFFFFFF;
2937}
2938
2939static enum sci_status
2940sci_io_request_construct(struct isci_host *ihost,
2941 struct isci_remote_device *idev,
2942 struct isci_request *ireq)
2943{
2944 struct domain_device *dev = idev->domain_dev;
2945 enum sci_status status = SCI_SUCCESS;
2946
2947 /* Build the common part of the request */
2948 sci_general_request_construct(ihost, idev, ireq);
2949
2950 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
2951 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
2952
2953 if (dev->dev_type == SAS_END_DEV)
2954 /* pass */;
2955 else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
2956 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
2957 else if (dev_is_expander(dev))
2958 /* pass */;
2959 else
2960 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
2961
2962 memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
2963
2964 return status;
2965}
2966
2967enum sci_status sci_task_request_construct(struct isci_host *ihost,
2968 struct isci_remote_device *idev,
2969 u16 io_tag, struct isci_request *ireq)
2970{
2971 struct domain_device *dev = idev->domain_dev;
2972 enum sci_status status = SCI_SUCCESS;
2973
2974 /* Build the common part of the request */
2975 sci_general_request_construct(ihost, idev, ireq);
2976
2977 if (dev->dev_type == SAS_END_DEV ||
2978 dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
2979 set_bit(IREQ_TMF, &ireq->flags);
2980 memset(ireq->tc, 0, sizeof(struct scu_task_context));
2981 } else
2982 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
2983
2984 return status;
2985}
2986
2987static enum sci_status isci_request_ssp_request_construct(
2988 struct isci_request *request)
2989{
2990 enum sci_status status;
2991
2992 dev_dbg(&request->isci_host->pdev->dev,
2993 "%s: request = %p\n",
2994 __func__,
2995 request);
2996 status = sci_io_request_construct_basic_ssp(request);
2997 return status;
2998}
2999
3000static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
3001{
3002 struct sas_task *task = isci_request_access_task(ireq);
3003 struct host_to_dev_fis *fis = &ireq->stp.cmd;
3004 struct ata_queued_cmd *qc = task->uldd_task;
3005 enum sci_status status;
3006
3007 dev_dbg(&ireq->isci_host->pdev->dev,
3008 "%s: ireq = %p\n",
3009 __func__,
3010 ireq);
3011
3012 memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
3013 if (!task->ata_task.device_control_reg_update)
3014		fis->flags |= 0x80;	/* set the C bit: this FIS carries a command */
3015	fis->flags &= 0xF0;	/* clear the PM port field */
3016
3017 status = sci_io_request_construct_basic_sata(ireq);
3018
3019 if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
3020 qc->tf.command == ATA_CMD_FPDMA_READ)) {
3021 fis->sector_count = qc->tag << 3;
3022 ireq->tc->type.stp.ncq_tag = qc->tag;
3023 }
3024
3025 return status;
3026}
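/*
 * Editor's note (SATA NCQ framing, for illustration): for FPDMA commands
 * the queue tag occupies bits 7:3 of the H2D FIS sector-count field, hence
 * the "qc->tag << 3" above; e.g. tag 5 yields sector_count = 0x28.  The
 * same tag is mirrored into the task context (ncq_tag) so the SCU can
 * associate the eventual Set Device Bits FIS with this request.
 */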
3027
3028static enum sci_status
3029sci_io_request_construct_smp(struct device *dev,
3030 struct isci_request *ireq,
3031 struct sas_task *task)
3032{
3033 struct scatterlist *sg = &task->smp_task.smp_req;
3034 struct isci_remote_device *idev;
3035 struct scu_task_context *task_context;
3036 struct isci_port *iport;
3037 struct smp_req *smp_req;
3038 void *kaddr;
3039 u8 req_len;
3040 u32 cmd;
3041
3042 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
3043 smp_req = kaddr + sg->offset;
3044 /*
3045	 * Look at the SMP request's header fields; for certain SAS 1.x SMP
3046 * functions under SAS 2.0, a zero request length really indicates
3047 * a non-zero default length.
3048 */
3049 if (smp_req->req_len == 0) {
3050 switch (smp_req->func) {
3051 case SMP_DISCOVER:
3052 case SMP_REPORT_PHY_ERR_LOG:
3053 case SMP_REPORT_PHY_SATA:
3054 case SMP_REPORT_ROUTE_INFO:
3055 smp_req->req_len = 2;
3056 break;
3057 case SMP_CONF_ROUTE_INFO:
3058 case SMP_PHY_CONTROL:
3059 case SMP_PHY_TEST_FUNCTION:
3060 smp_req->req_len = 9;
3061 break;
3062 /* Default - zero is a valid default for 2.0. */
3063 }
3064 }
3065 req_len = smp_req->req_len;
3066 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3067 cmd = *(u32 *) smp_req;
3068 kunmap_atomic(kaddr, KM_IRQ0);
3069
3070 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
3071 return SCI_FAILURE;
3072
3073 ireq->protocol = SCIC_SMP_PROTOCOL;
3074
3075	/* the SMP request was already byte swapped above (sci_swab32_cpy). */
3076
3077 task_context = ireq->tc;
3078
3079 idev = ireq->target_device;
3080 iport = idev->owning_port;
3081
3082 /*
3083	 * Fill in the TC with its required data
3084 * 00h
3085 */
3086 task_context->priority = 0;
3087 task_context->initiator_request = 1;
3088 task_context->connection_rate = idev->connection_rate;
3089 task_context->protocol_engine_index = ISCI_PEG;
3090 task_context->logical_port_index = iport->physical_port_index;
3091 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3092 task_context->abort = 0;
3093 task_context->valid = SCU_TASK_CONTEXT_VALID;
3094 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
3095
3096 /* 04h */
3097 task_context->remote_node_index = idev->rnc.remote_node_index;
3098 task_context->command_code = 0;
3099 task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
3100
3101 /* 08h */
3102 task_context->link_layer_control = 0;
3103 task_context->do_not_dma_ssp_good_response = 1;
3104 task_context->strict_ordering = 0;
3105 task_context->control_frame = 1;
3106 task_context->timeout_enable = 0;
3107 task_context->block_guard_enable = 0;
3108
3109 /* 0ch */
3110 task_context->address_modifier = 0;
3111
3112 /* 10h */
3113 task_context->ssp_command_iu_length = req_len;
3114
3115 /* 14h */
3116 task_context->transfer_length_bytes = 0;
3117
3118 /*
3119 * 18h ~ 30h, protocol specific
3120	 * since the command IU has been built by the framework at this point,
3121	 * we just copy the first DWord from the command IU to this location. */
3122 memcpy(&task_context->type.smp, &cmd, sizeof(u32));
3123
3124 /*
3125 * 40h
3126 * "For SMP you could program it to zero. We would prefer that way
3127 * so that done code will be consistent." - Venki
3128 */
3129 task_context->task_phase = 0;
3130
3131 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3132 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3133 (iport->physical_port_index <<
3134 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3135 ISCI_TAG_TCI(ireq->io_tag));
3136 /*
3137	 * Copy the physical address of the command buffer to the SCU Task
3138	 * Context; the command buffer should not contain the command header.
3139 */
3140 task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
3141 task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
3142
3143 /* SMP response comes as UF, so no need to set response IU address. */
3144 task_context->response_iu_upper = 0;
3145 task_context->response_iu_lower = 0;
3146
3147 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
3148
3149 return SCI_SUCCESS;
3150}
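/*
 * Editor's worked example (values assumed for illustration): with
 * ISCI_PEG == 0, a physical_port_index of 1 and an io_tag whose TCi is
 * 0x2a, the post_context computed above becomes
 *
 *	SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
 *	(0 << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
 *	(1 << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 0x2a
 *
 * i.e. a single 32-bit doorbell value identifying the protocol engine
 * group, logical port and task context to post to the hardware.
 */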
3151
3152/*
3153 * isci_smp_request_build() - This function builds the SMP request.
3154 * @ireq: This parameter points to the isci_request allocated in the
3155 * request construct function.
3156 *
3157 * SCI_SUCCESS on successful completion, or specific failure code.
3158 */
3159static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3160{
3161 struct sas_task *task = isci_request_access_task(ireq);
3162 struct device *dev = &ireq->isci_host->pdev->dev;
3163 enum sci_status status = SCI_FAILURE;
3164
3165 status = sci_io_request_construct_smp(dev, ireq, task);
3166 if (status != SCI_SUCCESS)
3167 dev_dbg(&ireq->isci_host->pdev->dev,
3168 "%s: failed with status = %d\n",
3169 __func__,
3170 status);
3171
3172 return status;
3173}
3174
3175/**
3176 * isci_io_request_build() - This function builds the io request object.
3177 * @ihost: This parameter specifies the ISCI host object
3178 * @request: This parameter points to the isci_request object allocated in the
3179 * request construct function.
3180 * @idev: This parameter is the handle for the sci core's remote device
3181 * object that is the destination for this request.
3182 *
3183 * SCI_SUCCESS on successful completion, or specific failure code.
3184 */
3185static enum sci_status isci_io_request_build(struct isci_host *ihost,
3186 struct isci_request *request,
3187 struct isci_remote_device *idev)
3188{
3189 enum sci_status status = SCI_SUCCESS;
3190 struct sas_task *task = isci_request_access_task(request);
3191
3192 dev_dbg(&ihost->pdev->dev,
3193 "%s: idev = 0x%p; request = %p, "
3194 "num_scatter = %d\n",
3195 __func__,
3196 idev,
3197 request,
3198 task->num_scatter);
3199
3200 /* map the sgl addresses, if present.
3201 * libata does the mapping for sata devices
3202 * before we get the request.
3203 */
3204 if (task->num_scatter &&
3205 !sas_protocol_ata(task->task_proto) &&
3206 !(SAS_PROTOCOL_SMP & task->task_proto)) {
3207
3208 request->num_sg_entries = dma_map_sg(
3209 &ihost->pdev->dev,
3210 task->scatter,
3211 task->num_scatter,
3212 task->data_dir
3213 );
3214
3215 if (request->num_sg_entries == 0)
3216 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3217 }
3218
3219 status = sci_io_request_construct(ihost, idev, request);
3220
3221 if (status != SCI_SUCCESS) {
3222 dev_dbg(&ihost->pdev->dev,
3223 "%s: failed request construct\n",
3224 __func__);
3225 return SCI_FAILURE;
3226 }
3227
3228 switch (task->task_proto) {
3229 case SAS_PROTOCOL_SMP:
3230 status = isci_smp_request_build(request);
3231 break;
3232 case SAS_PROTOCOL_SSP:
3233 status = isci_request_ssp_request_construct(request);
3234 break;
3235 case SAS_PROTOCOL_SATA:
3236 case SAS_PROTOCOL_STP:
3237 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
3238 status = isci_request_stp_request_construct(request);
3239 break;
3240 default:
3241 dev_dbg(&ihost->pdev->dev,
3242 "%s: unknown protocol\n", __func__);
3243 return SCI_FAILURE;
3244 }
3245
3246	return status;
3247}
3248
3249static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
3250{
3251 struct isci_request *ireq;
3252
3253 ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
3254 ireq->io_tag = tag;
3255 ireq->io_request_completion = NULL;
3256 ireq->flags = 0;
3257 ireq->num_sg_entries = 0;
3258 INIT_LIST_HEAD(&ireq->completed_node);
3259 INIT_LIST_HEAD(&ireq->dev_node);
3260 isci_request_change_state(ireq, allocated);
3261
3262 return ireq;
3263}
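/*
 * Editor's sketch of the tag layout (assumed from the driver's ISCI_TAG
 * helpers in isci.h: a sequence number in the upper bits, the task context
 * index in the lower bits):
 *
 *	u16 tag = ISCI_TAG(seq, tci);
 *	ireq    = ihost->reqs[ISCI_TAG_TCI(tag)];
 *
 * Only the TCi selects one of the preallocated isci_request slots; the
 * sequence portion exists so that stale tags can be detected when a slot
 * is reused.
 */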
3264
3265static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
3266 struct sas_task *task,
3267 u16 tag)
3268{
3269 struct isci_request *ireq;
3270
3271 ireq = isci_request_from_tag(ihost, tag);
3272 ireq->ttype_ptr.io_task_ptr = task;
3273 ireq->ttype = io_task;
3274 task->lldd_task = ireq;
3275
3276 return ireq;
3277}
3278
3279struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
3280 struct isci_tmf *isci_tmf,
3281 u16 tag)
3282{
3283 struct isci_request *ireq;
3284
3285 ireq = isci_request_from_tag(ihost, tag);
3286 ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
3287 ireq->ttype = tmf_task;
3288
3289 return ireq;
3290}
3291
3292int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
3293 struct sas_task *task, u16 tag)
3294{
3295 enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3296 struct isci_request *ireq;
3297 unsigned long flags;
3298 int ret = 0;
3299
3300 /* do common allocation and init of request object. */
3301 ireq = isci_io_request_from_tag(ihost, task, tag);
3302
3303 status = isci_io_request_build(ihost, ireq, idev);
3304 if (status != SCI_SUCCESS) {
3305 dev_dbg(&ihost->pdev->dev,
3306 "%s: request_construct failed - status = 0x%x\n",
3307 __func__,
3308 status);
3309 return status;
3310 }
3311
3312 spin_lock_irqsave(&ihost->scic_lock, flags);
3313
3314 if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
3315
3316 if (isci_task_is_ncq_recovery(task)) {
3317
3318 /* The device is in an NCQ recovery state. Issue the
3319 * request on the task side. Note that it will
3320 * complete on the I/O request side because the
3321			 * request was built that way (i.e. the
3322			 * IREQ_TMF flag is not set).
3323 */
3324 status = sci_controller_start_task(ihost,
3325 idev,
3326 ireq);
3327 } else {
3328 status = SCI_FAILURE;
3329 }
3330 } else {
3331		/* send the request to the core; the io tag was assigned by the caller. */
3332 status = sci_controller_start_io(ihost, idev,
3333 ireq);
3334 }
3335
3336 if (status != SCI_SUCCESS &&
3337 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3338 dev_dbg(&ihost->pdev->dev,
3339 "%s: failed request start (0x%x)\n",
3340 __func__, status);
3341 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3342 return status;
3343 }
3344
3345 /* Either I/O started OK, or the core has signaled that
3346 * the device needs a target reset.
3347 *
3348 * In either case, hold onto the I/O for later.
3349 *
3350	 * Update its status and add it to the list in the
3351 * remote device object.
3352 */
3353 list_add(&ireq->dev_node, &idev->reqs_in_process);
3354
3355 if (status == SCI_SUCCESS) {
3356 isci_request_change_state(ireq, started);
3357 } else {
3358 /* The request did not really start in the
3359 * hardware, so clear the request handle
3360 * here so no terminations will be done.
3361 */
3362 set_bit(IREQ_TERMINATED, &ireq->flags);
3363 isci_request_change_state(ireq, completed);
3364 }
3365 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3366
3367 if (status ==
3368 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3369 /* Signal libsas that we need the SCSI error
3370 * handler thread to work on this I/O and that
3371 * we want a device reset.
3372 */
3373 spin_lock_irqsave(&task->task_state_lock, flags);
3374 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3375 spin_unlock_irqrestore(&task->task_state_lock, flags);
3376
3377 /* Cause this task to be scheduled in the SCSI error
3378 * handler thread.
3379 */
3380 isci_execpath_callback(ihost, task,
3381 sas_task_abort);
3382
3383 /* Change the status, since we are holding
3384 * the I/O until it is managed by the SCSI
3385 * error handler.
3386 */
3387 status = SCI_SUCCESS;
3388 }
3389
3390 return ret;
3391}
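/*
 * Editor's usage sketch (hypothetical caller, not the driver's own
 * submission path): a tag is allocated under scic_lock via the
 * isci_alloc_tag() helper declared in host.h (assumed signature) and is
 * then handed to isci_request_execute() together with the libsas task.
 */
static int example_submit(struct isci_host *ihost,
			  struct isci_remote_device *idev,
			  struct sas_task *task)
{
	unsigned long flags;
	u16 tag;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	tag = isci_alloc_tag(ihost);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
		return -ENOMEM;	/* no free task contexts */

	return isci_request_execute(ihost, idev, task, tag);
}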
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
new file mode 100644
index 000000000000..7a1d5a9778eb
--- /dev/null
+++ b/drivers/scsi/isci/request.h
@@ -0,0 +1,448 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _ISCI_REQUEST_H_
57#define _ISCI_REQUEST_H_
58
59#include "isci.h"
60#include "host.h"
61#include "scu_task_context.h"
62
63/**
64 * enum isci_request_status - This enum defines the possible states of an I/O
65 * request.
66 *
67 *
68 */
69enum isci_request_status {
70 unallocated = 0x00,
71 allocated = 0x01,
72 started = 0x02,
73 completed = 0x03,
74 aborting = 0x04,
75 aborted = 0x05,
76 terminating = 0x06,
77 dead = 0x07
78};
79
80enum task_type {
81 io_task = 0,
82 tmf_task = 1
83};
84
85enum sci_request_protocol {
86 SCIC_NO_PROTOCOL,
87 SCIC_SMP_PROTOCOL,
88 SCIC_SSP_PROTOCOL,
89 SCIC_STP_PROTOCOL
90}; /* XXX remove me, use sas_task.{dev|task_proto} instead */
91
92/**
93 * isci_stp_request - extra request infrastructure to handle pio/atapi protocol
94 * @pio_len - number of bytes requested at PIO setup
95 * @status - pio setup ending status value to tell us if we need
96 * to wait for another fis or if the transfer is complete. Upon
97 * receipt of a d2h fis this will be the status field of that fis.
98 * @sgl - track pio transfer progress as we iterate through the sgl
99 * @device_cdb_len - atapi device advertises it's transfer constraints at setup
100 */
101struct isci_stp_request {
102 u32 pio_len;
103 u8 status;
104
105 struct isci_stp_pio_sgl {
106 int index;
107 u8 set;
108 u32 offset;
109 } sgl;
110 u32 device_cdb_len;
111};
112
113struct isci_request {
114 enum isci_request_status status;
115 #define IREQ_COMPLETE_IN_TARGET 0
116 #define IREQ_TERMINATED 1
117 #define IREQ_TMF 2
118 #define IREQ_ACTIVE 3
119 unsigned long flags;
120 /* XXX kill ttype and ttype_ptr, allocate full sas_task */
121 enum task_type ttype;
122 union ttype_ptr_union {
123 struct sas_task *io_task_ptr; /* When ttype==io_task */
124 struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
125 } ttype_ptr;
126 struct isci_host *isci_host;
127 /* For use in the requests_to_{complete|abort} lists: */
128 struct list_head completed_node;
129 /* For use in the reqs_in_process list: */
130 struct list_head dev_node;
131 spinlock_t state_lock;
132 dma_addr_t request_daddr;
133 dma_addr_t zero_scatter_daddr;
134 unsigned int num_sg_entries;
135 /* Note: "io_request_completion" is completed in two different ways
136 * depending on whether this is a TMF or regular request.
137 * - TMF requests are completed in the thread that started them;
138 * - regular requests are completed in the request completion callback
139 * function.
140 * This difference in operation allows the aborter of a TMF request
141 * to be sure that once the TMF request completes, the I/O that the
142 * TMF was aborting is guaranteed to have completed.
143 *
144 * XXX kill io_request_completion
145 */
146 struct completion *io_request_completion;
147 struct sci_base_state_machine sm;
148 struct isci_host *owning_controller;
149 struct isci_remote_device *target_device;
150 u16 io_tag;
151 enum sci_request_protocol protocol;
152 u32 scu_status; /* hardware result */
153 u32 sci_status; /* upper layer disposition */
154 u32 post_context;
155 struct scu_task_context *tc;
156 /* could be larger with sg chaining */
157 #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
158 struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
159	/* This field is the index of the stored rx frame data, used in STP
160	 * internal requests and SMP response frames. If this field is not
161	 * SCU_INVALID_FRAME_INDEX, the saved frame must be released on IO
162	 * request completion. */
163 u32 saved_rx_frame_index;
164
165 union {
166 struct {
167 union {
168 struct ssp_cmd_iu cmd;
169 struct ssp_task_iu tmf;
170 };
171 union {
172 struct ssp_response_iu rsp;
173 u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
174 };
175 } ssp;
176 struct {
177 struct smp_resp rsp;
178 } smp;
179 struct {
180 struct isci_stp_request req;
181 struct host_to_dev_fis cmd;
182 struct dev_to_host_fis rsp;
183 } stp;
184 };
185};
186
187static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
188{
189 struct isci_request *ireq;
190
191 ireq = container_of(stp_req, typeof(*ireq), stp.req);
192 return ireq;
193}
194
195/**
196 * enum sci_base_request_states - This enumeration depicts all the states for
197 * the common request state machine.
198 *
199 *
200 */
201enum sci_base_request_states {
202 /*
203 * Simply the initial state for the base request state machine.
204 */
205 SCI_REQ_INIT,
206
207 /*
208 * This state indicates that the request has been constructed.
209 * This state is entered from the INITIAL state.
210 */
211 SCI_REQ_CONSTRUCTED,
212
213 /*
214 * This state indicates that the request has been started. This state
215 * is entered from the CONSTRUCTED state.
216 */
217 SCI_REQ_STARTED,
218
219 SCI_REQ_STP_UDMA_WAIT_TC_COMP,
220 SCI_REQ_STP_UDMA_WAIT_D2H,
221
222 SCI_REQ_STP_NON_DATA_WAIT_H2D,
223 SCI_REQ_STP_NON_DATA_WAIT_D2H,
224
225 SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
226 SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
227 SCI_REQ_STP_SOFT_RESET_WAIT_D2H,
228
229 /*
230 * While in this state the IO request object is waiting for the TC
231 * completion notification for the H2D Register FIS
232 */
233 SCI_REQ_STP_PIO_WAIT_H2D,
234
235 /*
236 * While in this state the IO request object is waiting for either a
237 * PIO Setup FIS or a D2H register FIS. The type of frame received is
238 * based on the result of the prior frame and line conditions.
239 */
240 SCI_REQ_STP_PIO_WAIT_FRAME,
241
242 /*
243 * While in this state the IO request object is waiting for a DATA
244 * frame from the device.
245 */
246 SCI_REQ_STP_PIO_DATA_IN,
247
248 /*
249 * While in this state the IO request object is waiting to transmit
250 * the next data frame to the device.
251 */
252 SCI_REQ_STP_PIO_DATA_OUT,
253
254 /*
255 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
256 * task management request is waiting for the transmission of the
257 * initial frame (i.e. command, task, etc.).
258 */
259 SCI_REQ_TASK_WAIT_TC_COMP,
260
261 /*
262 * This sub-state indicates that the started task management request
263 * is waiting for the reception of an unsolicited frame
264 * (i.e. response IU).
265 */
266 SCI_REQ_TASK_WAIT_TC_RESP,
267
268 /*
269	 * This sub-state indicates that the started SMP request is waiting
270	 * for the reception of an unsolicited frame
271	 * (i.e. the SMP response frame).
272 */
273 SCI_REQ_SMP_WAIT_RESP,
274
275 /*
276 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
277 * request is waiting for the transmission of the initial frame
278 * (i.e. command, task, etc.).
279 */
280 SCI_REQ_SMP_WAIT_TC_COMP,
281
282 /*
283 * This state indicates that the request has completed.
284 * This state is entered from the STARTED state. This state is entered
285 * from the ABORTING state.
286 */
287 SCI_REQ_COMPLETED,
288
289 /*
290 * This state indicates that the request is in the process of being
291 * terminated/aborted.
292 * This state is entered from the CONSTRUCTED state.
293 * This state is entered from the STARTED state.
294 */
295 SCI_REQ_ABORTING,
296
297 /*
298 * Simply the final state for the base request state machine.
299 */
300 SCI_REQ_FINAL,
301};
302
303enum sci_status sci_request_start(struct isci_request *ireq);
304enum sci_status sci_io_request_terminate(struct isci_request *ireq);
305enum sci_status
306sci_io_request_event_handler(struct isci_request *ireq,
307 u32 event_code);
308enum sci_status
309sci_io_request_frame_handler(struct isci_request *ireq,
310 u32 frame_index);
311enum sci_status
312sci_task_request_terminate(struct isci_request *ireq);
313extern enum sci_status
314sci_request_complete(struct isci_request *ireq);
315extern enum sci_status
316sci_io_request_tc_completion(struct isci_request *ireq, u32 code);
317
318/* XXX open code in caller */
319static inline dma_addr_t
320sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
321{
323 char *requested_addr = (char *)virt_addr;
324 char *base_addr = (char *)ireq;
325
326 BUG_ON(requested_addr < base_addr);
327 BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
328
329 return ireq->request_daddr + (requested_addr - base_addr);
330}
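/*
 * Editor's usage sketch: the helper above translates a virtual address
 * inside the isci_request into the corresponding bus address, e.g. to
 * point the hardware at the embedded SSP response IU:
 *
 *	dma_addr_t rsp = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
 */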
331
332/**
333 * isci_request_change_state() - This function sets the status of the request
334 * object.
335 * @request: This parameter points to the isci_request object
336 * @status: This parameter is the new status of the object
337 * Returns the state previous to the change.
338 */
339static inline enum isci_request_status
340isci_request_change_state(struct isci_request *isci_request,
341 enum isci_request_status status)
342{
343 enum isci_request_status old_state;
344 unsigned long flags;
345
346 dev_dbg(&isci_request->isci_host->pdev->dev,
347 "%s: isci_request = %p, state = 0x%x\n",
348 __func__,
349 isci_request,
350 status);
351
352 BUG_ON(isci_request == NULL);
353
354 spin_lock_irqsave(&isci_request->state_lock, flags);
355 old_state = isci_request->status;
356 isci_request->status = status;
357 spin_unlock_irqrestore(&isci_request->state_lock, flags);
358
359 return old_state;
360}
361
362/**
363 * isci_request_change_started_to_newstate() - This function sets the status of
364 * the request object.
365 * @request: This parameter points to the isci_request object
366 * @completion_ptr: the completion signalled when the request completes
367 * @newstate: This parameter is the new status of the object
368 * Returns the state previous to any change.
369 */
370static inline enum isci_request_status
371isci_request_change_started_to_newstate(struct isci_request *isci_request,
372 struct completion *completion_ptr,
373 enum isci_request_status newstate)
374{
375 enum isci_request_status old_state;
376 unsigned long flags;
377
378 spin_lock_irqsave(&isci_request->state_lock, flags);
379
380 old_state = isci_request->status;
381
382 if (old_state == started || old_state == aborting) {
383 BUG_ON(isci_request->io_request_completion != NULL);
384
385 isci_request->io_request_completion = completion_ptr;
386 isci_request->status = newstate;
387 }
388
389 spin_unlock_irqrestore(&isci_request->state_lock, flags);
390
391 dev_dbg(&isci_request->isci_host->pdev->dev,
392 "%s: isci_request = %p, old_state = 0x%x\n",
393 __func__,
394 isci_request,
395 old_state);
396
397 return old_state;
398}
399
400/**
401 * isci_request_change_started_to_aborted() - This function sets the status of
402 * the request object.
403 * @request: This parameter points to the isci_request object
404 * @completion_ptr: This parameter is saved as the kernel completion structure
405 * signalled when the old request completes.
406 *
407 * Returns the state previous to any change.
408 */
409static inline enum isci_request_status
410isci_request_change_started_to_aborted(struct isci_request *isci_request,
411 struct completion *completion_ptr)
412{
413 return isci_request_change_started_to_newstate(isci_request,
414 completion_ptr,
415 aborted);
416}
417
418#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
419
420#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
421
422struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
423 struct isci_tmf *isci_tmf,
424 u16 tag);
425int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
426 struct sas_task *task, u16 tag);
427void isci_terminate_pending_requests(struct isci_host *ihost,
428 struct isci_remote_device *idev);
429enum sci_status
430sci_task_request_construct(struct isci_host *ihost,
431 struct isci_remote_device *idev,
432 u16 io_tag,
433 struct isci_request *ireq);
434enum sci_status
435sci_task_request_construct_ssp(struct isci_request *ireq);
436enum sci_status
437sci_task_request_construct_sata(struct isci_request *ireq);
438void sci_smp_request_copy_response(struct isci_request *ireq);
439
440static inline int isci_task_is_ncq_recovery(struct sas_task *task)
441{
442 return (sas_protocol_ata(task->task_proto) &&
443 task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT &&
444 task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
446}
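/*
 * Editor's note with a worked example: after an NCQ error, libata issues
 * READ LOG EXT (ATA_CMD_READ_LOG_EXT, 0x2f) against log page 10h
 * (ATA_LOG_SATA_NCQ) to learn which queued command failed.  The helper
 * above recognizes exactly that FIS, so isci_request_execute() can route
 * the request through sci_controller_start_task() while the device is in
 * NCQ error recovery.
 */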
447
448#endif /* !defined(_ISCI_REQUEST_H_) */
diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
new file mode 100644
index 000000000000..462b15174d3f
--- /dev/null
+++ b/drivers/scsi/isci/sas.h
@@ -0,0 +1,219 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _SCI_SAS_H_
57#define _SCI_SAS_H_
58
59#include <linux/kernel.h>
60
61/*
62 * SATA FIS types: these constants depict the various SATA FIS types defined
63 * in the Serial ATA specification.
64 * XXX: This needs to go into <scsi/sas.h>
65 */
66#define FIS_REGH2D 0x27
67#define FIS_REGD2H 0x34
68#define FIS_SETDEVBITS 0xA1
69#define FIS_DMA_ACTIVATE 0x39
70#define FIS_DMA_SETUP 0x41
71#define FIS_BIST_ACTIVATE 0x58
72#define FIS_PIO_SETUP 0x5F
73#define FIS_DATA 0x46
74
75/**************************************************************************/
76#define SSP_RESP_IU_MAX_SIZE 280
77
78/*
79 * contents of the SSP COMMAND INFORMATION UNIT.
80 * For specific information on each of these individual fields please
81 * reference the SAS specification SSP transport layer section.
82 * XXX: This needs to go into <scsi/sas.h>
83 */
84struct ssp_cmd_iu {
85 u8 LUN[8];
86 u8 add_cdb_len:6;
87 u8 _r_a:2;
88 u8 _r_b;
89 u8 en_fburst:1;
90 u8 task_prio:4;
91 u8 task_attr:3;
92 u8 _r_c;
93
94 u8 cdb[16];
95} __packed;
96
97/*
98 * contents of the SSP TASK INFORMATION UNIT.
99 * For specific information on each of these individual fields please
100 * reference the SAS specification SSP transport layer section.
101 * XXX: This needs to go into <scsi/sas.h>
102 */
103struct ssp_task_iu {
104 u8 LUN[8];
105 u8 _r_a;
106 u8 task_func;
107 u8 _r_b[4];
108 u16 task_tag;
109 u8 _r_c[12];
110} __packed;
111
112
113/*
114 * struct smp_req_phy_id - This structure defines the contents of
115 * an SMP Request that is comprised of the struct smp_request_header and a
116 * phy identifier.
117 * Examples: SMP_REQUEST_DISCOVER, SMP_REQUEST_REPORT_PHY_SATA.
118 *
119 * For specific information on each of these individual fields please reference
120 * the SAS specification.
121 */
122struct smp_req_phy_id {
123 u8 _r_a[4]; /* bytes 4-7 */
124
125 u8 ign_zone_grp:1; /* byte 8 */
126 u8 _r_b:7;
127
128 u8 phy_id; /* byte 9 */
129 u8 _r_c; /* byte 10 */
130 u8 _r_d; /* byte 11 */
131} __packed;
132
133/*
134 * struct smp_req_config_route_info - This structure defines the
135 * contents of an SMP Configure Route Information request.
136 *
137 * For specific information on each of these individual fields please reference
138 * the SAS specification.
139 */
140struct smp_req_conf_rtinfo {
141 u16 exp_change_cnt; /* bytes 4-5 */
142 u8 exp_rt_idx_hi; /* byte 6 */
143 u8 exp_rt_idx; /* byte 7 */
144
145 u8 _r_a; /* byte 8 */
146 u8 phy_id; /* byte 9 */
147 u16 _r_b; /* bytes 10-11 */
148
149 u8 _r_c:7; /* byte 12 */
150 u8 dis_rt_entry:1;
151 u8 _r_d[3]; /* bytes 13-15 */
152
153 u8 rt_sas_addr[8]; /* bytes 16-23 */
154 u8 _r_e[16]; /* bytes 24-39 */
155} __packed;
156
157/*
158 * struct smp_req_phycntl - This structure defines the contents of an
159 * SMP Phy Controller request.
160 *
161 * For specific information on each of these individual fields please reference
162 * the SAS specification.
163 */
164struct smp_req_phycntl {
165 u16 exp_change_cnt; /* byte 4-5 */
166
167 u8 _r_a[3]; /* bytes 6-8 */
168
169 u8 phy_id; /* byte 9 */
170 u8 phy_op; /* byte 10 */
171
172 u8 upd_pathway:1; /* byte 11 */
173 u8 _r_b:7;
174
175 u8 _r_c[12]; /* byte 12-23 */
176
177 u8 att_dev_name[8]; /* byte 24-31 */
178
179 u8 _r_d:4; /* byte 32 */
180 u8 min_linkrate:4;
181
182 u8 _r_e:4; /* byte 33 */
183 u8 max_linkrate:4;
184
185 u8 _r_f[2]; /* byte 34-35 */
186
187 u8 pathway:4; /* byte 36 */
188 u8 _r_g:4;
189
190 u8 _r_h[3]; /* bytes 37-39 */
191} __packed;
192
193/*
194 * struct smp_req - This structure simply unionizes the existing request
195 * structures into a common request type.
196 *
197 * XXX: This data structure may need to go to scsi/sas.h
198 */
199struct smp_req {
200 u8 type; /* byte 0 */
201 u8 func; /* byte 1 */
202 u8 alloc_resp_len; /* byte 2 */
203 u8 req_len; /* byte 3 */
204 u8 req_data[0];
205} __packed;
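/*
 * A minimal sketch (assumed SMP framing constants SMP_REQUEST/SMP_DISCOVER
 * from <scsi/sas.h>): building a DISCOVER request for one phy with the
 * structures above.  A req_len of 0 would also be legal here; see the
 * SAS 1.x defaulting in sci_io_request_construct_smp() (request.c).
 */
static inline void example_build_discover(struct smp_req *req, u8 phy)
{
	struct smp_req_phy_id *id = (struct smp_req_phy_id *)req->req_data;

	memset(req, 0, sizeof(*req) + sizeof(*id));
	req->type = SMP_REQUEST;	/* frame type 0x40 */
	req->func = SMP_DISCOVER;	/* function 0x10 */
	req->req_len = 2;		/* request dwords after the header */
	id->phy_id = phy;
}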
206
207#define SMP_RESP_HDR_SZ 4
208
209/*
210 * struct sci_sas_address - This structure depicts how a SAS address is
211 * represented by SCI.
212 * XXX convert this to u8 [SAS_ADDR_SIZE] like the rest of libsas
213 *
214 */
215struct sci_sas_address {
216 u32 high;
217 u32 low;
218};
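/*
 * Editor's sketch (assumed convention: libsas keeps the SAS address
 * big-endian in a u8[SAS_ADDR_SIZE] array) of converting to the split
 * high/low representation above.
 */
static inline void example_to_sci_sas_address(struct sci_sas_address *d,
					      const u8 *sas_addr)
{
	d->high = (sas_addr[0] << 24) | (sas_addr[1] << 16) |
		  (sas_addr[2] << 8)  |  sas_addr[3];
	d->low  = (sas_addr[4] << 24) | (sas_addr[5] << 16) |
		  (sas_addr[6] << 8)  |  sas_addr[7];
}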
219#endif
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
new file mode 100644
index 000000000000..c8b329c695f9
--- /dev/null
+++ b/drivers/scsi/isci/scu_completion_codes.h
@@ -0,0 +1,283 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _SCU_COMPLETION_CODES_HEADER_
57#define _SCU_COMPLETION_CODES_HEADER_
58
59/**
60 * This file contains the constants and macros for the SCU hardware completion
61 * codes.
62 *
63 *
64 */
65
66#define SCU_COMPLETION_TYPE_SHIFT 28
67#define SCU_COMPLETION_TYPE_MASK 0x70000000
68
69/**
70 * SCU_COMPLETION_TYPE() -
71 *
72 * This macro constructs an SCU completion type
73 */
74#define SCU_COMPLETION_TYPE(type) \
75 ((u32)(type) << SCU_COMPLETION_TYPE_SHIFT)
76
77/**
78 * SCU_COMPLETION_TYPE() -
79 *
80 * These macros define the individual SCU completion types.
81 */
82#define SCU_COMPLETION_TYPE_TASK SCU_COMPLETION_TYPE(0)
83#define SCU_COMPLETION_TYPE_SDMA SCU_COMPLETION_TYPE(1)
84#define SCU_COMPLETION_TYPE_UFI SCU_COMPLETION_TYPE(2)
85#define SCU_COMPLETION_TYPE_EVENT SCU_COMPLETION_TYPE(3)
86#define SCU_COMPLETION_TYPE_NOTIFY SCU_COMPLETION_TYPE(4)
87
88/**
89 *
90 *
91 * These constants provide the shift and mask values for the various parts of
92 * an SCU completion code.
93 */
94#define SCU_COMPLETION_STATUS_MASK 0x0FFC0000
95#define SCU_COMPLETION_TL_STATUS_MASK 0x0FC00000
96#define SCU_COMPLETION_TL_STATUS_SHIFT 22
97#define SCU_COMPLETION_SDMA_STATUS_MASK 0x003C0000
98#define SCU_COMPLETION_PEG_MASK 0x00010000
99#define SCU_COMPLETION_PORT_MASK 0x00007000
100#define SCU_COMPLETION_PE_MASK SCU_COMPLETION_PORT_MASK
101#define SCU_COMPLETION_PE_SHIFT 12
102#define SCU_COMPLETION_INDEX_MASK 0x00000FFF
103
104/**
105 * SCU_GET_COMPLETION_TYPE() -
106 *
107 * This macro returns the SCU completion type.
108 */
109#define SCU_GET_COMPLETION_TYPE(completion_code) \
110 ((completion_code) & SCU_COMPLETION_TYPE_MASK)
111
112/**
113 * SCU_GET_COMPLETION_STATUS() -
114 *
115 * This macro returns the SCU completion status.
116 */
117#define SCU_GET_COMPLETION_STATUS(completion_code) \
118 ((completion_code) & SCU_COMPLETION_STATUS_MASK)
119
120/**
121 * SCU_GET_COMPLETION_TL_STATUS() -
122 *
123 * This macro returns the transport layer completion status.
124 */
125#define SCU_GET_COMPLETION_TL_STATUS(completion_code) \
126 ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK)
127
128/**
129 * SCU_MAKE_COMPLETION_STATUS() -
130 *
131 * This macro takes a completion code and performs the shift and mask
132 * operations to turn it into a completion code that can be compared to a
133 * SCU_GET_COMPLETION_TL_STATUS.
134 */
135#define SCU_MAKE_COMPLETION_STATUS(completion_code) \
136 ((u32)(completion_code) << SCU_COMPLETION_TL_STATUS_SHIFT)
137
138/**
139 * SCU_NORMALIZE_COMPLETION_STATUS() -
140 *
141 * This macro takes a SCU_GET_COMPLETION_TL_STATUS and normalizes it for a
142 * return code.
143 */
144#define SCU_NORMALIZE_COMPLETION_STATUS(completion_code) \
145 (\
146 ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK) \
147 >> SCU_COMPLETION_TL_STATUS_SHIFT \
148 )
149
150/**
151 * SCU_GET_COMPLETION_SDMA_STATUS() -
152 *
153 * This macro returns the SDMA completion status.
154 */
155#define SCU_GET_COMPLETION_SDMA_STATUS(completion_code) \
156 ((completion_code) & SCU_COMPLETION_SDMA_STATUS_MASK)
157
158/**
159 * SCU_GET_COMPLETION_PEG() -
160 *
161 * This macro returns the Protocol Engine Group from the completion code.
162 */
163#define SCU_GET_COMPLETION_PEG(completion_code) \
164 ((completion_code) & SCU_COMPLETION_PEG_MASK)
165
166/**
167 * SCU_GET_COMPLETION_PORT() -
168 *
169 * This macro returns the logical port index from the completion code.
170 */
171#define SCU_GET_COMPLETION_PORT(completion_code) \
172 ((completion_code) & SCU_COMPLETION_PORT_MASK)
173
174/**
175 * SCU_GET_PROTOCOL_ENGINE_INDEX() -
176 *
177 * This macro returns the PE index from the completion code.
178 */
179#define SCU_GET_PROTOCOL_ENGINE_INDEX(completion_code) \
180 (((completion_code) & SCU_COMPLETION_PE_MASK) >> SCU_COMPLETION_PE_SHIFT)
181
182/**
183 * SCU_GET_COMPLETION_INDEX() -
184 *
185 * This macro returns the index of the completion which is either a TCi or an
186 * RNi depending on the completion type.
187 */
188#define SCU_GET_COMPLETION_INDEX(completion_code) \
189 ((completion_code) & SCU_COMPLETION_INDEX_MASK)
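/*
 * Editor's worked example (code value chosen for illustration): decoding
 * a task completion code of 0x0500102a with the macros above:
 *
 *	SCU_GET_COMPLETION_TYPE()         -> 0x00000000 (SCU_COMPLETION_TYPE_TASK)
 *	SCU_NORMALIZE_COMPLETION_STATUS() -> 0x14       (SCU_TASK_DONE_CRC_ERR)
 *	SCU_GET_PROTOCOL_ENGINE_INDEX()   -> 1
 *	SCU_GET_COMPLETION_INDEX()        -> 0x02a      (the TCi)
 */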
190
191#define SCU_UNSOLICITED_FRAME_MASK 0x0FFF0000
192#define SCU_UNSOLICITED_FRAME_SHIFT 16
193
194/**
195 * SCU_GET_FRAME_INDEX() -
196 *
197 * This macro returns a normalized frame index from an unsolicited frame
198 * completion.
199 */
200#define SCU_GET_FRAME_INDEX(completion_code) \
201 (\
202 ((completion_code) & SCU_UNSOLICITED_FRAME_MASK) \
203 >> SCU_UNSOLICITED_FRAME_SHIFT \
204 )
205
206#define SCU_UNSOLICITED_FRAME_ERROR_MASK 0x00008000
207
208/**
209 * SCU_GET_FRAME_ERROR() -
210 *
211 * This macro returns a zero (0) value if there is no frame error otherwise it
212 * returns non-zero (!0).
213 */
214#define SCU_GET_FRAME_ERROR(completion_code) \
215 ((completion_code) & SCU_UNSOLICITED_FRAME_ERROR_MASK)
216
217/**
218 *
219 *
220 * These constants represent normalized completion codes which must be shifted
221 * (see SCU_MAKE_COMPLETION_STATUS) to match the hardware completion code. In a 16-bit compiler,
222 * immediate constants are 16-bit values (the size of an int). If we shift
223 * those by 18 bits, we completely lose the value. To ensure the value is a
224 * 32-bit value like we want, each immediate value must be cast to a u32.
225 */
226#define SCU_TASK_DONE_GOOD ((u32)0x00)
227#define SCU_TASK_DONE_CRC_ERR ((u32)0x14)
228#define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14)
229#define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15)
230#define SCU_TASK_DONE_NAK_CMD_ERR ((u32)0x16)
231#define SCU_TASK_DONE_CMD_LL_R_ERR ((u32)0x16)
232#define SCU_TASK_DONE_LL_R_ERR ((u32)0x17)
233#define SCU_TASK_DONE_ACK_NAK_TO ((u32)0x17)
234#define SCU_TASK_DONE_LL_PERR ((u32)0x18)
235#define SCU_TASK_DONE_LL_SY_TERM ((u32)0x19)
236#define SCU_TASK_DONE_NAK_ERR ((u32)0x19)
237#define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A)
238#define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A)
239#define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B)
240#define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B)
241#define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C)
242#define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C)
243#define SCU_TASK_DONE_INV_FIS_TYPE ((u32)0x1D)
244#define SCU_TASK_DONE_XR_IU_LEN_ERR ((u32)0x1D)
245#define SCU_TASK_DONE_INV_FIS_LEN ((u32)0x1E)
246#define SCU_TASK_DONE_XR_WD_LEN ((u32)0x1E)
247#define SCU_TASK_DONE_SDMA_ERR ((u32)0x1F)
248#define SCU_TASK_DONE_OFFSET_ERR ((u32)0x20)
249#define SCU_TASK_DONE_MAX_PLD_ERR ((u32)0x21)
250#define SCU_TASK_DONE_EXCESS_DATA ((u32)0x22)
251#define SCU_TASK_DONE_LF_ERR ((u32)0x23)
252#define SCU_TASK_DONE_UNEXP_FIS ((u32)0x24)
253#define SCU_TASK_DONE_UNEXP_RESP ((u32)0x24)
254#define SCU_TASK_DONE_EARLY_RESP ((u32)0x25)
255#define SCU_TASK_DONE_SMP_RESP_TO_ERR ((u32)0x26)
256#define SCU_TASK_DONE_DMASETUP_DIRERR ((u32)0x27)
257#define SCU_TASK_DONE_SMP_UFI_ERR ((u32)0x27)
258#define SCU_TASK_DONE_XFERCNT_ERR ((u32)0x28)
259#define SCU_TASK_DONE_SMP_FRM_TYPE_ERR ((u32)0x28)
260#define SCU_TASK_DONE_SMP_LL_RX_ERR ((u32)0x29)
261#define SCU_TASK_DONE_RESP_LEN_ERR ((u32)0x2A)
262#define SCU_TASK_DONE_UNEXP_DATA ((u32)0x2B)
263#define SCU_TASK_DONE_OPEN_FAIL ((u32)0x2C)
264#define SCU_TASK_DONE_UNEXP_SDBFIS ((u32)0x2D)
265#define SCU_TASK_DONE_REG_ERR ((u32)0x2E)
266#define SCU_TASK_DONE_SDB_ERR ((u32)0x2F)
267#define SCU_TASK_DONE_TASK_ABORT ((u32)0x30)
268#define SCU_TASK_DONE_CMD_SDMA_ERR ((u32)0x32)
269#define SCU_TASK_DONE_CMD_LL_ABORT_ERR ((u32)0x33)
270#define SCU_TASK_OPEN_REJECT_WRONG_DESTINATION ((u32)0x34)
271#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1 ((u32)0x35)
272#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2 ((u32)0x36)
273#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3 ((u32)0x37)
274#define SCU_TASK_OPEN_REJECT_BAD_DESTINATION ((u32)0x38)
275#define SCU_TASK_OPEN_REJECT_ZONE_VIOLATION ((u32)0x39)
276#define SCU_TASK_DONE_VIIT_ENTRY_NV ((u32)0x3A)
277#define SCU_TASK_DONE_IIT_ENTRY_NV ((u32)0x3B)
278#define SCU_TASK_DONE_RNCNV_OUTBOUND ((u32)0x3C)
279#define SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY ((u32)0x3D)
280#define SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED ((u32)0x3E)
281#define SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED ((u32)0x3F)
282
283#endif /* _SCU_COMPLETION_CODES_HEADER_ */
diff --git a/drivers/scsi/isci/scu_event_codes.h b/drivers/scsi/isci/scu_event_codes.h
new file mode 100644
index 000000000000..36a945ad5722
--- /dev/null
+++ b/drivers/scsi/isci/scu_event_codes.h
@@ -0,0 +1,336 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef __SCU_EVENT_CODES_HEADER__
57#define __SCU_EVENT_CODES_HEADER__
58
59/**
60 * This file contains the constants and macros for the SCU event codes.
61 *
62 *
63 */
64
65#define SCU_EVENT_TYPE_CODE_SHIFT 24
66#define SCU_EVENT_TYPE_CODE_MASK 0x0F000000
67
68#define SCU_EVENT_SPECIFIC_CODE_SHIFT 18
69#define SCU_EVENT_SPECIFIC_CODE_MASK 0x00FC0000
70
71#define SCU_EVENT_CODE_MASK \
72 (SCU_EVENT_TYPE_CODE_MASK | SCU_EVENT_SPECIFIC_CODE_MASK)
73
74/**
75 * SCU_EVENT_TYPE() -
76 *
77 * This macro constructs an SCU event type from the type value.
78 */
79#define SCU_EVENT_TYPE(type) \
80 ((u32)(type) << SCU_EVENT_TYPE_CODE_SHIFT)
81
82/**
83 * SCU_EVENT_SPECIFIC() -
84 *
85 * This macro constructs an SCU event specifier from the code value.
86 */
87#define SCU_EVENT_SPECIFIC(code) \
88 ((u32)(code) << SCU_EVENT_SPECIFIC_CODE_SHIFT)
89
90/**
91 * SCU_EVENT_MESSAGE() -
92 *
93 * This macro combines an SCU event type and an SCU event specifier
94 * from the type and code values.
95 */
96#define SCU_EVENT_MESSAGE(type, code) \
97 ((type) | SCU_EVENT_SPECIFIC(code))
98
99/**
100 * SCU_EVENT_TYPE() -
101 *
102 * SCU_EVENT_TYPES
103 */
104#define SCU_EVENT_TYPE_SMU_COMMAND_ERROR SCU_EVENT_TYPE(0x08)
105#define SCU_EVENT_TYPE_SMU_PCQ_ERROR SCU_EVENT_TYPE(0x09)
106#define SCU_EVENT_TYPE_SMU_ERROR SCU_EVENT_TYPE(0x00)
107#define SCU_EVENT_TYPE_TRANSPORT_ERROR SCU_EVENT_TYPE(0x01)
108#define SCU_EVENT_TYPE_BROADCAST_CHANGE SCU_EVENT_TYPE(0x02)
109#define SCU_EVENT_TYPE_OSSP_EVENT SCU_EVENT_TYPE(0x03)
110#define SCU_EVENT_TYPE_FATAL_MEMORY_ERROR SCU_EVENT_TYPE(0x0F)
111#define SCU_EVENT_TYPE_RNC_SUSPEND_TX SCU_EVENT_TYPE(0x04)
112#define SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX SCU_EVENT_TYPE(0x05)
113#define SCU_EVENT_TYPE_RNC_OPS_MISC SCU_EVENT_TYPE(0x06)
114#define SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT SCU_EVENT_TYPE(0x07)
115#define SCU_EVENT_TYPE_ERR_CNT_EVENT SCU_EVENT_TYPE(0x0A)
116
117/**
118 *
119 *
120 * SCU_EVENT_SPECIFIERS
121 */
122#define SCU_EVENT_SPECIFIER_DRIVER_SUSPEND 0x20
123#define SCU_EVENT_SPECIFIER_RNC_RELEASE 0x00
124
125/**
126 *
127 *
128 * SMU_COMMAND_EVENTS
129 */
130#define SCU_EVENT_INVALID_CONTEXT_COMMAND \
131 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_COMMAND_ERROR, 0x00)
132
133/**
134 *
135 *
136 * SMU_PCQ_EVENTS
137 */
138#define SCU_EVENT_UNCORRECTABLE_PCQ_ERROR \
139 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_PCQ_ERROR, 0x00)
140
141/**
142 *
143 *
144 * SMU_EVENTS
145 */
146#define SCU_EVENT_UNCORRECTABLE_REGISTER_WRITE \
147 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x02)
148#define SCU_EVENT_UNCORRECTABLE_REGISTER_READ \
149 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x03)
150#define SCU_EVENT_PCIE_INTERFACE_ERROR \
151 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x04)
152#define SCU_EVENT_FUNCTION_LEVEL_RESET \
153 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x05)
154
155/**
156 *
157 *
158 * TRANSPORT_LEVEL_ERRORS
159 */
160#define SCU_EVENT_ACK_NAK_TIMEOUT_ERROR \
161 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_TRANSPORT_ERROR, 0x00)
162
163/**
164 *
165 *
166 * BROADCAST_CHANGE_EVENTS
167 */
168#define SCU_EVENT_BROADCAST_CHANGE \
169 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x01)
170#define SCU_EVENT_BROADCAST_RESERVED0 \
171 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x02)
172#define SCU_EVENT_BROADCAST_RESERVED1 \
173 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x03)
174#define SCU_EVENT_BROADCAST_SES \
175 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x04)
176#define SCU_EVENT_BROADCAST_EXPANDER \
177 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x05)
178#define SCU_EVENT_BROADCAST_AEN \
179 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x06)
180#define SCU_EVENT_BROADCAST_RESERVED3 \
181 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x07)
182#define SCU_EVENT_BROADCAST_RESERVED4 \
183 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x08)
184#define SCU_EVENT_PE_SUSPENDED \
185 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x09)
186
187/**
188 *
189 *
190 * OSSP_EVENTS
191 */
192#define SCU_EVENT_PORT_SELECTOR_DETECTED \
193 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x10)
194#define SCU_EVENT_SENT_PORT_SELECTION \
195 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x11)
196#define SCU_EVENT_HARD_RESET_TRANSMITTED \
197 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x12)
198#define SCU_EVENT_HARD_RESET_RECEIVED \
199 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x13)
200#define SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT \
201 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x15)
202#define SCU_EVENT_LINK_FAILURE \
203 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x16)
204#define SCU_EVENT_SATA_SPINUP_HOLD \
205 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x17)
206#define SCU_EVENT_SAS_15_SSC \
207 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x18)
208#define SCU_EVENT_SAS_15 \
209 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x19)
210#define SCU_EVENT_SAS_30_SSC \
211 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1A)
212#define SCU_EVENT_SAS_30 \
213 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1B)
214#define SCU_EVENT_SAS_60_SSC \
215 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1C)
216#define SCU_EVENT_SAS_60 \
217 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1D)
218#define SCU_EVENT_SATA_15_SSC \
219 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1E)
220#define SCU_EVENT_SATA_15 \
221 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1F)
222#define SCU_EVENT_SATA_30_SSC \
223 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x20)
224#define SCU_EVENT_SATA_30 \
225 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x21)
226#define SCU_EVENT_SATA_60_SSC \
227 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x22)
228#define SCU_EVENT_SATA_60 \
229 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x23)
230#define SCU_EVENT_SAS_PHY_DETECTED \
231 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x24)
232#define SCU_EVENT_SATA_PHY_DETECTED \
233 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x25)
234
235/**
236 *
237 *
238 * FATAL_INTERNAL_MEMORY_ERROR_EVENTS
239 */
240#define SCU_EVENT_TSC_RNSC_UNCORRECTABLE_ERROR \
241 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x00)
242#define SCU_EVENT_TC_RNC_UNCORRECTABLE_ERROR \
243 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x01)
244#define SCU_EVENT_ZPT_UNCORRECTABLE_ERROR \
245 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x02)
246
247/**
248 *
249 *
250 * REMOTE_NODE_SUSPEND_EVENTS
251 */
252#define SCU_EVENT_TL_RNC_SUSPEND_TX \
253 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x00)
254#define SCU_EVENT_TL_RNC_SUSPEND_TX_RX \
255 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x00)
256#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX \
257 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x20)
258#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX_RX \
259 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x20)
260
261/**
262 *
263 *
264 * REMOTE_NODE_MISC_EVENTS
265 */
266#define SCU_EVENT_POST_RCN_RELEASE \
267 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, SCU_EVENT_SPECIFIER_RNC_RELEASE)
268#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_ENABLE \
269 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x01)
270#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_DISABLE \
271 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x02)
272#define SCU_EVENT_POST_RNC_COMPLETE \
273 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x03)
274#define SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE \
275 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x04)
276
277/**
278 *
279 *
280 * ERROR_COUNT_EVENT
281 */
282#define SCU_EVENT_RX_CREDIT_BLOCKED_RECEIVED \
283 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x00)
284#define SCU_EVENT_TX_DONE_CREDIT_TIMEOUT \
285 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x01)
286#define SCU_EVENT_RX_DONE_CREDIT_TIMEOUT \
287 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x02)
288
289/**
290 * scu_get_event_type() -
291 *
292 * This macro returns the SCU event type from the event code.
293 */
294#define scu_get_event_type(event_code) \
295 ((event_code) & SCU_EVENT_TYPE_CODE_MASK)
296
297/**
298 * scu_get_event_specifier() -
299 *
300 * This macro returns the SCU event specifier from the event code.
301 */
302#define scu_get_event_specifier(event_code) \
303 ((event_code) & SCU_EVENT_SPECIFIC_CODE_MASK)
304
305/**
306 * scu_get_event_code() -
307 *
308 * This macro returns the combined SCU event type and SCU event specifier from
309 * the event code.
310 */
311#define scu_get_event_code(event_code) \
312 ((event_code) & SCU_EVENT_CODE_MASK)
313
314
315/**
316 *
317 *
318 * PTS_SCHEDULE_EVENT
319 */
320#define SCU_EVENT_SMP_RESPONSE_NO_PE \
321 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x00)
322#define SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE \
323 scu_get_event_specifier(SCU_EVENT_SMP_RESPONSE_NO_PE)
324
325#define SCU_EVENT_TASK_TIMEOUT \
326 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x01)
327#define SCU_EVENT_SPECIFIC_TASK_TIMEOUT \
328 scu_get_event_specifier(SCU_EVENT_TASK_TIMEOUT)
329
330#define SCU_EVENT_IT_NEXUS_TIMEOUT \
331 SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x02)
332#define SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT \
333 scu_get_event_specifier(SCU_EVENT_IT_NEXUS_TIMEOUT)
334
335
336#endif /* __SCU_EVENT_CODES_HEADER__ */
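As a usage illustration of the accessor macros above, an event handler can classify a completion event without touching the raw bit layout. This is a minimal sketch, not part of the patch; the handler name and dispatch choices are hypothetical, while the macros and event constants are the ones defined in this header:

static void example_handle_scu_event(u32 event_code)
{
	/* Mask off the type bits to find the event family. */
	switch (scu_get_event_type(event_code)) {
	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
		/* Compare type + specifier against one specific event. */
		if (scu_get_event_code(event_code) == SCU_EVENT_BROADCAST_CHANGE) {
			/* e.g. schedule domain rediscovery here */
		}
		break;
	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
		/* SCU_EVENT_ACK_NAK_TIMEOUT_ERROR lands here. */
		break;
	default:
		break;
	}
}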
diff --git a/drivers/scsi/isci/scu_remote_node_context.h b/drivers/scsi/isci/scu_remote_node_context.h
new file mode 100644
index 000000000000..33745adc826b
--- /dev/null
+++ b/drivers/scsi/isci/scu_remote_node_context.h
@@ -0,0 +1,229 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef __SCU_REMOTE_NODE_CONTEXT_HEADER__
57#define __SCU_REMOTE_NODE_CONTEXT_HEADER__
58
59/**
60 * This file contains the structures and constants used by the SCU hardware to
61 * describe a remote node context.
62 *
63 *
64 */
65
66/**
67 * struct ssp_remote_node_context - This structure contains the SCU hardware
68 * definition for an SSP remote node.
69 *
70 *
71 */
72struct ssp_remote_node_context {
73 /* WORD 0 */
74
75 /**
76 * This field is the remote node index assigned for this remote node. All
77 * remote nodes must have a unique remote node index. The value of the remote
78 * node index can not exceed the maximum number of remote nodes reported in
79 * the SCU device context capacity register.
80 */
81 u32 remote_node_index:12;
82 u32 reserved0_1:4;
83
84 /**
85 * This field tells the SCU hardware how many simultaneous connections
86 * this remote node will support.
87 */
88 u32 remote_node_port_width:4;
89
90 /**
91 * This field tells the SCU hardware which logical port to associate with this
92 * remote node.
93 */
94 u32 logical_port_index:3;
95 u32 reserved0_2:5;
96
97 /**
98 * This field will enable the I_T nexus loss timer for this remote node.
99 */
100 u32 nexus_loss_timer_enable:1;
101
102 /**
103 * This field is for driver debug only and is not used.
104 */
105 u32 check_bit:1;
106
107 /**
108 * This field must be set to true when the hardware DMAs the remote node
109 * context to the hardware SRAM. When the remote node is being invalidated
110 * this field must be set to false.
111 */
112 u32 is_valid:1;
113
114 /**
115 * This field must be set to true.
116 */
117 u32 is_remote_node_context:1;
118
119 /* WORD 1 - 2 */
120
121 /**
122 * This is the low word of the remote device SAS Address
123 */
124 u32 remote_sas_address_lo;
125
126 /**
127 * This field is the high word of the remote device SAS Address
128 */
129 u32 remote_sas_address_hi;
130
131 /* WORD 3 */
132 /**
133 * This field represents the function number assigned to this remote device.
134 * This value must match the virtual function number that is being used to
135 * communicate to the device.
136 */
137 u32 function_number:8;
138 u32 reserved3_1:8;
139
140 /**
141 * This field provides the driver a way to cheat on the arbitration wait time
142 * for this remote node.
143 */
144 u32 arbitration_wait_time:16;
145
146 /* WORD 4 */
147 /**
148 * This field tells the SCU hardware how long this device may occupy the
149 * connection before it must be closed.
150 */
151 u32 connection_occupancy_timeout:16;
152
153 /**
154 * This field tells the SCU hardware how long to maintain a connection when
155 * there are no frames being transmitted on the link.
156 */
157 u32 connection_inactivity_timeout:16;
158
159 /* WORD 5 */
160 /**
161 * This field allows the driver to cheat on the arbitration wait time for this
162 * remote node.
163 */
164 u32 initial_arbitration_wait_time:16;
165
166 /**
167 * This field tells the hardware what to program for the connection rate in
168 * the open address frame. See the SAS spec for valid values.
169 */
170 u32 oaf_connection_rate:4;
171
172 /**
173 * This field tells the SCU hardware what to program for the features in the
174 * open address frame. See the SAS spec for valid values.
175 */
176 u32 oaf_features:4;
177
178 /**
179 * This field tells the SCU hardware what to use for the source zone group in
180 * the open address frame. See the SAS spec for more details on zoning.
181 */
182 u32 oaf_source_zone_group:8;
183
184 /* WORD 6 */
185 /**
186 * This field tells the SCU hardware what to use for the more compatibility
187 * features in the open address frame. See the SAS Spec for details.
188 */
189 u32 oaf_more_compatibility_features;
190
191 /* WORD 7 */
192 u32 reserved7;
193
194};
195
196/**
197 * struct stp_remote_node_context - This structure contains the SCU hardware
198 * definition for a STP remote node.
199 *
200 * STP Targets are not yet supported so this definition is a placeholder until
201 * we do support them.
202 */
203struct stp_remote_node_context {
204 /**
205 * Placeholder data for the STP remote node.
206 */
207 u32 data[8];
208
209};
210
211/**
212 * This union combines the SAS and SATA remote node definitions.
213 *
214 * union scu_remote_node_context
215 */
216union scu_remote_node_context {
217 /**
218 * SSP Remote Node
219 */
220 struct ssp_remote_node_context ssp;
221
222 /**
223 * STP Remote Node
224 */
225 struct stp_remote_node_context stp;
226
227};
228
229#endif /* __SCU_REMOTE_NODE_CONTEXT_HEADER__ */
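To illustrate how the bit fields above compose into a usable context, here is a minimal sketch of filling in an SSP remote node context before it is posted to the hardware. The helper name and parameter list are hypothetical; the real construction logic lives in remote_node_context.c:

static void example_fill_ssp_rnc(struct ssp_remote_node_context *rnc,
				 u32 rni, u32 port, u32 width,
				 u32 sas_addr_hi, u32 sas_addr_lo)
{
	memset(rnc, 0, sizeof(*rnc));
	rnc->remote_node_index = rni;		/* unique RNi for this device */
	rnc->logical_port_index = port;		/* owning logical port */
	rnc->remote_node_port_width = width;	/* simultaneous connections */
	rnc->remote_sas_address_hi = sas_addr_hi;
	rnc->remote_sas_address_lo = sas_addr_lo;
	rnc->is_remote_node_context = 1;	/* must always be true */
	rnc->is_valid = 1;			/* set before the context is DMAed to SRAM */
}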
diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h
new file mode 100644
index 000000000000..7df87d923285
--- /dev/null
+++ b/drivers/scsi/isci/scu_task_context.h
@@ -0,0 +1,942 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _SCU_TASK_CONTEXT_H_
57#define _SCU_TASK_CONTEXT_H_
58
59/**
60 * This file contains the structures and constants for the SCU hardware task
61 * context.
62 *
63 *
64 */
65
66
67/**
68 * enum scu_ssp_task_type - This enumeration defines the various SSP task
69 * types the SCU hardware will accept. The definition for the various task
70 * types the SCU hardware will accept can be found in the DS specification.
71 *
72 *
73 */
74typedef enum {
75 SCU_TASK_TYPE_IOREAD, /* /< IO READ direction or no direction */
76 SCU_TASK_TYPE_IOWRITE, /* /< IO Write direction */
77 SCU_TASK_TYPE_SMP_REQUEST, /* /< SMP Request type */
78 SCU_TASK_TYPE_RESPONSE, /* /< Driver generated response frame (target mode) */
79 SCU_TASK_TYPE_RAW_FRAME, /* /< Raw frame request type */
80 SCU_TASK_TYPE_PRIMITIVE /* /< Request for a primitive to be transmitted */
81} scu_ssp_task_type;
82
83/**
84 * enum scu_sata_task_type - This enumeration defines the various SATA task
85 * types the SCU hardware will accept. The definition for the various task
86 * types the SCU hardware will accept can be found in the DS specification.
87 *
88 *
89 */
90typedef enum {
91 SCU_TASK_TYPE_DMA_IN, /* /< Read request */
92 SCU_TASK_TYPE_FPDMAQ_READ, /* /< NCQ read request */
93 SCU_TASK_TYPE_PACKET_DMA_IN, /* /< Packet read request */
94 SCU_TASK_TYPE_SATA_RAW_FRAME, /* /< Raw frame request */
95 RESERVED_4,
96 RESERVED_5,
97 RESERVED_6,
98 RESERVED_7,
99 SCU_TASK_TYPE_DMA_OUT, /* /< Write request */
100 SCU_TASK_TYPE_FPDMAQ_WRITE, /* /< NCQ write Request */
101 SCU_TASK_TYPE_PACKET_DMA_OUT /* /< Packet write request */
102} scu_sata_task_type;
103
104
105/**
106 *
107 *
108 * SCU_CONTEXT_TYPE
109 */
110#define SCU_TASK_CONTEXT_TYPE 0
111#define SCU_RNC_CONTEXT_TYPE 1
112
113/**
114 *
115 *
116 * SCU_TASK_CONTEXT_VALIDITY
117 */
118#define SCU_TASK_CONTEXT_INVALID 0
119#define SCU_TASK_CONTEXT_VALID 1
120
121/**
122 *
123 *
124 * SCU_COMMAND_CODE
125 */
126#define SCU_COMMAND_CODE_INITIATOR_NEW_TASK 0
127#define SCU_COMMAND_CODE_ACTIVE_TASK 1
128#define SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK 2
129#define SCU_COMMAND_CODE_TARGET_RAW_FRAMES 3
130
131/**
132 *
133 *
134 * SCU_TASK_PRIORITY
135 */
136/**
137 *
138 *
139 * This priority is used when no specific priority is requested for this request.
140 */
141#define SCU_TASK_PRIORITY_NORMAL 0
142
143/**
144 *
145 *
146 * This priority indicates that the task should be scheduled to the head of the
147 * queue. The task will NOT be executed if the TX is suspended for the remote
148 * node.
149 */
150#define SCU_TASK_PRIORITY_HEAD_OF_Q 1
151
152/**
153 *
154 *
155 * This priority indicates that the task will be executed before all
156 * SCU_TASK_PRIORITY_NORMAL and SCU_TASK_PRIORITY_HEAD_OF_Q tasks. The task
157 * WILL be executed if the TX is suspended for the remote node.
158 */
159#define SCU_TASK_PRIORITY_HIGH 2
160
161/**
162 *
163 *
164 * This task priority is reserved and should not be used.
165 */
166#define SCU_TASK_PRIORITY_RESERVED 3
167
168#define SCU_TASK_INITIATOR_MODE 1
169#define SCU_TASK_TARGET_MODE 0
170
171#define SCU_TASK_REGULAR 0
172#define SCU_TASK_ABORTED 1
173
174/* direction bit definition */
175/**
176 *
177 *
178 * SATA_DIRECTION
179 */
180#define SCU_SATA_WRITE_DATA_DIRECTION 0
181#define SCU_SATA_READ_DATA_DIRECTION 1
182
183/**
184 *
185 *
186 * SCU_COMMAND_CONTEXT_MACROS These macros provide the mask and shift
187 * operations to construct the various SCU commands
188 */
189#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT 21
190#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK 0x00E00000
191#define scu_get_command_request_type(x) \
192 ((x) & SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK)
193
194#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT 18
195#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK 0x001C0000
196#define scu_get_command_request_subtype(x) \
197 ((x) & SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK)
198
199#define SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK \
200 (\
201 SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK \
202 | SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK \
203 )
204#define scu_get_command_request_full_type(x) \
205 ((x) & SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK)
206
207#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT 16
208#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK 0x00010000
209#define scu_get_command_protocl_engine_group(x) \
210 ((x) & SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK)
211
212#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT 12
213#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK 0x00007000
214#define scu_get_command_reqeust_logical_port(x) \
215 ((x) & SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK)
216
217
218#define MAKE_SCU_CONTEXT_COMMAND_TYPE(type) \
219 ((u32)(type) << SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT)
220
221/**
222 * MAKE_SCU_CONTEXT_COMMAND_TYPE() -
223 *
224 * SCU_COMMAND_TYPES These constants provide the grouping of the different SCU
225 * command types.
226 */
227#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(0)
228#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(1)
229#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(2)
230#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(3)
231#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(6)
232
233#define MAKE_SCU_CONTEXT_COMMAND_REQUEST(type, command) \
234 ((type) | ((command) << SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT))
235
236/**
237 *
238 *
239 * SCU_REQUEST_TYPES These constants are the various request types that can be
240 * posted to the SCU hardware.
241 */
242#define SCU_CONTEXT_COMMAND_REQUST_POST_TC \
243 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 0))
244
245#define SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT \
246 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 1))
247
248#define SCU_CONTEXT_COMMAND_REQUST_DUMP_TC \
249 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC, 0))
250
251#define SCU_CONTEXT_COMMAND_POST_RNC_32 \
252 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 0))
253
254#define SCU_CONTEXT_COMMAND_POST_RNC_96 \
255 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 1))
256
257#define SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE \
258 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 2))
259
260#define SCU_CONTEXT_COMMAND_DUMP_RNC_32 \
261 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 0))
262
263#define SCU_CONTEXT_COMMAND_DUMP_RNC_96 \
264 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 1))
265
266#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX \
267 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 0))
268
269#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX \
270 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 1))
271
272#define SCU_CONTEXT_COMMAND_POST_RNC_RESUME \
273 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 2))
274
275#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_ENABLE \
276 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 3))
277
278#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_DISABLE \
279 (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 4))
280
281/**
282 *
283 *
284 * SCU_TASK_CONTEXT_PROTOCOL SCU Task context protocol types; these are used to
285 * program the SCU Task context protocol field in word 0x00.
286 */
287#define SCU_TASK_CONTEXT_PROTOCOL_SMP 0x00
288#define SCU_TASK_CONTEXT_PROTOCOL_SSP 0x01
289#define SCU_TASK_CONTEXT_PROTOCOL_STP 0x02
290#define SCU_TASK_CONTEXT_PROTOCOL_NONE 0x07
291
292/**
293 * struct ssp_task_context - This is the SCU hardware definition for an SSP
294 * request.
295 *
296 *
297 */
298struct ssp_task_context {
299 /* OFFSET 0x18 */
300 u32 reserved00:24;
301 u32 frame_type:8;
302
303 /* OFFSET 0x1C */
304 u32 reserved01;
305
306 /* OFFSET 0x20 */
307 u32 fill_bytes:2;
308 u32 reserved02:6;
309 u32 changing_data_pointer:1;
310 u32 retransmit:1;
311 u32 retry_data_frame:1;
312 u32 tlr_control:2;
313 u32 reserved03:19;
314
315 /* OFFSET 0x24 */
316 u32 uiRsvd4;
317
318 /* OFFSET 0x28 */
319 u32 target_port_transfer_tag:16;
320 u32 tag:16;
321
322 /* OFFSET 0x2C */
323 u32 data_offset;
324};
325
326/**
327 * struct stp_task_context - This is the SCU hardware definition for an STP
328 * request.
329 *
330 *
331 */
332struct stp_task_context {
333 /* OFFSET 0x18 */
334 u32 fis_type:8;
335 u32 pm_port:4;
336 u32 reserved0:3;
337 u32 control:1;
338 u32 command:8;
339 u32 features:8;
340
341 /* OFFSET 0x1C */
342 u32 reserved1;
343
344 /* OFFSET 0x20 */
345 u32 reserved2;
346
347 /* OFFSET 0x24 */
348 u32 reserved3;
349
350 /* OFFSET 0x28 */
351 u32 ncq_tag:5;
352 u32 reserved4:27;
353
354 /* OFFSET 0x2C */
355 u32 data_offset; /* TODO: What is this used for? */
356};
357
358/**
359 * struct smp_task_context - This is the SCU hardware definition for an SMP
360 * request.
361 *
362 *
363 */
364struct smp_task_context {
365 /* OFFSET 0x18 */
366 u32 response_length:8;
367 u32 function_result:8;
368 u32 function:8;
369 u32 frame_type:8;
370
371 /* OFFSET 0x1C */
372 u32 smp_response_ufi:12;
373 u32 reserved1:20;
374
375 /* OFFSET 0x20 */
376 u32 reserved2;
377
378 /* OFFSET 0x24 */
379 u32 reserved3;
380
381 /* OFFSET 0x28 */
382 u32 reserved4;
383
384 /* OFFSET 0x2C */
385 u32 reserved5;
386};
387
388/**
389 * struct primitive_task_context - This is the SCU hardware definition used
390 * when the driver wants to send a primitive on the link.
391 *
392 *
393 */
394struct primitive_task_context {
395 /* OFFSET 0x18 */
396 /**
397 * This field is the control word and it must be 0.
398 */
399 u32 control; /* /< must be set to 0 */
400
401 /* OFFSET 0x1C */
402 /**
403 * This field specifies the primitive that is to be transmitted.
404 */
405 u32 sequence;
406
407 /* OFFSET 0x20 */
408 u32 reserved0;
409
410 /* OFFSET 0x24 */
411 u32 reserved1;
412
413 /* OFFSET 0x28 */
414 u32 reserved2;
415
416 /* OFFSET 0x2C */
417 u32 reserved3;
418};
419
420/**
421 * The union of the protocols that can be selected in the SCU task context
422 * field.
423 *
424 * protocol_context
425 */
426union protocol_context {
427 struct ssp_task_context ssp;
428 struct stp_task_context stp;
429 struct smp_task_context smp;
430 struct primitive_task_context primitive;
431 u32 words[6];
432};
433
434/**
435 * struct scu_sgl_element - This structure represents a single SCU defined SGL
436 * element. SCU SGLs contain a 64 bit address, and the transfer length is
437 * limited to a 24 bit value. The SGL cannot cross a 4GB boundary.
438 *
439 * struct scu_sgl_element
440 */
441struct scu_sgl_element {
442 /**
443 * This field is the upper 32 bits of the 64 bit physical address.
444 */
445 u32 address_upper;
446
447 /**
448 * This field is the lower 32 bits of the 64 bit physical address.
449 */
450 u32 address_lower;
451
452 /**
453 * This field is the number of bytes to transfer.
454 */
455 u32 length:24;
456
457 /**
458 * This field is the address modifier to be used when a virtual function is
459 * requesting a data transfer.
460 */
461 u32 address_modifier:8;
462
463};
464
465#define SCU_SGL_ELEMENT_PAIR_A 0
466#define SCU_SGL_ELEMENT_PAIR_B 1
467
468/**
469 * struct scu_sgl_element_pair - This structure is the SCU hardware definition
470 * of a pair of SGL elements. The SCU hardware always works on SGL pairs.
471 * They are referred to in the DS specification as SGL A and SGL B. Each SGL
472 * pair is followed by the address of the next pair.
473 *
474 *
475 */
476struct scu_sgl_element_pair {
477 /* OFFSET 0x60-0x68 */
478 /**
479 * This field is the SGL element A of the SGL pair.
480 */
481 struct scu_sgl_element A;
482
483 /* OFFSET 0x6C-0x74 */
484 /**
485 * This field is the SGL element B of the SGL pair.
486 */
487 struct scu_sgl_element B;
488
489 /* OFFSET 0x78-0x7C */
490 /**
491 * This field is the upper 32 bits of the 64 bit address to the next SGL
492 * element pair.
493 */
494 u32 next_pair_upper;
495
496 /**
497 * This field is the lower 32 bits of the 64 bit address to the next SGL
498 * element pair.
499 */
500 u32 next_pair_lower;
501
502};
503
504/**
505 * struct transport_snapshot - This structure is the SCU hardware scratch area
506 * for the task context. This is set to 0 by the driver but can be read by
507 * issuing a dump TC request to the SCU.
508 *
509 *
510 */
511struct transport_snapshot {
512 /* OFFSET 0x48 */
513 u32 xfer_rdy_write_data_length;
514
515 /* OFFSET 0x4C */
516 u32 data_offset;
517
518 /* OFFSET 0x50 */
519 u32 data_transfer_size:24;
520 u32 reserved_50_0:8;
521
522 /* OFFSET 0x54 */
523 u32 next_initiator_write_data_offset;
524
525 /* OFFSET 0x58 */
526 u32 next_initiator_write_data_xfer_size:24;
527 u32 reserved_58_0:8;
528};
529
530/**
531 * struct scu_task_context - This structure defines the contents of the SCU
532 * silicon task context. It lays out all of the fields according to the
533 * expected order and location for the Storage Controller unit.
534 *
535 *
536 */
537struct scu_task_context {
538 /* OFFSET 0x00 ------ */
539 /**
540 * This field must be encoded to one of the valid SCU task priority values
541 * - SCU_TASK_PRIORITY_NORMAL
542 * - SCU_TASK_PRIORITY_HEAD_OF_Q
543 * - SCU_TASK_PRIORITY_HIGH
544 */
545 u32 priority:2;
546
547 /**
548 * This field must be set to true if this is an initiator generated request.
549 * Until target mode is supported all task requests are initiator requests.
550 */
551 u32 initiator_request:1;
552
553 /**
554 * This field must be set to one of the valid connection rates; valid values
555 * are 0x8, 0x9, and 0xA.
556 */
557 u32 connection_rate:4;
558
559 /**
560 * This field must be programmed when generating an SMP response since the SMP
561 * connection remains open until the SMP response is generated.
562 */
563 u32 protocol_engine_index:3;
564
565 /**
566 * This field must contain the logical port for the task request.
567 */
568 u32 logical_port_index:3;
569
570 /**
571 * This field must be set to one of the SCU_TASK_CONTEXT_PROTOCOL values
572 * - SCU_TASK_CONTEXT_PROTOCOL_SMP
573 * - SCU_TASK_CONTEXT_PROTOCOL_SSP
574 * - SCU_TASK_CONTEXT_PROTOCOL_STP
575 * - SCU_TASK_CONTEXT_PROTOCOL_NONE
576 */
577 u32 protocol_type:3;
578
579 /**
580 * This field must be set to the TCi allocated for this task
581 */
582 u32 task_index:12;
583
584 /**
585 * This field is reserved and must be set to 0x00
586 */
587 u32 reserved_00_0:1;
588
589 /**
590 * For a normal task request this must be set to 0. If this is an abort of
591 * this task request it must be set to 1.
592 */
593 u32 abort:1;
594
595 /**
596 * This field must be set to true for the SCU hardware to process the task.
597 */
598 u32 valid:1;
599
600 /**
601 * This field must be set to SCU_TASK_CONTEXT_TYPE
602 */
603 u32 context_type:1;
604
605 /* OFFSET 0x04 */
606 /**
607 * This field contains the RNi that is the target of this request.
608 */
609 u32 remote_node_index:12;
610
611 /**
612 * This field is only programmed for mirrored requests (which we are not
613 * using); in that case it is the RNi for the mirrored target.
614 */
615 u32 mirrored_node_index:12;
616
617 /**
618 * This field is programmed with the direction of the SATA request
619 * - SCU_SATA_WRITE_DATA_DIRECTION
620 * - SCU_SATA_READ_DATA_DIRECTION
621 */
622 u32 sata_direction:1;
623
624 /**
625 * This field is programmed with one of the following SCU_COMMAND_CODE values
626 * - SCU_COMMAND_CODE_INITIATOR_NEW_TASK
627 * - SCU_COMMAND_CODE_ACTIVE_TASK
628 * - SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK
629 * - SCU_COMMAND_CODE_TARGET_RAW_FRAMES
630 */
631 u32 command_code:2;
632
633 /**
634 * This field is set to true if the remote node should be suspended.
635 * This bit is only valid for SSP & SMP target devices.
636 */
637 u32 suspend_node:1;
638
639 /**
640 * This field is programmed with one of the following command type codes
641 *
642 * For SAS requests use the scu_ssp_task_type
643 * - SCU_TASK_TYPE_IOREAD
644 * - SCU_TASK_TYPE_IOWRITE
645 * - SCU_TASK_TYPE_SMP_REQUEST
646 * - SCU_TASK_TYPE_RESPONSE
647 * - SCU_TASK_TYPE_RAW_FRAME
648 * - SCU_TASK_TYPE_PRIMITIVE
649 *
650 * For SATA requests use the scu_sata_task_type
651 * - SCU_TASK_TYPE_DMA_IN
652 * - SCU_TASK_TYPE_FPDMAQ_READ
653 * - SCU_TASK_TYPE_PACKET_DMA_IN
654 * - SCU_TASK_TYPE_SATA_RAW_FRAME
655 * - SCU_TASK_TYPE_DMA_OUT
656 * - SCU_TASK_TYPE_FPDMAQ_WRITE
657 * - SCU_TASK_TYPE_PACKET_DMA_OUT
658 */
659 u32 task_type:4;
660
661 /* OFFSET 0x08 */
662 /**
663 * This field is reserved and must be set to 0x00
664 */
665 u32 link_layer_control:8; /* presently all reserved */
666
667 /**
668 * This field is set to true when TLR is to be enabled
669 */
670 u32 ssp_tlr_enable:1;
671
672 /**
673 * This field specifies if the SCU DMAs a response frame to host
674 * memory for good response frames when operating in target mode.
675 */
676 u32 dma_ssp_target_good_response:1;
677
678 /**
679 * This field indicates if the SCU should DMA the response frame to
680 * host memory.
681 */
682 u32 do_not_dma_ssp_good_response:1;
683
684 /**
685 * This field is set to true when strict ordering is to be enabled
686 */
687 u32 strict_ordering:1;
688
689 /**
690 * This field indicates the type of endianness to be utilized for the
691 * frame. Command, task, and response frames utilize control_frame
692 * set to 1.
693 */
694 u32 control_frame:1;
695
696 /**
697 * This field is reserved and the driver should set it to 0x00
698 */
699 u32 tl_control_reserved:3;
700
701 /**
702 * This field is set to true when the SCU hardware task timeout control is to
703 * be enabled
704 */
705 u32 timeout_enable:1;
706
707 /**
708 * This field is reserved and the driver should set it to 0x00
709 */
710 u32 pts_control_reserved:7;
711
712 /**
713 * This field should be set to true when block guard is to be enabled
714 */
715 u32 block_guard_enable:1;
716
717 /**
718 * This field is reserved and the driver should set it to 0x00
719 */
720 u32 sdma_control_reserved:7;
721
722 /* OFFSET 0x0C */
723 /**
724 * This field is the address modifier for this io request; it should be
725 * programmed with the virtual function that is making the request.
726 */
727 u32 address_modifier:16;
728
729 /**
730 * @todo Do we support mirrored SMP response frames?
731 */
732 u32 mirrored_protocol_engine:3; /* mirrored protocol Engine Index */
733
734 /**
735 * If this is a mirrored request the logical port index for the mirrored RNi
736 * must be programmed.
737 */
738 u32 mirrored_logical_port:4; /* mirrored local port index */
739
740 /**
741 * This field is reserved and the driver must set it to 0x00
742 */
743 u32 reserved_0C_0:8;
744
745 /**
746 * This field must be set to true if the mirrored request processing is to be
747 * enabled.
748 */
749 u32 mirror_request_enable:1; /* Mirrored request Enable */
750
751 /* OFFSET 0x10 */
752 /**
753 * This field is the command iu length in dwords
754 */
755 u32 ssp_command_iu_length:8;
756
757 /**
758 * This is the target TLR enable bit; it must be set to 0 when creating the
759 * task context.
760 */
761 u32 xfer_ready_tlr_enable:1;
762
763 /**
764 * This field is reserved and the driver must set it to 0x00
765 */
766 u32 reserved_10_0:7;
767
768 /**
769 * This is the maximum burst size that the SCU hardware will send in one
770 * connection; its value is (N x 512), and N must be a multiple of 2. If the
771 * value is 0x00 then the maximum burst size is disabled.
772 */
773 u32 ssp_max_burst_size:16;
774
775 /* OFFSET 0x14 */
776 /**
777 * This field is set to the number of bytes to be transferred in the request.
778 */
779 u32 transfer_length_bytes:24; /* In terms of bytes */
780
781 /**
782 * This field is reserved and the driver should set it to 0x00
783 */
784 u32 reserved_14_0:8;
785
786 /* OFFSET 0x18-0x2C */
787 /**
788 * This union provides the protocol specific part of the SCU Task Context.
789 */
790 union protocol_context type;
791
792 /* OFFSET 0x30-0x34 */
793 /**
794 * This field is the upper 32 bits of the 64 bit physical address of the
795 * command iu buffer
796 */
797 u32 command_iu_upper;
798
799 /**
800 * This field is the lower 32 bits of the 64 bit physical address of the
801 * command iu buffer
802 */
803 u32 command_iu_lower;
804
805 /* OFFSET 0x38-0x3C */
806 /**
807 * This field is the upper 32 bits of the 64 bit physical address of the
808 * response iu buffer
809 */
810 u32 response_iu_upper;
811
812 /**
813 * This field is the lower 32 bits of the 64 bit physical address of the
814 * response iu buffer
815 */
816 u32 response_iu_lower;
817
818 /* OFFSET 0x40 */
819 /**
820 * This field is set to the task phase of the SCU hardware. The driver must
821 * set this to 0x01
822 */
823 u32 task_phase:8;
824
825 /**
826 * This field is set to the transport layer task status. The driver must set
827 * this to 0x00
828 */
829 u32 task_status:8;
830
831 /**
832 * This field is used during initiator write TLR
833 */
834 u32 previous_extended_tag:4;
835
836 /**
837 * This field is set to the maximum number of retries for an STP non-data FIS
838 */
839 u32 stp_retry_count:2;
840
841 /**
842 * This field is reserved and the driver must set it to 0x00
843 */
844 u32 reserved_40_1:2;
845
846 /**
847 * This field is used by the SCU TL to determine when to take a snapshot when
848 * transmitting read data frames.
849 * - 0x00 The entire IO
850 * - 0x01 32k
851 * - 0x02 64k
852 * - 0x04 128k
853 * - 0x08 256k
854 */
855 u32 ssp_tlr_threshold:4;
856
857 /**
858 * This field is reserved and the driver must set it to 0x00
859 */
860 u32 reserved_40_2:4;
861
862 /* OFFSET 0x44 */
863 u32 write_data_length; /* read only set to 0 */
864
865 /* OFFSET 0x48-0x58 */
866 struct transport_snapshot snapshot; /* read only set to 0 */
867
868 /* OFFSET 0x5C */
869 u32 block_protection_enable:1;
870 u32 block_size:2;
871 u32 block_protection_function:2;
872 u32 reserved_5C_0:9;
873 u32 active_sgl_element:2; /* read only set to 0 */
874 u32 sgl_exhausted:1; /* read only set to 0 */
875 u32 payload_data_transfer_error:4; /* read only set to 0 */
876 u32 frame_buffer_offset:11; /* read only set to 0 */
877
878 /* OFFSET 0x60-0x7C */
879 /**
880 * This field is the first SGL element pair found in the TC data structure.
881 */
882 struct scu_sgl_element_pair sgl_pair_ab;
883 /* OFFSET 0x80-0x9C */
884 /**
885 * This field is the second SGL element pair found in the TC data structure.
886 */
887 struct scu_sgl_element_pair sgl_pair_cd;
888
889 /* OFFSET 0xA0-BC */
890 struct scu_sgl_element_pair sgl_snapshot_ac;
891
892 /* OFFSET 0xC0 */
893 u32 active_sgl_element_pair; /* read only set to 0 */
894
895 /* OFFSET 0xC4-0xCC */
896 u32 reserved_C4_CC[3];
897
898 /* OFFSET 0xD0 */
899 u32 intermediate_crc_value:16;
900 u32 initial_crc_seed:16;
901
902 /* OFFSET 0xD4 */
903 u32 application_tag_for_verify:16;
904 u32 application_tag_for_generate:16;
905
906 /* OFFSET 0xD8 */
907 u32 reference_tag_seed_for_verify_function;
908
909 /* OFFSET 0xDC */
910 u32 reserved_DC;
911
912 /* OFFSET 0xE0 */
913 u32 reserved_E0_0:16;
914 u32 application_tag_mask_for_generate:16;
915
916 /* OFFSET 0xE4 */
917 u32 block_protection_control:16;
918 u32 application_tag_mask_for_verify:16;
919
920 /* OFFSET 0xE8 */
921 u32 block_protection_error:8;
922 u32 reserved_E8_0:24;
923
924 /* OFFSET 0xEC */
925 u32 reference_tag_seed_for_verify;
926
927 /* OFFSET 0xF0 */
928 u32 intermediate_crc_valid_snapshot:16;
929 u32 reserved_F0_0:16;
930
931 /* OFFSET 0xF4 */
932 u32 reference_tag_seed_for_verify_function_snapshot;
933
934 /* OFFSET 0xF8 */
935 u32 snapshot_of_reserved_dword_DC_of_tc;
936
937 /* OFFSET 0xFC */
938 u32 reference_tag_seed_for_generate_function_snapshot;
939
940};
941
942#endif /* _SCU_TASK_CONTEXT_H_ */
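Because the SCU walks SGL element pairs through the embedded next-pair address, the driver has to split each 64 bit physical address into the upper and lower halves shown in struct scu_sgl_element_pair. A minimal sketch, assuming next_pair_dma is the DMA address of the following pair (the helper name is hypothetical; upper_32_bits() and lower_32_bits() are the standard kernel helpers):

static void example_link_sgl_pairs(struct scu_sgl_element_pair *cur,
				   dma_addr_t next_pair_dma)
{
	/* The SCU chains SGL pairs via a split 64 bit physical address. */
	cur->next_pair_upper = upper_32_bits(next_pair_dma);
	cur->next_pair_lower = lower_32_bits(next_pair_dma);
}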
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
new file mode 100644
index 000000000000..d6bcdd013dc9
--- /dev/null
+++ b/drivers/scsi/isci/task.c
@@ -0,0 +1,1676 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include <linux/completion.h>
57#include <linux/irqflags.h>
58#include "sas.h"
59#include <scsi/libsas.h>
60#include "remote_device.h"
61#include "remote_node_context.h"
62#include "isci.h"
63#include "request.h"
64#include "task.h"
65#include "host.h"
66
67/**
68* isci_task_refuse() - complete the request to the upper layer driver in
69* the case where an I/O needs to be completed back in the submit path.
70* @ihost: host on which the request was queued
71* @task: request to complete
72* @response: response code for the completed task.
73* @status: status code for the completed task.
74*
75*/
76static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
77 enum service_response response,
78 enum exec_status status)
79
80{
81 enum isci_completion_selection disposition;
82
83 disposition = isci_perform_normal_io_completion;
84 disposition = isci_task_set_completion_status(task, response, status,
85 disposition);
86
87 /* Tasks aborted specifically by a call to the lldd_abort_task
88 * function should not be completed to the host in the regular path.
89 */
90 switch (disposition) {
91 case isci_perform_normal_io_completion:
92 /* Normal notification (task_done) */
93 dev_dbg(&ihost->pdev->dev,
94 "%s: Normal - task = %p, response=%d, "
95 "status=%d\n",
96 __func__, task, response, status);
97
98 task->lldd_task = NULL;
99
100 isci_execpath_callback(ihost, task, task->task_done);
101 break;
102
103 case isci_perform_aborted_io_completion:
104 /*
105 * No notification because this request is already in the
106 * abort path.
107 */
108 dev_dbg(&ihost->pdev->dev,
109 "%s: Aborted - task = %p, response=%d, "
110 "status=%d\n",
111 __func__, task, response, status);
112 break;
113
114 case isci_perform_error_io_completion:
115 /* Use sas_task_abort */
116 dev_dbg(&ihost->pdev->dev,
117 "%s: Error - task = %p, response=%d, "
118 "status=%d\n",
119 __func__, task, response, status);
120
121 isci_execpath_callback(ihost, task, sas_task_abort);
122 break;
123
124 default:
125 dev_dbg(&ihost->pdev->dev,
126 "%s: isci task notification default case!",
127 __func__);
128 sas_task_abort(task);
129 break;
130 }
131}
132
133#define for_each_sas_task(num, task) \
134 for (; num > 0; num--,\
135 task = list_entry(task->list.next, struct sas_task, list))
136
137
138static inline int isci_device_io_ready(struct isci_remote_device *idev,
139 struct sas_task *task)
140{
141 return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
142 (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
143 isci_task_is_ncq_recovery(task))
144 : 0;
145}
146/**
147 * isci_task_execute_task() - This function is one of the SAS Domain Template
148 * functions. This function is called by libsas to send a task down to
149 * hardware.
150 * @task: This parameter specifies the SAS task to send.
151 * @num: This parameter specifies the number of tasks to queue.
152 * @gfp_flags: This parameter specifies the context of this call.
153 *
154 * Returns a status; zero indicates success.
155 */
156int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
157{
158 struct isci_host *ihost = dev_to_ihost(task->dev);
159 struct isci_remote_device *idev;
160 unsigned long flags;
161 bool io_ready;
162 u16 tag;
163
164 dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
165
166 for_each_sas_task(num, task) {
167 enum sci_status status = SCI_FAILURE;
168
169 spin_lock_irqsave(&ihost->scic_lock, flags);
170 idev = isci_lookup_device(task->dev);
171 io_ready = isci_device_io_ready(idev, task);
172 tag = isci_alloc_tag(ihost);
173 spin_unlock_irqrestore(&ihost->scic_lock, flags);
174
175 dev_dbg(&ihost->pdev->dev,
176 "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
177 task, num, task->dev, idev, idev ? idev->flags : 0,
178 task->uldd_task);
179
180 if (!idev) {
181 isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
182 SAS_DEVICE_UNKNOWN);
183 } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
184 /* Indicate QUEUE_FULL so that the scsi midlayer
185 * retries.
186 */
187 isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
188 SAS_QUEUE_FULL);
189 } else {
190 /* There is a device and it's ready for I/O. */
191 spin_lock_irqsave(&task->task_state_lock, flags);
192
193 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
194 /* The I/O was aborted. */
195 spin_unlock_irqrestore(&task->task_state_lock,
196 flags);
197
198 isci_task_refuse(ihost, task,
199 SAS_TASK_UNDELIVERED,
200 SAM_STAT_TASK_ABORTED);
201 } else {
202 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
203 spin_unlock_irqrestore(&task->task_state_lock, flags);
204
205 /* build and send the request. */
206 status = isci_request_execute(ihost, idev, task, tag);
207
208 if (status != SCI_SUCCESS) {
209
210 spin_lock_irqsave(&task->task_state_lock, flags);
211 /* Did not really start this command. */
212 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
213 spin_unlock_irqrestore(&task->task_state_lock, flags);
214
215 /* Indicate QUEUE_FULL so that the scsi
216 * midlayer retries. If the request
217 * failed for remote device reasons,
218 * it gets returned as
219 * SAS_TASK_UNDELIVERED next time
220 * through.
221 */
222 isci_task_refuse(ihost, task,
223 SAS_TASK_COMPLETE,
224 SAS_QUEUE_FULL);
225 }
226 }
227 }
228 if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
229 spin_lock_irqsave(&ihost->scic_lock, flags);
230 /* command never hit the device, so just free
231 * the tci and skip the sequence increment
232 */
233 isci_tci_free(ihost, ISCI_TAG_TCI(tag));
234 spin_unlock_irqrestore(&ihost->scic_lock, flags);
235 }
236 isci_put_device(idev);
237 }
238 return 0;
239}
240
241static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
242{
243 struct isci_tmf *isci_tmf;
244 enum sci_status status;
245
246 if (tmf_task != ireq->ttype)
247 return SCI_FAILURE;
248
249 isci_tmf = isci_request_access_tmf(ireq);
250
251 switch (isci_tmf->tmf_code) {
252
253 case isci_tmf_sata_srst_high:
254 case isci_tmf_sata_srst_low: {
255 struct host_to_dev_fis *fis = &ireq->stp.cmd;
256
257 memset(fis, 0, sizeof(*fis));
258
259 fis->fis_type = 0x27;
260 fis->flags &= ~0x80;
261 fis->flags &= 0xF0;
262 if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
263 fis->control |= ATA_SRST;
264 else
265 fis->control &= ~ATA_SRST;
266 break;
267 }
268 /* other management commands go here... */
269 default:
270 return SCI_FAILURE;
271 }
272
273 /* core builds the protocol specific request
274 * based on the h2d fis.
275 */
276 status = sci_task_request_construct_sata(ireq);
277
278 return status;
279}
280
281static struct isci_request *isci_task_request_build(struct isci_host *ihost,
282 struct isci_remote_device *idev,
283 u16 tag, struct isci_tmf *isci_tmf)
284{
285 enum sci_status status = SCI_FAILURE;
286 struct isci_request *ireq = NULL;
287 struct domain_device *dev;
288
289 dev_dbg(&ihost->pdev->dev,
290 "%s: isci_tmf = %p\n", __func__, isci_tmf);
291
292 dev = idev->domain_dev;
293
294 /* do common allocation and init of request object. */
295 ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
296 if (!ireq)
297 return NULL;
298
299 /* let the core do its construct. */
300 status = sci_task_request_construct(ihost, idev, tag,
301 ireq);
302
303 if (status != SCI_SUCCESS) {
304 dev_warn(&ihost->pdev->dev,
305 "%s: sci_task_request_construct failed - "
306 "status = 0x%x\n",
307 __func__,
308 status);
309 return NULL;
310 }
311
312 /* XXX convert to get this from task->tproto like other drivers */
313 if (dev->dev_type == SAS_END_DEV) {
314 isci_tmf->proto = SAS_PROTOCOL_SSP;
315 status = sci_task_request_construct_ssp(ireq);
316 if (status != SCI_SUCCESS)
317 return NULL;
318 }
319
320 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
321 isci_tmf->proto = SAS_PROTOCOL_SATA;
322 status = isci_sata_management_task_request_build(ireq);
323
324 if (status != SCI_SUCCESS)
325 return NULL;
326 }
327 return ireq;
328}
329
330static int isci_task_execute_tmf(struct isci_host *ihost,
331 struct isci_remote_device *idev,
332 struct isci_tmf *tmf, unsigned long timeout_ms)
333{
334 DECLARE_COMPLETION_ONSTACK(completion);
335 enum sci_task_status status = SCI_TASK_FAILURE;
336 struct isci_request *ireq;
337 int ret = TMF_RESP_FUNC_FAILED;
338 unsigned long flags;
339 unsigned long timeleft;
340 u16 tag;
341
342 spin_lock_irqsave(&ihost->scic_lock, flags);
343 tag = isci_alloc_tag(ihost);
344 spin_unlock_irqrestore(&ihost->scic_lock, flags);
345
346 if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
347 return ret;
348
349 /* sanity check, return TMF_RESP_FUNC_FAILED
350 * if the device is not there and ready.
351 */
352 if (!idev ||
353 (!test_bit(IDEV_IO_READY, &idev->flags) &&
354 !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
355 dev_dbg(&ihost->pdev->dev,
356 "%s: idev = %p not ready (%#lx)\n",
357 __func__,
358 idev, idev ? idev->flags : 0);
359 goto err_tci;
360 } else
361 dev_dbg(&ihost->pdev->dev,
362 "%s: idev = %p\n",
363 __func__, idev);
364
365 /* Assign the pointer to the TMF's completion kernel wait structure. */
366 tmf->complete = &completion;
367
368 ireq = isci_task_request_build(ihost, idev, tag, tmf);
369 if (!ireq)
370 goto err_tci;
371
372 spin_lock_irqsave(&ihost->scic_lock, flags);
373
374 /* start the TMF io. */
375 status = sci_controller_start_task(ihost, idev, ireq);
376
377 if (status != SCI_TASK_SUCCESS) {
378 dev_dbg(&ihost->pdev->dev,
379 "%s: start_io failed - status = 0x%x, request = %p\n",
380 __func__,
381 status,
382 ireq);
383 spin_unlock_irqrestore(&ihost->scic_lock, flags);
384 goto err_tci;
385 }
386
387 if (tmf->cb_state_func != NULL)
388 tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
389
390 isci_request_change_state(ireq, started);
391
392 /* add the request to the remote device request list. */
393 list_add(&ireq->dev_node, &idev->reqs_in_process);
394
395 spin_unlock_irqrestore(&ihost->scic_lock, flags);
396
397 /* Wait for the TMF to complete, or a timeout. */
398 timeleft = wait_for_completion_timeout(&completion,
399 msecs_to_jiffies(timeout_ms));
400
401 if (timeleft == 0) {
402 spin_lock_irqsave(&ihost->scic_lock, flags);
403
404 if (tmf->cb_state_func != NULL)
405 tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
406
407 sci_controller_terminate_request(ihost,
408 idev,
409 ireq);
410
411 spin_unlock_irqrestore(&ihost->scic_lock, flags);
412
413 wait_for_completion(tmf->complete);
414 }
415
416 isci_print_tmf(tmf);
417
418 if (tmf->status == SCI_SUCCESS)
419 ret = TMF_RESP_FUNC_COMPLETE;
420 else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
421 dev_dbg(&ihost->pdev->dev,
422 "%s: tmf.status == "
423 "SCI_FAILURE_IO_RESPONSE_VALID\n",
424 __func__);
425 ret = TMF_RESP_FUNC_COMPLETE;
426 }
427 /* Else - leave the default "failed" status alone. */
428
429 dev_dbg(&ihost->pdev->dev,
430 "%s: completed request = %p\n",
431 __func__,
432 ireq);
433
434 return ret;
435
436 err_tci:
437 spin_lock_irqsave(&ihost->scic_lock, flags);
438 isci_tci_free(ihost, ISCI_TAG_TCI(tag));
439 spin_unlock_irqrestore(&ihost->scic_lock, flags);
440
441 return ret;
442}
443
444static void isci_task_build_tmf(struct isci_tmf *tmf,
445 enum isci_tmf_function_codes code,
446 void (*tmf_sent_cb)(enum isci_tmf_cb_state,
447 struct isci_tmf *,
448 void *),
449 void *cb_data)
450{
451 memset(tmf, 0, sizeof(*tmf));
452
453 tmf->tmf_code = code;
454 tmf->cb_state_func = tmf_sent_cb;
455 tmf->cb_data = cb_data;
456}
457
458static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
459 enum isci_tmf_function_codes code,
460 void (*tmf_sent_cb)(enum isci_tmf_cb_state,
461 struct isci_tmf *,
462 void *),
463 struct isci_request *old_request)
464{
465 isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
466 tmf->io_tag = old_request->io_tag;
467}
468
469/**
470 * isci_task_validate_request_to_abort() - This function checks the given I/O
471 * against the "started" state. If the request is still "started", its
472 * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
473 * BEFORE CALLING THIS FUNCTION.
474 * @isci_request: This parameter specifies the request object to control.
475 * @isci_host: This parameter specifies the ISCI host object
476 * @isci_device: This is the device to which the request is pending.
477 * @aborted_io_completion: This is a completion structure that will be added to
478 * the request in case it is changed to aborting; this completion is
479 * triggered when the request is fully completed.
480 *
481 * Either "started" on successful change of the task status to "aborted", or
482 * "unallocated" if the task cannot be controlled.
483 */
484static enum isci_request_status isci_task_validate_request_to_abort(
485 struct isci_request *isci_request,
486 struct isci_host *isci_host,
487 struct isci_remote_device *isci_device,
488 struct completion *aborted_io_completion)
489{
490 enum isci_request_status old_state = unallocated;
491
492 /* Only abort the task if it's in the
493 * device's request_in_process list
494 */
495 if (isci_request && !list_empty(&isci_request->dev_node)) {
496 old_state = isci_request_change_started_to_aborted(
497 isci_request, aborted_io_completion);
498
499 }
500
501 return old_state;
502}
503
504/**
505* isci_request_cleanup_completed_loiterer() - This function will take care of
506* the final cleanup on any request which has been explicitly terminated.
507* @isci_host: This parameter specifies the ISCI host object
508* @isci_device: This is the device to which the request is pending.
509* @isci_request: This parameter specifies the terminated request object.
510* @task: This parameter is the libsas I/O request.
511*/
512static void isci_request_cleanup_completed_loiterer(
513 struct isci_host *isci_host,
514 struct isci_remote_device *isci_device,
515 struct isci_request *isci_request,
516 struct sas_task *task)
517{
518 unsigned long flags;
519
520 dev_dbg(&isci_host->pdev->dev,
521 "%s: isci_device=%p, request=%p, task=%p\n",
522 __func__, isci_device, isci_request, task);
523
524 if (task != NULL) {
525
526 spin_lock_irqsave(&task->task_state_lock, flags);
527 task->lldd_task = NULL;
528
529 task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
530
531 isci_set_task_doneflags(task);
532
533 /* If this task is not in the abort path, call task_done. */
534 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
535
536 spin_unlock_irqrestore(&task->task_state_lock, flags);
537 task->task_done(task);
538 } else
539 spin_unlock_irqrestore(&task->task_state_lock, flags);
540 }
541
542 if (isci_request != NULL) {
543 spin_lock_irqsave(&isci_host->scic_lock, flags);
544 list_del_init(&isci_request->dev_node);
545 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
546 }
547}
548
549/**
550 * isci_terminate_request_core() - This function will terminate the given
551 * request, and wait for it to complete. This function must only be called
552 * from a thread that can wait. Note that the request is terminated and
553 * completed (back to the host, if started there).
554 * @ihost: This SCU.
555 * @idev: The target.
556 * @isci_request: The I/O request to be terminated.
557 *
558 */
559static void isci_terminate_request_core(struct isci_host *ihost,
560 struct isci_remote_device *idev,
561 struct isci_request *isci_request)
562{
563 enum sci_status status = SCI_SUCCESS;
564 bool was_terminated = false;
565 bool needs_cleanup_handling = false;
566 enum isci_request_status request_status;
567 unsigned long flags;
568 unsigned long termination_completed = 1;
569 struct completion *io_request_completion;
570 struct sas_task *task;
571
572 dev_dbg(&ihost->pdev->dev,
573 "%s: device = %p; request = %p\n",
574 __func__, idev, isci_request);
575
576 spin_lock_irqsave(&ihost->scic_lock, flags);
577
578 io_request_completion = isci_request->io_request_completion;
579
580 task = (isci_request->ttype == io_task)
581 ? isci_request_access_task(isci_request)
582 : NULL;
583
584 /* Note that we are not going to control
585 * the target to abort the request.
586 */
587 set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
588
589 /* Make sure the request wasn't just sitting around signalling a
590 * device condition (if the request handle is NULL, then the
591 * request completed but needed additional handling here).
592 */
593 if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
594 was_terminated = true;
595 needs_cleanup_handling = true;
596 status = sci_controller_terminate_request(ihost,
597 idev,
598 isci_request);
599 }
600 spin_unlock_irqrestore(&ihost->scic_lock, flags);
601
602 /*
603 * The only time the request to terminate will
604 * fail is when the io request is completed and
605 * being aborted.
606 */
607 if (status != SCI_SUCCESS) {
608 dev_dbg(&ihost->pdev->dev,
609 "%s: sci_controller_terminate_request"
610 " returned = 0x%x\n",
611 __func__, status);
612
613 isci_request->io_request_completion = NULL;
614
615 } else {
616 if (was_terminated) {
617 dev_dbg(&ihost->pdev->dev,
618 "%s: before completion wait (%p/%p)\n",
619 __func__, isci_request, io_request_completion);
620
621 /* Wait here for the request to complete. */
622 #define TERMINATION_TIMEOUT_MSEC 500
623 termination_completed
624 = wait_for_completion_timeout(
625 io_request_completion,
626 msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
627
628 if (!termination_completed) {
629
630 /* The request to terminate has timed out. */
631 spin_lock_irqsave(&ihost->scic_lock,
632 flags);
633
634 /* Check for state changes. */
635 if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
636
637 /* The best we can do is to have the
638 * request die a silent death if it
639 * ever really completes.
640 *
641 * Set the request state to "dead",
642 * and clear the task pointer so that
643 * an actual completion event callback
644 * doesn't do anything.
645 */
646 isci_request->status = dead;
647 isci_request->io_request_completion
648 = NULL;
649
650 if (isci_request->ttype == io_task) {
651
652 /* Break links with the
653 * sas_task.
654 */
655 isci_request->ttype_ptr.io_task_ptr
656 = NULL;
657 }
658 } else
659 termination_completed = 1;
660
661 spin_unlock_irqrestore(&ihost->scic_lock,
662 flags);
663
664 if (!termination_completed) {
665
666 dev_dbg(&ihost->pdev->dev,
667 "%s: *** Timeout waiting for "
668 "termination(%p/%p)\n",
669 __func__, io_request_completion,
670 isci_request);
671
672 /* The request can no longer be referenced
673 * safely since it may go away if the
674					 * termination ever really does complete.
675 */
676 isci_request = NULL;
677 }
678 }
679 if (termination_completed)
680 dev_dbg(&ihost->pdev->dev,
681 "%s: after completion wait (%p/%p)\n",
682 __func__, isci_request, io_request_completion);
683 }
684
685 if (termination_completed) {
686
687 isci_request->io_request_completion = NULL;
688
689 /* Peek at the status of the request. This will tell
690 * us if there was special handling on the request such that it
691 * needs to be detached and freed here.
692 */
693 spin_lock_irqsave(&isci_request->state_lock, flags);
694 request_status = isci_request->status;
695
696 if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
697 && ((request_status == aborted)
698 || (request_status == aborting)
699 || (request_status == terminating)
700 || (request_status == completed)
701 || (request_status == dead)
702 )
703 ) {
704
705 /* The completion routine won't free a request in
706 * the aborted/aborting/etc. states, so we do
707 * it here.
708 */
709 needs_cleanup_handling = true;
710 }
711 spin_unlock_irqrestore(&isci_request->state_lock, flags);
712
713 }
714 if (needs_cleanup_handling)
715 isci_request_cleanup_completed_loiterer(
716 ihost, idev, isci_request, task);
717 }
718}
719
720/**
721 * isci_terminate_pending_requests() - This function will change the state of
722 *    all requests on the given device to "aborting", will terminate the
723 * requests, and wait for them to complete. This function must only be
724 * called from a thread that can wait. Note that the requests are all
725 * terminated and completed (back to the host, if started there).
726 * @ihost: This parameter specifies the SCU.
727 * @idev: This parameter specifies the target.
728 *
729 */
730void isci_terminate_pending_requests(struct isci_host *ihost,
731 struct isci_remote_device *idev)
732{
733 struct completion request_completion;
734 enum isci_request_status old_state;
735 unsigned long flags;
736 LIST_HEAD(list);
737
738 spin_lock_irqsave(&ihost->scic_lock, flags);
739 list_splice_init(&idev->reqs_in_process, &list);
740
741 /* assumes that isci_terminate_request_core deletes from the list */
742 while (!list_empty(&list)) {
743 struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
744
745 /* Change state to "terminating" if it is currently
746 * "started".
747 */
748 old_state = isci_request_change_started_to_newstate(ireq,
749 &request_completion,
750 terminating);
751 switch (old_state) {
752 case started:
753 case completed:
754 case aborting:
755 break;
756 default:
757 /* termination in progress, or otherwise dispositioned.
758 * We know the request was on 'list' so should be safe
759 * to move it back to reqs_in_process
760 */
761 list_move(&ireq->dev_node, &idev->reqs_in_process);
762 ireq = NULL;
763 break;
764 }
765
766 if (!ireq)
767 continue;
768 spin_unlock_irqrestore(&ihost->scic_lock, flags);
769
770 init_completion(&request_completion);
771
772 dev_dbg(&ihost->pdev->dev,
773 "%s: idev=%p request=%p; task=%p old_state=%d\n",
774 __func__, idev, ireq,
775 ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL,
776 old_state);
777
778 /* If the old_state is started:
779 * This request was not already being aborted. If it had been,
780 * then the aborting I/O (ie. the TMF request) would not be in
781 * the aborting state, and thus would be terminated here. Note
782 * that since the TMF completion's call to the kernel function
783 * "complete()" does not happen until the pending I/O request
784 * terminate fully completes, we do not have to implement a
785 * special wait here for already aborting requests - the
786 * termination of the TMF request will force the request
787		 * to finish its already-started terminate.
788 *
789 * If old_state == completed:
790 * This request completed from the SCU hardware perspective
791 * and now just needs cleaning up in terms of freeing the
792 * request and potentially calling up to libsas.
793 *
794 * If old_state == aborting:
795 * This request has already gone through a TMF timeout, but may
796 * not have been terminated; needs cleaning up at least.
797 */
798 isci_terminate_request_core(ihost, idev, ireq);
799 spin_lock_irqsave(&ihost->scic_lock, flags);
800 }
801 spin_unlock_irqrestore(&ihost->scic_lock, flags);
802}
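The loop above uses the classic splice-then-drain locking shape: the whole
reqs_in_process list is stolen under scic_lock, and the lock is dropped around
each blocking terminate call. A minimal sketch of the same pattern in
isolation (my_dev, my_req, process_one and the field names are hypothetical):

static void drain_requests(struct my_dev *dev)
{
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dev->lock, flags);
	list_splice_init(&dev->pending, &list);	/* steal every entry at once */
	while (!list_empty(&list)) {
		struct my_req *req = list_first_entry(&list, struct my_req, node);

		list_del_init(&req->node);
		spin_unlock_irqrestore(&dev->lock, flags);
		process_one(req);		/* may sleep; the lock is dropped */
		spin_lock_irqsave(&dev->lock, flags);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}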
803
804/**
805 * isci_task_send_lu_reset_sas() - This helper is called from the SAS Domain
806 * Template function isci_task_lu_reset().
807 * @lun: This parameter specifies the lun to be reset.
808 *
809 * status, zero indicates success.
810 */
811static int isci_task_send_lu_reset_sas(
812 struct isci_host *isci_host,
813 struct isci_remote_device *isci_device,
814 u8 *lun)
815{
816 struct isci_tmf tmf;
817 int ret = TMF_RESP_FUNC_FAILED;
818
819 dev_dbg(&isci_host->pdev->dev,
820 "%s: isci_host = %p, isci_device = %p\n",
821 __func__, isci_host, isci_device);
822 /* Send the LUN reset to the target. By the time the call returns,
823	 * the TMF has fully executed in the target (in which case the return
824	 * value is "TMF_RESP_FUNC_COMPLETE"), or the request timed out or
825	 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
826 */
827 isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
828
829 #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
830 ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
831
832 if (ret == TMF_RESP_FUNC_COMPLETE)
833 dev_dbg(&isci_host->pdev->dev,
834 "%s: %p: TMF_LU_RESET passed\n",
835 __func__, isci_device);
836 else
837 dev_dbg(&isci_host->pdev->dev,
838 "%s: %p: TMF_LU_RESET failed (%x)\n",
839 __func__, isci_device, ret);
840
841 return ret;
842}
843
844static int isci_task_send_lu_reset_sata(struct isci_host *ihost,
845 struct isci_remote_device *idev, u8 *lun)
846{
847 int ret = TMF_RESP_FUNC_FAILED;
848 struct isci_tmf tmf;
849
850 /* Send the soft reset to the target */
851 #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
852 isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
853
854 ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
855
856 if (ret != TMF_RESP_FUNC_COMPLETE) {
857 dev_dbg(&ihost->pdev->dev,
858 "%s: Assert SRST failed (%p) = %x",
859 __func__, idev, ret);
860
861 /* Return the failure so that the LUN reset is escalated
862 * to a target reset.
863 */
864 }
865 return ret;
866}
867
868/**
869 * isci_task_lu_reset() - This function is one of the SAS Domain Template
870 * functions. This is one of the Task Management functions called by libsas
871 * to reset the given lun. Note the assumption that while this call is
872 * executing, no I/O will be sent by the host to the device.
873 * @lun: This parameter specifies the lun to be reset.
874 *
875 * status, zero indicates success.
876 */
877int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
878{
879 struct isci_host *isci_host = dev_to_ihost(domain_device);
880 struct isci_remote_device *isci_device;
881 unsigned long flags;
882 int ret;
883
884 spin_lock_irqsave(&isci_host->scic_lock, flags);
885 isci_device = isci_lookup_device(domain_device);
886 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
887
888 dev_dbg(&isci_host->pdev->dev,
889 "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
890 __func__, domain_device, isci_host, isci_device);
891
892 if (isci_device)
893 set_bit(IDEV_EH, &isci_device->flags);
894
895 /* If there is a device reset pending on any request in the
896 * device's list, fail this LUN reset request in order to
897 * escalate to the device reset.
898 */
899 if (!isci_device ||
900 isci_device_is_reset_pending(isci_host, isci_device)) {
901 dev_dbg(&isci_host->pdev->dev,
902 "%s: No dev (%p), or "
903 "RESET PENDING: domain_device=%p\n",
904 __func__, isci_device, domain_device);
905 ret = TMF_RESP_FUNC_FAILED;
906 goto out;
907 }
908
909 /* Send the task management part of the reset. */
910 if (sas_protocol_ata(domain_device->tproto)) {
911 ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
912 } else
913 ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
914
915 /* If the LUN reset worked, all the I/O can now be terminated. */
916 if (ret == TMF_RESP_FUNC_COMPLETE)
917 /* Terminate all I/O now. */
918 isci_terminate_pending_requests(isci_host,
919 isci_device);
920
921 out:
922 isci_put_device(isci_device);
923 return ret;
924}
925
926
927/* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
928int isci_task_clear_nexus_port(struct asd_sas_port *port)
929{
930 return TMF_RESP_FUNC_FAILED;
931}
932
933
934
935int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
936{
937 return TMF_RESP_FUNC_FAILED;
938}
939
940/* Task Management Functions. Must be called from process context. */
941
942/**
943 * isci_abort_task_process_cb() - This is a helper function for the abort task
944 * TMF command. It manages the request state with respect to the successful
945 * transmission / completion of the abort task request.
946 * @cb_state: This parameter specifies when this function was called - either
947 * after the TMF request has been started or after it has timed out.
948 * @tmf: This parameter specifies the TMF in progress.
949 *
950 *
951 */
952static void isci_abort_task_process_cb(
953 enum isci_tmf_cb_state cb_state,
954 struct isci_tmf *tmf,
955 void *cb_data)
956{
957 struct isci_request *old_request;
958
959 old_request = (struct isci_request *)cb_data;
960
961 dev_dbg(&old_request->isci_host->pdev->dev,
962 "%s: tmf=%p, old_request=%p\n",
963 __func__, tmf, old_request);
964
965 switch (cb_state) {
966
967 case isci_tmf_started:
968 /* The TMF has been started. Nothing to do here, since the
969 * request state was already set to "aborted" by the abort
970 * task function.
971 */
972 if ((old_request->status != aborted)
973 && (old_request->status != completed))
974 dev_dbg(&old_request->isci_host->pdev->dev,
975 "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
976 __func__, old_request->status, tmf, old_request);
977 break;
978
979 case isci_tmf_timed_out:
980
981 /* Set the task's state to "aborting", since the abort task
982 * function thread set it to "aborted" (above) in anticipation
983 * of the task management request working correctly. Since the
984 * timeout has now fired, the TMF request failed. We set the
985 * state such that the request completion will indicate the
986 * device is no longer present.
987 */
988 isci_request_change_state(old_request, aborting);
989 break;
990
991 default:
992 dev_dbg(&old_request->isci_host->pdev->dev,
993 "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
994 __func__, cb_state, tmf, old_request);
995 break;
996 }
997}
998
999/**
1000 * isci_task_abort_task() - This function is one of the SAS Domain Template
1001 * functions. This function is called by libsas to abort a specified task.
1002 * @task: This parameter specifies the SAS task to abort.
1003 *
1004 * status, zero indicates success.
1005 */
1006int isci_task_abort_task(struct sas_task *task)
1007{
1008 struct isci_host *isci_host = dev_to_ihost(task->dev);
1009 DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
1010 struct isci_request *old_request = NULL;
1011 enum isci_request_status old_state;
1012 struct isci_remote_device *isci_device = NULL;
1013 struct isci_tmf tmf;
1014 int ret = TMF_RESP_FUNC_FAILED;
1015 unsigned long flags;
1016 bool any_dev_reset = false;
1017
1018 /* Get the isci_request reference from the task. Note that
1019 * this check does not depend on the pending request list
1020 * in the device, because tasks driving resets may land here
1021 * after completion in the core.
1022 */
1023 spin_lock_irqsave(&isci_host->scic_lock, flags);
1024 spin_lock(&task->task_state_lock);
1025
1026 old_request = task->lldd_task;
1027
1028 /* If task is already done, the request isn't valid */
1029 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
1030 (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
1031 old_request)
1032 isci_device = isci_lookup_device(task->dev);
1033
1034 spin_unlock(&task->task_state_lock);
1035 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1036
1037 dev_dbg(&isci_host->pdev->dev,
1038 "%s: task = %p\n", __func__, task);
1039
1040 if (!isci_device || !old_request)
1041 goto out;
1042
1043 set_bit(IDEV_EH, &isci_device->flags);
1044
1045 /* This version of the driver will fail abort requests for
1046 * SATA/STP. Failing the abort request this way will cause the
1047 * SCSI error handler thread to escalate to LUN reset
1048 */
1049 if (sas_protocol_ata(task->task_proto)) {
1050 dev_dbg(&isci_host->pdev->dev,
1051 " task %p is for a STP/SATA device;"
1052 " returning TMF_RESP_FUNC_FAILED\n"
1053 " to cause a LUN reset...\n", task);
1054 goto out;
1055 }
1056
1057 dev_dbg(&isci_host->pdev->dev,
1058 "%s: old_request == %p\n", __func__, old_request);
1059
1060 any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
1061
1062 spin_lock_irqsave(&task->task_state_lock, flags);
1063
1064 any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
1065
1066 /* If the extraction of the request reference from the task
1067 * failed, then the request has been completed (or if there is a
1068 * pending reset then this abort request function must be failed
1069 * in order to escalate to the target reset).
1070 */
1071 if ((old_request == NULL) || any_dev_reset) {
1072
1073 /* If the device reset task flag is set, fail the task
1074 * management request. Otherwise, the original request
1075 * has completed.
1076 */
1077 if (any_dev_reset) {
1078
1079 /* Turn off the task's DONE to make sure this
1080 * task is escalated to a target reset.
1081 */
1082 task->task_state_flags &= ~SAS_TASK_STATE_DONE;
1083
1084 /* Make the reset happen as soon as possible. */
1085 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
1086
1087 spin_unlock_irqrestore(&task->task_state_lock, flags);
1088
1089 /* Fail the task management request in order to
1090 * escalate to the target reset.
1091 */
1092 ret = TMF_RESP_FUNC_FAILED;
1093
1094 dev_dbg(&isci_host->pdev->dev,
1095 "%s: Failing task abort in order to "
1096 "escalate to target reset because\n"
1097 "SAS_TASK_NEED_DEV_RESET is set for "
1098 "task %p on dev %p\n",
1099 __func__, task, isci_device);
1100
1101
1102 } else {
1103 /* The request has already completed and there
1104 * is nothing to do here other than to set the task
1105 * done bit, and indicate that the task abort function
1106			 * was successful.
1107 */
1108 isci_set_task_doneflags(task);
1109
1110 spin_unlock_irqrestore(&task->task_state_lock, flags);
1111
1112 ret = TMF_RESP_FUNC_COMPLETE;
1113
1114 dev_dbg(&isci_host->pdev->dev,
1115 "%s: abort task not needed for %p\n",
1116 __func__, task);
1117 }
1118 goto out;
1119 } else {
1120 spin_unlock_irqrestore(&task->task_state_lock, flags);
1121 }
1122
1123 spin_lock_irqsave(&isci_host->scic_lock, flags);
1124
1125	/* Check the request status and change to "aborted" if currently
1126	 * "started"; if true then set the I/O kernel completion
1127 * struct that will be triggered when the request completes.
1128 */
1129 old_state = isci_task_validate_request_to_abort(
1130 old_request, isci_host, isci_device,
1131 &aborted_io_completion);
1132 if ((old_state != started) &&
1133 (old_state != completed) &&
1134 (old_state != aborting)) {
1135
1136 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1137
1138 /* The request was already being handled by someone else (because
1139 * they got to set the state away from started).
1140 */
1141 dev_dbg(&isci_host->pdev->dev,
1142 "%s: device = %p; old_request %p already being aborted\n",
1143 __func__,
1144 isci_device, old_request);
1145 ret = TMF_RESP_FUNC_COMPLETE;
1146 goto out;
1147 }
1148 if (task->task_proto == SAS_PROTOCOL_SMP ||
1149 test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
1150
1151 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1152
1153 dev_dbg(&isci_host->pdev->dev,
1154 "%s: SMP request (%d)"
1155 " or complete_in_target (%d), thus no TMF\n",
1156 __func__, (task->task_proto == SAS_PROTOCOL_SMP),
1157 test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
1158
1159 /* Set the state on the task. */
1160 isci_task_all_done(task);
1161
1162 ret = TMF_RESP_FUNC_COMPLETE;
1163
1164 /* Stopping and SMP devices are not sent a TMF, and are not
1165 * reset, but the outstanding I/O request is terminated below.
1166 */
1167 } else {
1168		/* Fill in the tmf structure */
1169 isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
1170 isci_abort_task_process_cb,
1171 old_request);
1172
1173 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1174
1175 #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
1176 ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
1177 ISCI_ABORT_TASK_TIMEOUT_MS);
1178
1179 if (ret != TMF_RESP_FUNC_COMPLETE)
1180 dev_dbg(&isci_host->pdev->dev,
1181 "%s: isci_task_send_tmf failed\n",
1182 __func__);
1183 }
1184 if (ret == TMF_RESP_FUNC_COMPLETE) {
1185 set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
1186
1187 /* Clean up the request on our side, and wait for the aborted
1188 * I/O to complete.
1189 */
1190 isci_terminate_request_core(isci_host, isci_device, old_request);
1191 }
1192
1193 /* Make sure we do not leave a reference to aborted_io_completion */
1194 old_request->io_request_completion = NULL;
1195 out:
1196 isci_put_device(isci_device);
1197 return ret;
1198}
1199
1200/**
1201 * isci_task_abort_task_set() - This function is one of the SAS Domain Template
1202 * functions. This is one of the Task Management functions called by libsas
1203 * to abort all tasks for the given lun.
1204 * @d_device: This parameter specifies the domain device associated with this
1205 * request.
1206 * @lun: This parameter specifies the lun associated with this request.
1207 *
1208 * status, zero indicates success.
1209 */
1210int isci_task_abort_task_set(
1211 struct domain_device *d_device,
1212 u8 *lun)
1213{
1214 return TMF_RESP_FUNC_FAILED;
1215}
1216
1217
1218/**
1219 * isci_task_clear_aca() - This function is one of the SAS Domain Template
1220 * functions. This is one of the Task Management functions called by libsas.
1221 * @d_device: This parameter specifies the domain device associated with this
1222 * request.
1223 * @lun: This parameter specifies the lun associated with this request.
1224 *
1225 * status, zero indicates success.
1226 */
1227int isci_task_clear_aca(
1228 struct domain_device *d_device,
1229 u8 *lun)
1230{
1231 return TMF_RESP_FUNC_FAILED;
1232}
1233
1234
1235
1236/**
1237 * isci_task_clear_task_set() - This function is one of the SAS Domain Template
1238 * functions. This is one of the Task Management functions called by libsas.
1239 * @d_device: This parameter specifies the domain device associated with this
1240 * request.
1241 * @lun: This parameter specifies the lun associated with this request.
1242 *
1243 * status, zero indicates success.
1244 */
1245int isci_task_clear_task_set(
1246 struct domain_device *d_device,
1247 u8 *lun)
1248{
1249 return TMF_RESP_FUNC_FAILED;
1250}
1251
1252
1253/**
1254 * isci_task_query_task() - This function is implemented to cause libsas to
1255 * correctly escalate the failed abort to a LUN or target reset (this is
1256 * because the libsas function sas_scsi_find_task() does not correctly
1257 * interpret all return codes from the abort task call). When
1258 * TMF_RESP_FUNC_SUCC is returned, libsas turns this into a LUN reset; when
1259 * FUNC_FAILED is returned, libsas will turn this into a target reset.
1260 * @task: This parameter specifies the sas task being queried.
1261 * @lun: This parameter specifies the lun associated with this request.
1262 *
1263 * status, zero indicates success.
1264 */
1265int isci_task_query_task(
1266 struct sas_task *task)
1267{
1268 /* See if there is a pending device reset for this device. */
1269 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
1270 return TMF_RESP_FUNC_FAILED;
1271 else
1272 return TMF_RESP_FUNC_SUCC;
1273}
1274
1275/*
1276 * isci_task_request_complete() - This function is called by the sci core when
1277 * a task request completes.
1278 * @ihost: This parameter specifies the ISCI host object
1279 * @ireq: This parameter is the completed isci_request object.
1280 * @completion_status: This parameter specifies the completion status from the
1281 * sci core.
1282 *
1283 * none.
1284 */
1285void
1286isci_task_request_complete(struct isci_host *ihost,
1287 struct isci_request *ireq,
1288 enum sci_task_status completion_status)
1289{
1290 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
1291 struct completion *tmf_complete;
1292
1293 dev_dbg(&ihost->pdev->dev,
1294 "%s: request = %p, status=%d\n",
1295 __func__, ireq, completion_status);
1296
1297 isci_request_change_state(ireq, completed);
1298
1299 tmf->status = completion_status;
1300 set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
1301
1302 if (tmf->proto == SAS_PROTOCOL_SSP) {
1303 memcpy(&tmf->resp.resp_iu,
1304 &ireq->ssp.rsp,
1305 SSP_RESP_IU_MAX_SIZE);
1306 } else if (tmf->proto == SAS_PROTOCOL_SATA) {
1307 memcpy(&tmf->resp.d2h_fis,
1308 &ireq->stp.rsp,
1309 sizeof(struct dev_to_host_fis));
1310 }
1311
1312 /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
1313 tmf_complete = tmf->complete;
1314
1315 sci_controller_complete_io(ihost, ireq->target_device, ireq);
1316	/* set the 'terminated' flag to make sure the request cannot be
1317	 * terminated or completed again.
1318 */
1319 set_bit(IREQ_TERMINATED, &ireq->flags);
1320
1321 isci_request_change_state(ireq, unallocated);
1322 list_del_init(&ireq->dev_node);
1323
1324 /* The task management part completes last. */
1325 complete(tmf_complete);
1326}
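For context, the complete(tmf_complete) above is the producer half of a
completion handshake; the submit side (isci_task_execute_tmf(), which is not
in this hunk) is assumed to wait roughly as in this sketch, which may differ
from the real function in detail:

	DECLARE_COMPLETION_ONSTACK(done);

	tmf->complete = &done;
	/* ... allocate, build and start the TMF request here ... */
	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(timeout_ms))) {
		/* Timed out: the optional cb_state_func is invoked with
		 * isci_tmf_timed_out (see isci_abort_task_process_cb above).
		 */
		return TMF_RESP_FUNC_FAILED;
	}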
1327
1328static void isci_smp_task_timedout(unsigned long _task)
1329{
1330 struct sas_task *task = (void *) _task;
1331 unsigned long flags;
1332
1333 spin_lock_irqsave(&task->task_state_lock, flags);
1334 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
1335 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1336 spin_unlock_irqrestore(&task->task_state_lock, flags);
1337
1338 complete(&task->completion);
1339}
1340
1341static void isci_smp_task_done(struct sas_task *task)
1342{
1343 if (!del_timer(&task->timer))
1344 return;
1345 complete(&task->completion);
1346}
1347
1348static struct sas_task *isci_alloc_task(void)
1349{
1350 struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
1351
1352 if (task) {
1353 INIT_LIST_HEAD(&task->list);
1354 spin_lock_init(&task->task_state_lock);
1355 task->task_state_flags = SAS_TASK_STATE_PENDING;
1356 init_timer(&task->timer);
1357 init_completion(&task->completion);
1358 }
1359
1360 return task;
1361}
1362
1363static void isci_free_task(struct isci_host *ihost, struct sas_task *task)
1364{
1365 if (task) {
1366 BUG_ON(!list_empty(&task->list));
1367 kfree(task);
1368 }
1369}
1370
1371static int isci_smp_execute_task(struct isci_host *ihost,
1372 struct domain_device *dev, void *req,
1373 int req_size, void *resp, int resp_size)
1374{
1375 int res, retry;
1376 struct sas_task *task = NULL;
1377
1378 for (retry = 0; retry < 3; retry++) {
1379 task = isci_alloc_task();
1380 if (!task)
1381 return -ENOMEM;
1382
1383 task->dev = dev;
1384 task->task_proto = dev->tproto;
1385 sg_init_one(&task->smp_task.smp_req, req, req_size);
1386 sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
1387
1388 task->task_done = isci_smp_task_done;
1389
1390 task->timer.data = (unsigned long) task;
1391 task->timer.function = isci_smp_task_timedout;
1392 task->timer.expires = jiffies + 10*HZ;
1393 add_timer(&task->timer);
1394
1395 res = isci_task_execute_task(task, 1, GFP_KERNEL);
1396
1397 if (res) {
1398 del_timer(&task->timer);
1399 dev_dbg(&ihost->pdev->dev,
1400 "%s: executing SMP task failed:%d\n",
1401 __func__, res);
1402 goto ex_err;
1403 }
1404
1405 wait_for_completion(&task->completion);
1406 res = -ECOMM;
1407 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1408 dev_dbg(&ihost->pdev->dev,
1409 "%s: smp task timed out or aborted\n",
1410 __func__);
1411 isci_task_abort_task(task);
1412 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1413 dev_dbg(&ihost->pdev->dev,
1414 "%s: SMP task aborted and not done\n",
1415 __func__);
1416 goto ex_err;
1417 }
1418 }
1419 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1420 task->task_status.stat == SAM_STAT_GOOD) {
1421 res = 0;
1422 break;
1423 }
1424 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1425 task->task_status.stat == SAS_DATA_UNDERRUN) {
1426 /* no error, but return the number of bytes of
1427 * underrun */
1428 res = task->task_status.residual;
1429 break;
1430 }
1431 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1432 task->task_status.stat == SAS_DATA_OVERRUN) {
1433 res = -EMSGSIZE;
1434 break;
1435 } else {
1436 dev_dbg(&ihost->pdev->dev,
1437 "%s: task to dev %016llx response: 0x%x "
1438 "status 0x%x\n", __func__,
1439 SAS_ADDR(dev->sas_addr),
1440 task->task_status.resp,
1441 task->task_status.stat);
1442 isci_free_task(ihost, task);
1443 task = NULL;
1444 }
1445 }
1446ex_err:
1447 BUG_ON(retry == 3 && task != NULL);
1448 isci_free_task(ihost, task);
1449 return res;
1450}
1451
1452#define DISCOVER_REQ_SIZE 16
1453#define DISCOVER_RESP_SIZE 56
1454
1455int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost,
1456 struct domain_device *dev,
1457 int phy_id, int *adt)
1458{
1459 struct smp_resp *disc_resp;
1460 u8 *disc_req;
1461 int res;
1462
1463 disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL);
1464 if (!disc_resp)
1465 return -ENOMEM;
1466
1467 disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL);
1468 if (disc_req) {
1469 disc_req[0] = SMP_REQUEST;
1470 disc_req[1] = SMP_DISCOVER;
1471 disc_req[9] = phy_id;
1472 } else {
1473 kfree(disc_resp);
1474 return -ENOMEM;
1475 }
1476 res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE,
1477 disc_resp, DISCOVER_RESP_SIZE);
1478 if (!res) {
1479 if (disc_resp->result != SMP_RESP_FUNC_ACC)
1480 res = disc_resp->result;
1481 else
1482 *adt = disc_resp->disc.attached_dev_type;
1483 }
1484 kfree(disc_req);
1485 kfree(disc_resp);
1486
1487 return res;
1488}
1489
1490static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num)
1491{
1492 struct domain_device *dev = idev->domain_dev;
1493 struct isci_port *iport = idev->isci_port;
1494 struct isci_host *ihost = iport->isci_host;
1495 int res, iteration = 0, attached_device_type;
1496 #define STP_WAIT_MSECS 25000
1497 unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS);
1498 unsigned long deadline = jiffies + tmo;
1499 enum {
1500 SMP_PHYWAIT_PHYDOWN,
1501 SMP_PHYWAIT_PHYUP,
1502 SMP_PHYWAIT_DONE
1503 } phy_state = SMP_PHYWAIT_PHYDOWN;
1504
1505 /* While there is time, wait for the phy to go away and come back */
1506 while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) {
1507 int event = atomic_read(&iport->event);
1508
1509 ++iteration;
1510
1511 tmo = wait_event_timeout(ihost->eventq,
1512 event != atomic_read(&iport->event) ||
1513 !test_bit(IPORT_BCN_BLOCKED, &iport->flags),
1514 tmo);
1515 /* link down, stop polling */
1516 if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags))
1517 break;
1518
1519 dev_dbg(&ihost->pdev->dev,
1520 "%s: iport %p, iteration %d,"
1521 " phase %d: time_remaining %lu, bcns = %d\n",
1522 __func__, iport, iteration, phy_state,
1523 tmo, test_bit(IPORT_BCN_PENDING, &iport->flags));
1524
1525 res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num,
1526 &attached_device_type);
1527 tmo = deadline - jiffies;
1528
1529 if (res) {
1530 dev_dbg(&ihost->pdev->dev,
1531 "%s: iteration %d, phase %d:"
1532 " SMP error=%d, time_remaining=%lu\n",
1533 __func__, iteration, phy_state, res, tmo);
1534 break;
1535 }
1536 dev_dbg(&ihost->pdev->dev,
1537 "%s: iport %p, iteration %d,"
1538 " phase %d: time_remaining %lu, bcns = %d, "
1539 "attdevtype = %x\n",
1540 __func__, iport, iteration, phy_state,
1541 tmo, test_bit(IPORT_BCN_PENDING, &iport->flags),
1542 attached_device_type);
1543
1544 switch (phy_state) {
1545 case SMP_PHYWAIT_PHYDOWN:
1546 /* Has the device gone away? */
1547 if (!attached_device_type)
1548 phy_state = SMP_PHYWAIT_PHYUP;
1549
1550 break;
1551
1552 case SMP_PHYWAIT_PHYUP:
1553 /* Has the device come back? */
1554 if (attached_device_type)
1555 phy_state = SMP_PHYWAIT_DONE;
1556 break;
1557
1558 case SMP_PHYWAIT_DONE:
1559 break;
1560 }
1561
1562 }
1563 dev_dbg(&ihost->pdev->dev, "%s: done\n", __func__);
1564}
1565
1566static int isci_reset_device(struct isci_host *ihost,
1567 struct isci_remote_device *idev)
1568{
1569 struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
1570 struct isci_port *iport = idev->isci_port;
1571 enum sci_status status;
1572 unsigned long flags;
1573 int rc;
1574
1575 dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
1576
1577 spin_lock_irqsave(&ihost->scic_lock, flags);
1578 status = sci_remote_device_reset(idev);
1579 if (status != SCI_SUCCESS) {
1580 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1581
1582 dev_dbg(&ihost->pdev->dev,
1583 "%s: sci_remote_device_reset(%p) returned %d!\n",
1584 __func__, idev, status);
1585
1586 return TMF_RESP_FUNC_FAILED;
1587 }
1588 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1589
1590 /* Make sure all pending requests are able to be fully terminated. */
1591 isci_device_clear_reset_pending(ihost, idev);
1592
1593 /* If this is a device on an expander, disable BCN processing. */
1594 if (!scsi_is_sas_phy_local(phy))
1595 set_bit(IPORT_BCN_BLOCKED, &iport->flags);
1596
1597 rc = sas_phy_reset(phy, true);
1598
1599 /* Terminate in-progress I/O now. */
1600 isci_remote_device_nuke_requests(ihost, idev);
1601
1602 /* Since all pending TCs have been cleaned, resume the RNC. */
1603 spin_lock_irqsave(&ihost->scic_lock, flags);
1604 status = sci_remote_device_reset_complete(idev);
1605 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1606
1607 /* If this is a device on an expander, bring the phy back up. */
1608 if (!scsi_is_sas_phy_local(phy)) {
1609 /* A phy reset will cause the device to go away then reappear.
1610 * Since libsas will take action on incoming BCNs (eg. remove
1611 * a device going through an SMP phy-control driven reset),
1612 * we need to wait until the phy comes back up before letting
1613 * discovery proceed in libsas.
1614 */
1615 isci_wait_for_smp_phy_reset(idev, phy->number);
1616
1617 spin_lock_irqsave(&ihost->scic_lock, flags);
1618 isci_port_bcn_enable(ihost, idev->isci_port);
1619 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1620 }
1621
1622 if (status != SCI_SUCCESS) {
1623 dev_dbg(&ihost->pdev->dev,
1624 "%s: sci_remote_device_reset_complete(%p) "
1625 "returned %d!\n", __func__, idev, status);
1626 }
1627
1628 dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
1629
1630 return rc;
1631}
1632
1633int isci_task_I_T_nexus_reset(struct domain_device *dev)
1634{
1635 struct isci_host *ihost = dev_to_ihost(dev);
1636 struct isci_remote_device *idev;
1637 unsigned long flags;
1638 int ret;
1639
1640 spin_lock_irqsave(&ihost->scic_lock, flags);
1641 idev = isci_lookup_device(dev);
1642 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1643
1644 if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
1645 ret = TMF_RESP_FUNC_COMPLETE;
1646 goto out;
1647 }
1648
1649 ret = isci_reset_device(ihost, idev);
1650 out:
1651 isci_put_device(idev);
1652 return ret;
1653}
1654
1655int isci_bus_reset_handler(struct scsi_cmnd *cmd)
1656{
1657 struct domain_device *dev = sdev_to_domain_dev(cmd->device);
1658 struct isci_host *ihost = dev_to_ihost(dev);
1659 struct isci_remote_device *idev;
1660 unsigned long flags;
1661 int ret;
1662
1663 spin_lock_irqsave(&ihost->scic_lock, flags);
1664 idev = isci_lookup_device(dev);
1665 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1666
1667 if (!idev) {
1668 ret = TMF_RESP_FUNC_COMPLETE;
1669 goto out;
1670 }
1671
1672 ret = isci_reset_device(ihost, idev);
1673 out:
1674 isci_put_device(idev);
1675 return ret;
1676}
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
new file mode 100644
index 000000000000..4a7fa90287ef
--- /dev/null
+++ b/drivers/scsi/isci/task.h
@@ -0,0 +1,367 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55#ifndef _ISCI_TASK_H_
56#define _ISCI_TASK_H_
57
58#include <scsi/sas_ata.h>
59#include "host.h"
60
61struct isci_request;
62
63/**
64 * enum isci_tmf_cb_state - This enum defines the possible states in which the
65 * TMF callback function is invoked during the TMF execution process.
66 *
67 *
68 */
69enum isci_tmf_cb_state {
70
71 isci_tmf_init_state = 0,
72 isci_tmf_started,
73 isci_tmf_timed_out
74};
75
76/**
77 * enum isci_tmf_function_codes - This enum defines the possible preparations
78 * of task management requests.
79 *
80 *
81 */
82enum isci_tmf_function_codes {
83
84 isci_tmf_func_none = 0,
85 isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
86 isci_tmf_ssp_lun_reset = TMF_LU_RESET,
87 isci_tmf_sata_srst_high = TMF_LU_RESET + 0x100, /* Non SCSI */
88 isci_tmf_sata_srst_low = TMF_LU_RESET + 0x101 /* Non SCSI */
89};
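For illustration, a LUN reset pairs one of these codes with a timeout; this
sketch simply mirrors the calls made by isci_task_send_lu_reset_sas() in
task.c above (ihost and idev stand for an isci_host and isci_remote_device
already in scope):

	struct isci_tmf tmf;
	int ret;

	isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
	ret = isci_task_execute_tmf(ihost, idev, &tmf, 2000 /* ms */);
	if (ret != TMF_RESP_FUNC_COMPLETE)
		dev_dbg(&ihost->pdev->dev, "LUN reset TMF failed (%x)\n", ret);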
90/**
91 * struct isci_tmf - This structure represents the task management object which
92 * acts as an interface to libsas for processing task management requests
93 *
94 *
95 */
96struct isci_tmf {
97
98 struct completion *complete;
99 enum sas_protocol proto;
100 union {
101 struct ssp_response_iu resp_iu;
102 struct dev_to_host_fis d2h_fis;
103 u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
104 } resp;
105 unsigned char lun[8];
106 u16 io_tag;
107 struct isci_remote_device *device;
108 enum isci_tmf_function_codes tmf_code;
109 int status;
110
111 /* The optional callback function allows the user process to
112 * track the TMF transmit / timeout conditions.
113 */
114 void (*cb_state_func)(
115 enum isci_tmf_cb_state,
116 struct isci_tmf *, void *);
117 void *cb_data;
118
119};
120
121static inline void isci_print_tmf(struct isci_tmf *tmf)
122{
123 if (SAS_PROTOCOL_SATA == tmf->proto)
124 dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
125 "%s: status = %x\n"
126 "tmf->resp.d2h_fis.status = %x\n"
127 "tmf->resp.d2h_fis.error = %x\n",
128 __func__,
129 tmf->status,
130 tmf->resp.d2h_fis.status,
131 tmf->resp.d2h_fis.error);
132 else
133 dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
134 "%s: status = %x\n"
135 "tmf->resp.resp_iu.data_present = %x\n"
136 "tmf->resp.resp_iu.status = %x\n"
137 "tmf->resp.resp_iu.data_length = %x\n"
138 "tmf->resp.resp_iu.data[0] = %x\n"
139 "tmf->resp.resp_iu.data[1] = %x\n"
140 "tmf->resp.resp_iu.data[2] = %x\n"
141 "tmf->resp.resp_iu.data[3] = %x\n",
142 __func__,
143 tmf->status,
144 tmf->resp.resp_iu.datapres,
145 tmf->resp.resp_iu.status,
146 be32_to_cpu(tmf->resp.resp_iu.response_data_len),
147 tmf->resp.resp_iu.resp_data[0],
148 tmf->resp.resp_iu.resp_data[1],
149 tmf->resp.resp_iu.resp_data[2],
150 tmf->resp.resp_iu.resp_data[3]);
151}
152
153
154int isci_task_execute_task(
155 struct sas_task *task,
156 int num,
157 gfp_t gfp_flags);
158
159int isci_task_abort_task(
160 struct sas_task *task);
161
162int isci_task_abort_task_set(
163 struct domain_device *d_device,
164 u8 *lun);
165
166int isci_task_clear_aca(
167 struct domain_device *d_device,
168 u8 *lun);
169
170int isci_task_clear_task_set(
171 struct domain_device *d_device,
172 u8 *lun);
173
174int isci_task_query_task(
175 struct sas_task *task);
176
177int isci_task_lu_reset(
178 struct domain_device *d_device,
179 u8 *lun);
180
181int isci_task_clear_nexus_port(
182 struct asd_sas_port *port);
183
184int isci_task_clear_nexus_ha(
185 struct sas_ha_struct *ha);
186
187int isci_task_I_T_nexus_reset(
188 struct domain_device *d_device);
189
190void isci_task_request_complete(
191 struct isci_host *isci_host,
192 struct isci_request *request,
193 enum sci_task_status completion_status);
194
195u16 isci_task_ssp_request_get_io_tag_to_manage(
196 struct isci_request *request);
197
198u8 isci_task_ssp_request_get_function(
199 struct isci_request *request);
200
201
202void *isci_task_ssp_request_get_response_data_address(
203 struct isci_request *request);
204
205u32 isci_task_ssp_request_get_response_data_length(
206 struct isci_request *request);
207
208int isci_queuecommand(
209 struct scsi_cmnd *scsi_cmd,
210 void (*donefunc)(struct scsi_cmnd *));
211
212int isci_bus_reset_handler(struct scsi_cmnd *cmd);
213
214/**
215 * enum isci_completion_selection - This enum defines the possible actions to
216 * take with respect to a given request's notification back to libsas.
217 *
218 *
219 */
220enum isci_completion_selection {
221
222 isci_perform_normal_io_completion, /* Normal notify (task_done) */
223 isci_perform_aborted_io_completion, /* No notification. */
224 isci_perform_error_io_completion /* Use sas_task_abort */
225};
226
227static inline void isci_set_task_doneflags(
228 struct sas_task *task)
229{
230	/* Since no further action will be taken on this task,
231 * make sure to mark it complete from the lldd perspective.
232 */
233 task->task_state_flags |= SAS_TASK_STATE_DONE;
234 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
235 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
236}
237/**
238 * isci_task_all_done() - This function clears the task bits to indicate the
239 * LLDD is done with the task.
240 *
241 *
242 */
243static inline void isci_task_all_done(
244 struct sas_task *task)
245{
246 unsigned long flags;
247
248	/* Since no further action will be taken on this task,
249 * make sure to mark it complete from the lldd perspective.
250 */
251 spin_lock_irqsave(&task->task_state_lock, flags);
252 isci_set_task_doneflags(task);
253 spin_unlock_irqrestore(&task->task_state_lock, flags);
254}
255
256/**
257 * isci_task_set_completion_status() - This function sets the completion status
258 * for the request.
259 * @task: This parameter is the completed request.
260 * @response: This parameter is the response code for the completed task.
261 * @status: This parameter is the status code for the completed task.
262 *
263 * @return The new notification mode for the request.
264 */
265static inline enum isci_completion_selection
266isci_task_set_completion_status(
267 struct sas_task *task,
268 enum service_response response,
269 enum exec_status status,
270 enum isci_completion_selection task_notification_selection)
271{
272 unsigned long flags;
273
274 spin_lock_irqsave(&task->task_state_lock, flags);
275
276 /* If a device reset is being indicated, make sure the I/O
277 * is in the error path.
278 */
279 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
280 /* Fail the I/O to make sure it goes into the error path. */
281 response = SAS_TASK_UNDELIVERED;
282 status = SAM_STAT_TASK_ABORTED;
283
284 task_notification_selection = isci_perform_error_io_completion;
285 }
286 task->task_status.resp = response;
287 task->task_status.stat = status;
288
289 switch (task_notification_selection) {
290
291 case isci_perform_error_io_completion:
292
293 if (task->task_proto == SAS_PROTOCOL_SMP) {
294 /* There is no error escalation in the SMP case.
295 * Convert to a normal completion to avoid the
296 * timeout in the discovery path and to let the
297 * next action take place quickly.
298 */
299 task_notification_selection
300 = isci_perform_normal_io_completion;
301
302 /* Fall through to the normal case... */
303 } else {
304 /* Use sas_task_abort */
305 /* Leave SAS_TASK_STATE_DONE clear
306 * Leave SAS_TASK_AT_INITIATOR set.
307 */
308 break;
309 }
310
311 case isci_perform_aborted_io_completion:
312 /* This path can occur with task-managed requests as well as
313 * requests terminated because of LUN or device resets.
314 */
315 /* Fall through to the normal case... */
316 case isci_perform_normal_io_completion:
317 /* Normal notification (task_done) */
318 isci_set_task_doneflags(task);
319 break;
320 default:
321 WARN_ONCE(1, "unknown task_notification_selection: %d\n",
322 task_notification_selection);
323 break;
324 }
325
326 spin_unlock_irqrestore(&task->task_state_lock, flags);
327
328 return task_notification_selection;
329
330}
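Callers are expected to act on the returned selection; a hedged sketch of a
completion path follows (the real dispatch lives in request.c, not shown here;
response, status and task_notification_selection are assumed in scope):

	switch (isci_task_set_completion_status(task, response, status,
						task_notification_selection)) {
	case isci_perform_normal_io_completion:
		task->task_done(task);		/* normal notification */
		break;
	case isci_perform_error_io_completion:
		sas_task_abort(task);		/* escalate through libsas */
		break;
	case isci_perform_aborted_io_completion:
	default:
		/* no notification back to libsas */
		break;
	}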
331/**
332 * isci_execpath_callback() - This function is called from the task
333 *    execute path when the task needs to call back to libsas about a
334 *    submit-time task failure. The callback occurs either through the task's
335 *    done function or through sas_task_abort. In the case of regular
336 *    non-discovery SATA/STP I/O requests, libsas takes the host lock before
337 *    calling execute task, so in that situation the host lock must be
338 *    managed around the callback.
339 *
340 * @ihost: This parameter is the controller to which the I/O request was sent.
341 * @task: This parameter is the I/O request.
342 * @func: This parameter is the function to call in the correct context.
343 *
344 */
345static inline void isci_execpath_callback(struct isci_host *ihost,
346 struct sas_task *task,
347 void (*func)(struct sas_task *))
348{
349 struct domain_device *dev = task->dev;
350
351 if (dev_is_sata(dev) && task->uldd_task) {
352 unsigned long flags;
353
354 /* Since we are still in the submit path, and since
355 * libsas takes the host lock on behalf of SATA
356 * devices before I/O starts (in the non-discovery case),
357 * we need to unlock before we can call the callback function.
358 */
359 raw_local_irq_save(flags);
360 spin_unlock(dev->sata_dev.ap->lock);
361 func(task);
362 spin_lock(dev->sata_dev.ap->lock);
363 raw_local_irq_restore(flags);
364 } else
365 func(task);
366}
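A typical use, assuming a submit-time failure that should be reported through
the task's own done callback (a sketch built only from the helpers above):

	isci_task_set_completion_status(task, SAS_TASK_UNDELIVERED,
					SAM_STAT_TASK_ABORTED,
					isci_perform_normal_io_completion);
	isci_execpath_callback(ihost, task, task->task_done);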
367#endif /* !defined(_ISCI_TASK_H_) */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
new file mode 100644
index 000000000000..e9e1e2abacb9
--- /dev/null
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -0,0 +1,225 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#include "host.h"
57#include "unsolicited_frame_control.h"
58#include "registers.h"
59
60int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
61{
62 struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
63 struct sci_unsolicited_frame *uf;
64 u32 buf_len, header_len, i;
65 dma_addr_t dma;
66 size_t size;
67 void *virt;
68
69 /*
70 * Prepare all of the memory sizes for the UF headers, UF address
71 * table, and UF buffers themselves.
72 */
73 buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
74 header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
75 size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t);
76
77 /*
78 * The Unsolicited Frame buffers are set at the start of the UF
79 * memory descriptor entry. The headers and address table will be
80 * placed after the buffers.
81 */
82 virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL);
83 if (!virt)
84 return -ENOMEM;
85
86 /*
87 * Program the location of the UF header table into the SCU.
88 * Notes:
89	 *   - The address must align on a 64-byte boundary. This is guaranteed,
90	 *     since the 1KB-aligned unsolicited frame buffers precede it.
91 * - Program unused header entries to overlap with the last
92 * unsolicited frame. The silicon will never DMA to these unused
93 * headers, since we program the UF address table pointers to
94 * NULL.
95 */
96 uf_control->headers.physical_address = dma + buf_len;
97 uf_control->headers.array = virt + buf_len;
98
99 /*
100 * Program the location of the UF address table into the SCU.
101 * Notes:
102	 *   - The address must align on a 64-bit boundary. This is guaranteed,
103	 *     since the header table preceding it starts on a 64-byte boundary
104	 *     and each header is 64 bytes in size.
105 */
106 uf_control->address_table.physical_address = dma + buf_len + header_len;
107 uf_control->address_table.array = virt + buf_len + header_len;
108 uf_control->get = 0;
109
110 /*
111 * UF buffer requirements are:
112 * - The last entry in the UF queue is not NULL.
113 * - There is a power of 2 number of entries (NULL or not-NULL)
114 * programmed into the queue.
115 * - Aligned on a 1KB boundary. */
116
117 /*
118 * Program the actual used UF buffers into the UF address table and
119 * the controller's array of UFs.
120 */
121 for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++) {
122 uf = &uf_control->buffers.array[i];
123
124 uf_control->address_table.array[i] = dma;
125
126 uf->buffer = virt;
127 uf->header = &uf_control->headers.array[i];
128 uf->state = UNSOLICITED_FRAME_EMPTY;
129
130 /*
131 * Increment the address of the physical and virtual memory
132 * pointers. Everything is aligned on 1k boundary with an
133 * increment of 1k.
134 */
135 virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
136 dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
137 }
138
139 return 0;
140}
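To make the single allocation above concrete, assume for illustration
SCU_MAX_UNSOLICITED_FRAMES = 128 and SCU_UNSOLICITED_FRAME_BUFFER_SIZE = 1024
(the authoritative values live in the driver headers); each header is 16
dwords (one attribute dword plus SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS),
i.e. 64 bytes, and dma_addr_t is taken as 8 bytes:

	buf_len    = 128 * 1024 = 131072 bytes  at [dma, dma + buf_len)
	header_len = 128 *   64 =   8192 bytes  at [dma + buf_len, ...)
	table_len  = 128 *    8 =   1024 bytes  at [dma + buf_len + header_len, ...)
	size       = 131072 + 8192 + 1024 = 140288 bytes from one dmam_alloc_coherent()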
141
142enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
143 u32 frame_index,
144 void **frame_header)
145{
146 if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
147		/* Skip the first word in the frame since this is a control word used
148 * by the hardware.
149 */
150 *frame_header = &uf_control->buffers.array[frame_index].header->data;
151
152 return SCI_SUCCESS;
153 }
154
155 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
156}
157
158enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control,
159 u32 frame_index,
160 void **frame_buffer)
161{
162 if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
163 *frame_buffer = uf_control->buffers.array[frame_index].buffer;
164
165 return SCI_SUCCESS;
166 }
167
168 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
169}
170
171bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control,
172 u32 frame_index)
173{
174 u32 frame_get;
175 u32 frame_cycle;
176
177 frame_get = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1);
178 frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES;
179
180 /*
181 * In the event there are NULL entries in the UF table, we need to
182 * advance the get pointer in order to find out if this frame should
183 * be released (i.e. update the get pointer)
184 */
185	while (frame_get < SCU_MAX_UNSOLICITED_FRAMES &&
186	       lower_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
187	       upper_32_bits(uf_control->address_table.array[frame_get]) == 0)
188 frame_get++;
189
190 /*
191	 * The table has a NULL entry as its last element. This is
192 * illegal.
193 */
194 BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES);
195 if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES)
196 return false;
197
198 uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED;
199
200 if (frame_get != frame_index) {
201 /*
202 * Frames remain in use until we advance the get pointer
203 * so there is nothing we can do here
204 */
205 return false;
206 }
207
208 /*
209	 * The frame index is equal to the current get pointer, so we
210	 * can now free up all of the contiguously released frame entries.
211 */
212 while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) {
213 uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY;
214
215 if (frame_get+1 == SCU_MAX_UNSOLICITED_FRAMES-1) {
216 frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES;
217 frame_get = 0;
218 } else
219 frame_get++;
220 }
221
222 uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get;
223
224 return true;
225}
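The get pointer above packs a wrap (cycle) bit directly over the power-of-two
index bits; this self-contained demo shows just that encoding, using an
assumed queue depth of 128 for illustration:

#include <stdio.h>

#define UF_MAX 128	/* assumed power-of-two queue depth */

int main(void)
{
	unsigned int get = UF_MAX | 5;		/* cycle bit set, index 5 */
	unsigned int frame_get = get & (UF_MAX - 1);
	unsigned int frame_cycle = get & UF_MAX;

	printf("index=%u cycle=%u\n", frame_get, !!frame_cycle);

	/* Wrapping back to index 0 toggles the cycle bit, exactly as in
	 * sci_unsolicited_frame_control_release_frame() above.
	 */
	frame_cycle ^= UF_MAX;
	frame_get = 0;
	printf("after wrap: index=%u cycle=%u\n", frame_get, !!frame_cycle);
	return 0;
}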
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
new file mode 100644
index 000000000000..31cb9506f52d
--- /dev/null
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -0,0 +1,278 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
56#ifndef _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
57#define _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
58
59#include "isci.h"
60
61#define SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS 15
62
63/**
64 * struct scu_unsolicited_frame_header -
65 *
66 * This structure delineates the format of an unsolicited frame header. The
67 * first DWORD contains UF attributes defined by the silicon architecture;
68 * the data field holds the actual header information received on the link.
69 */
70struct scu_unsolicited_frame_header {
71 /**
72 * This field indicates if there is an Initiator Index Table entry with
73 * which this header is associated.
74 */
75 u32 iit_exists:1;
76
77 /**
78 * This field simply indicates the protocol type (i.e. SSP, STP, SMP).
79 */
80 u32 protocol_type:3;
81
82 /**
83 * This field indicates if the frame is an address frame (IAF or OAF)
84 * or if it is an information unit frame.
85 */
86 u32 is_address_frame:1;
87
88 /**
89 * This field simply indicates the connection rate at which the frame
90 * was received.
91 */
92 u32 connection_rate:4;
93
94 u32 reserved:23;
95
96 /**
97 * This field represents the actual header data received on the link.
98 */
99 u32 data[SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS];
100
101};
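
As a hedged illustration of how a consumer might test these attribute bits, the sketch below checks for an SSP information-unit frame. The protocol_type encodings are assumptions made for the example; they are not defined by this header.

/*
 * Illustrative only: the protocol_type values below are assumed for
 * the example and do not come from this header.
 */
enum { UF_PROTO_SMP = 0, UF_PROTO_SSP = 1, UF_PROTO_STP = 2 };

static bool uf_is_ssp_iu(const struct scu_unsolicited_frame_header *hdr)
{
	/* An IU frame is any frame that is not an address frame (IAF/OAF). */
	return !hdr->is_address_frame && hdr->protocol_type == UF_PROTO_SSP;
}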
102
103
104
105/**
106 * enum unsolicited_frame_state -
107 *
108 * This enumeration represents the current unsolicited frame state. The
109 * controller object cannot update the hardware unsolicited frame get pointer
110 * unless it has already processed the prior unsolicited frames.
111 */
112enum unsolicited_frame_state {
113 /**
114 * This state is when the frame is empty and not in use. It is
115 * different from the released state in that the hardware could DMA
116 * data to this frame buffer.
117 */
118 UNSOLICITED_FRAME_EMPTY,
119
120 /**
121 * This state is set when the frame buffer is in use by some
122 * object in the system.
123 */
124 UNSOLICITED_FRAME_IN_USE,
125
126 /**
127 * This state is set when the frame is returned to the free pool
128 * but one or more frames prior to this one are still in use.
129 * Once all of the frames before this one are freed, it will go to
130 * the empty state.
131 */
132 UNSOLICITED_FRAME_RELEASED,
133
134 UNSOLICITED_FRAME_MAX_STATES
135};
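
The lifecycle implied by these states is EMPTY -> IN_USE (the hardware DMAs a frame in) -> RELEASED (the consumer is done, but earlier frames may still be pending) -> EMPTY (the get pointer has advanced past the frame). A small transition check built on this enum, offered as a sketch rather than driver code:

/* Sketch: validate a proposed state transition against the lifecycle. */
static bool uf_state_transition_ok(enum unsolicited_frame_state from,
				   enum unsolicited_frame_state to)
{
	switch (from) {
	case UNSOLICITED_FRAME_EMPTY:
		return to == UNSOLICITED_FRAME_IN_USE;
	case UNSOLICITED_FRAME_IN_USE:
		return to == UNSOLICITED_FRAME_RELEASED;
	case UNSOLICITED_FRAME_RELEASED:
		return to == UNSOLICITED_FRAME_EMPTY;
	default:
		return false;
	}
}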
136
137/**
138 * struct sci_unsolicited_frame -
139 *
140 * This is the unsolicited frame data structure; it acts as the container for
141 * the current frame state, frame header, and frame buffer.
142 */
143struct sci_unsolicited_frame {
144 /**
145 * This field contains the current frame state
146 */
147 enum unsolicited_frame_state state;
148
149 /**
150 * This field points to the frame header data.
151 */
152 struct scu_unsolicited_frame_header *header;
153
154 /**
155 * This field points to the frame buffer data.
156 */
157 void *buffer;
158
159};
160
161/**
162 * struct sci_uf_header_array -
163 *
164 * This structure contains all of the unsolicited frame header information.
165 */
166struct sci_uf_header_array {
167 /**
168 * This field represents a virtual pointer to the start
169 * address of the UF header array. Each entry holds the
170 * header data for one unsolicited frame.
171 */
172 struct scu_unsolicited_frame_header *array;
173
174 /**
175 * This field specifies the physical address location for the UF
176 * header array.
177 */
178 dma_addr_t physical_address;
179
180};
181
182/**
183 * struct sci_uf_buffer_array -
184 *
185 * This structure contains all of the unsolicited frame buffer (actual payload)
186 * information.
187 */
188struct sci_uf_buffer_array {
189 /**
190 * This field holds the unsolicited frame data; it is used to
191 * manage the unsolicited frame requests. It also represents
192 * the virtual address location that corresponds to the
193 * physical_address field.
194 */
195 struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES];
196
197 /**
198 * This field specifies the physical address location for the UF
199 * buffer array.
200 */
201 dma_addr_t physical_address;
202};
203
204/**
205 * struct sci_uf_address_table_array -
206 *
207 * This object maintains all of the unsolicited frame address table specific
208 * data. The address table is a collection of 64-bit pointers that point to
209 * 1KB buffers into which the silicon will DMA unsolicited frames.
210 */
211struct sci_uf_address_table_array {
212 /**
213 * This field represents a virtual pointer that refers to the
214 * starting address of the UF address table.
215 * 64-bit pointers are required by the hardware.
216 */
217 dma_addr_t *array;
218
219 /**
220 * This field specifies the physical address location for the UF
221 * address table.
222 */
223 dma_addr_t physical_address;
224
225};
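
During construction the driver fills this table with the DMA addresses of the per-frame buffers. A hedged sketch of what that population loop might look like, assuming contiguous 1KB buffer slots starting at a hypothetical uf_buf_dma base (the real logic lives in unsolicited_frame_control.c):

/*
 * Sketch only: assumes the frame buffers occupy contiguous 1KB slots
 * beginning at uf_buf_dma; the actual construct routine may differ.
 */
static void uf_fill_address_table(struct sci_uf_address_table_array *table,
				  dma_addr_t uf_buf_dma)
{
	u32 i;

	for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++)
		table->array[i] = uf_buf_dma + i * SZ_1K;
}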
226
227/**
228 * struct sci_unsolicited_frame_control -
229 *
230 * This object contains all of the data necessary to handle unsolicited frames.
231 */
232struct sci_unsolicited_frame_control {
233 /**
234 * This field is the software copy of the unsolicited frame queue
235 * get pointer. The controller object writes this value to the
236 * hardware to allow it to post further unsolicited frame entries.
237 */
238 u32 get;
239
240 /**
241 * This field contains all of the unsolicited frame header
242 * specific fields.
243 */
244 struct sci_uf_header_array headers;
245
246 /**
247 * This field contains all of the unsolicited frame buffer
248 * specific fields.
249 */
250 struct sci_uf_buffer_array buffers;
251
252 /**
253 * This field contains all of the unsolicited frame address table
254 * specific fields.
255 */
256 struct sci_uf_address_table_array address_table;
257
258};
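
The software get copy mirrors the layout used in the release path earlier in this patch: an enable bit ORed with the cycle flag and the get index. A helper that decomposes that word, with the index and cycle masks inferred from the release code rather than from a hardware specification:

/*
 * Decompose the software copy of the get register. The enable-bit
 * macro is the one used in the release path; the masks are inferred
 * from that code, not from a register spec.
 */
static void uf_decode_get(u32 get, u32 *index, u32 *cycle, bool *enabled)
{
	*index	 = get & (SCU_MAX_UNSOLICITED_FRAMES - 1);
	*cycle	 = get & SCU_MAX_UNSOLICITED_FRAMES;
	*enabled = !!(get & SCU_UFQGP_GEN_BIT(ENABLE_BIT));
}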
259
260struct isci_host;
261
262int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
263
264enum sci_status sci_unsolicited_frame_control_get_header(
265 struct sci_unsolicited_frame_control *uf_control,
266 u32 frame_index,
267 void **frame_header);
268
269enum sci_status sci_unsolicited_frame_control_get_buffer(
270 struct sci_unsolicited_frame_control *uf_control,
271 u32 frame_index,
272 void **frame_buffer);
273
274bool sci_unsolicited_frame_control_release_frame(
275 struct sci_unsolicited_frame_control *uf_control,
276 u32 frame_index);
277
278#endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */
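
Finally, a hedged sketch of how a completion handler might use the three exported routines together. The uf_control pointer and frame index are assumed to come from the host's completion path, and frame decoding is elided:

/*
 * Sketch: typical consumption pattern for one unsolicited frame.
 * Not taken from the driver; error handling is abbreviated.
 */
static enum sci_status handle_unsolicited_frame(
	struct sci_unsolicited_frame_control *uf_control, u32 frame_index)
{
	void *header, *buffer;
	enum sci_status status;

	status = sci_unsolicited_frame_control_get_header(uf_control,
							  frame_index,
							  &header);
	if (status != SCI_SUCCESS)
		return status;

	status = sci_unsolicited_frame_control_get_buffer(uf_control,
							  frame_index,
							  &buffer);
	if (status != SCI_SUCCESS)
		return status;

	/* ... decode the frame header and payload here ... */

	/* A true return means the hardware get pointer was advanced. */
	sci_unsolicited_frame_control_release_frame(uf_control, frame_index);

	return SCI_SUCCESS;
}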