45 files changed, 9213 insertions, 11 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 64ea80e45e3b..77cbfb1a696c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1431,6 +1431,14 @@ P: Russell King
1431 | M: linux@arm.linux.org.uk | 1431 | M: linux@arm.linux.org.uk |
1432 | F: include/linux/clk.h | 1432 | F: include/linux/clk.h |
1433 | 1433 | ||
1434 | CISCO FCOE HBA DRIVER | ||
1435 | P: Abhijeet Joglekar | ||
1436 | M: abjoglek@cisco.com | ||
1437 | P: Joe Eykholt | ||
1438 | M: jeykholt@cisco.com | ||
1439 | L: linux-scsi@vger.kernel.org | ||
1440 | S: Supported | ||
1441 | |||
1434 | CODA FILE SYSTEM | 1442 | CODA FILE SYSTEM |
1435 | P: Jan Harkes | 1443 | P: Jan Harkes |
1436 | M: jaharkes@cs.cmu.edu | 1444 | M: jaharkes@cs.cmu.edu |
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 3cf61ece71d7..348443bdb23b 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -119,7 +119,7 @@ enclosure_register(struct device *dev, const char *name, int components,
119 | edev->edev.class = &enclosure_class; | 119 | edev->edev.class = &enclosure_class; |
120 | edev->edev.parent = get_device(dev); | 120 | edev->edev.parent = get_device(dev); |
121 | edev->cb = cb; | 121 | edev->cb = cb; |
122 | dev_set_name(&edev->edev, name); | 122 | dev_set_name(&edev->edev, "%s", name); |
123 | err = device_register(&edev->edev); | 123 | err = device_register(&edev->edev); |
124 | if (err) | 124 | if (err) |
125 | goto err; | 125 | goto err; |
@@ -255,8 +255,8 @@ enclosure_component_register(struct enclosure_device *edev,
255 | ecomp->number = number; | 255 | ecomp->number = number; |
256 | cdev = &ecomp->cdev; | 256 | cdev = &ecomp->cdev; |
257 | cdev->parent = get_device(&edev->edev); | 257 | cdev->parent = get_device(&edev->edev); |
258 | if (name) | 258 | if (name && name[0]) |
259 | dev_set_name(cdev, name); | 259 | dev_set_name(cdev, "%s", name); |
260 | else | 260 | else |
261 | dev_set_name(cdev, "%u", number); | 261 | dev_set_name(cdev, "%u", number); |
262 | 262 | ||
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 8b7983aba8f7..36c21b19e5d7 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1978,7 +1978,8 @@ static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
1978 | { | 1978 | { |
1979 | struct scsi_cmnd *cmd = tw_dev->srb[request_id]; | 1979 | struct scsi_cmnd *cmd = tw_dev->srb[request_id]; |
1980 | 1980 | ||
1981 | scsi_dma_unmap(cmd); | 1981 | if (cmd->SCp.phase == TW_PHASE_SGLIST) |
1982 | scsi_dma_unmap(cmd); | ||
1982 | } /* End twa_unmap_scsi_data() */ | 1983 | } /* End twa_unmap_scsi_data() */ |
1983 | 1984 | ||
1984 | /* scsi_host_template initializer */ | 1985 | /* scsi_host_template initializer */ |
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index c03f1d2c9e2e..faa0fcfed71e 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -6,7 +6,7 @@
6 | Arnaldo Carvalho de Melo <acme@conectiva.com.br> | 6 | Arnaldo Carvalho de Melo <acme@conectiva.com.br> |
7 | Brad Strand <linux@3ware.com> | 7 | Brad Strand <linux@3ware.com> |
8 | 8 | ||
9 | Copyright (C) 1999-2007 3ware Inc. | 9 | Copyright (C) 1999-2009 3ware Inc. |
10 | 10 | ||
11 | Kernel compatiblity By: Andre Hedrick <andre@suse.com> | 11 | Kernel compatiblity By: Andre Hedrick <andre@suse.com> |
12 | Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com> | 12 | Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com> |
@@ -1294,7 +1294,8 @@ static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
1294 | { | 1294 | { |
1295 | dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n"); | 1295 | dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n"); |
1296 | 1296 | ||
1297 | scsi_dma_unmap(cmd); | 1297 | if (cmd->SCp.phase == TW_PHASE_SGLIST) |
1298 | scsi_dma_unmap(cmd); | ||
1298 | } /* End tw_unmap_scsi_data() */ | 1299 | } /* End tw_unmap_scsi_data() */ |
1299 | 1300 | ||
1300 | /* This function will reset a device extension */ | 1301 | /* This function will reset a device extension */ |
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 8e71e5e122b3..a5a2ba2561d9 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -6,7 +6,7 @@
6 | Arnaldo Carvalho de Melo <acme@conectiva.com.br> | 6 | Arnaldo Carvalho de Melo <acme@conectiva.com.br> |
7 | Brad Strand <linux@3ware.com> | 7 | Brad Strand <linux@3ware.com> |
8 | 8 | ||
9 | Copyright (C) 1999-2007 3ware Inc. | 9 | Copyright (C) 1999-2009 3ware Inc. |
10 | 10 | ||
11 | Kernel compatiblity By: Andre Hedrick <andre@suse.com> | 11 | Kernel compatiblity By: Andre Hedrick <andre@suse.com> |
12 | Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com> | 12 | Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com> |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8ed2990c826e..fb2740789b68 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -628,6 +628,17 @@ config FCOE
628 | ---help--- | 628 | ---help--- |
629 | Fibre Channel over Ethernet module | 629 | Fibre Channel over Ethernet module |
630 | 630 | ||
631 | config FCOE_FNIC | ||
632 | tristate "Cisco FNIC Driver" | ||
633 | depends on PCI && X86 | ||
634 | select LIBFC | ||
635 | help | ||
636 | This is support for the Cisco PCI-Express FCoE HBA. | ||
637 | |||
638 | To compile this driver as a module, choose M here and read | ||
639 | <file:Documentation/scsi/scsi.txt>. | ||
640 | The module will be called fnic. | ||
641 | |||
631 | config SCSI_DMX3191D | 642 | config SCSI_DMX3191D |
632 | tristate "DMX3191D SCSI support" | 643 | tristate "DMX3191D SCSI support" |
633 | depends on PCI && SCSI | 644 | depends on PCI && SCSI |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e7c861ac417d..a5049cfb40ed 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_SCSI_DH) += device_handler/
39 | obj-$(CONFIG_LIBFC) += libfc/ | 39 | obj-$(CONFIG_LIBFC) += libfc/ |
40 | obj-$(CONFIG_LIBFCOE) += fcoe/ | 40 | obj-$(CONFIG_LIBFCOE) += fcoe/ |
41 | obj-$(CONFIG_FCOE) += fcoe/ | 41 | obj-$(CONFIG_FCOE) += fcoe/ |
42 | obj-$(CONFIG_FCOE_FNIC) += fnic/ | ||
42 | obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o | 43 | obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o |
43 | obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o | 44 | obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o |
44 | obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o | 45 | obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o |
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
new file mode 100644
index 000000000000..37c3440bc17c
--- /dev/null
+++ b/drivers/scsi/fnic/Makefile
@@ -0,0 +1,15 @@
1 | obj-$(CONFIG_FCOE_FNIC) += fnic.o | ||
2 | |||
3 | fnic-y := \ | ||
4 | fnic_attrs.o \ | ||
5 | fnic_isr.o \ | ||
6 | fnic_main.o \ | ||
7 | fnic_res.o \ | ||
8 | fnic_fcs.o \ | ||
9 | fnic_scsi.o \ | ||
10 | vnic_cq.o \ | ||
11 | vnic_dev.o \ | ||
12 | vnic_intr.o \ | ||
13 | vnic_rq.o \ | ||
14 | vnic_wq_copy.o \ | ||
15 | vnic_wq.o | ||
diff --git a/drivers/scsi/fnic/cq_desc.h b/drivers/scsi/fnic/cq_desc.h
new file mode 100644
index 000000000000..d1225cf6320e
--- /dev/null
+++ b/drivers/scsi/fnic/cq_desc.h
@@ -0,0 +1,78 @@
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _CQ_DESC_H_ | ||
19 | #define _CQ_DESC_H_ | ||
20 | |||
21 | /* | ||
22 | * Completion queue descriptor types | ||
23 | */ | ||
24 | enum cq_desc_types { | ||
25 | CQ_DESC_TYPE_WQ_ENET = 0, | ||
26 | CQ_DESC_TYPE_DESC_COPY = 1, | ||
27 | CQ_DESC_TYPE_WQ_EXCH = 2, | ||
28 | CQ_DESC_TYPE_RQ_ENET = 3, | ||
29 | CQ_DESC_TYPE_RQ_FCP = 4, | ||
30 | }; | ||
31 | |||
32 | /* Completion queue descriptor: 16B | ||
33 | * | ||
34 | * All completion queues have this basic layout. The | ||
35 | * type_specfic area is unique for each completion | ||
36 | * queue type. | ||
37 | */ | ||
38 | struct cq_desc { | ||
39 | __le16 completed_index; | ||
40 | __le16 q_number; | ||
41 | u8 type_specfic[11]; | ||
42 | u8 type_color; | ||
43 | }; | ||
44 | |||
45 | #define CQ_DESC_TYPE_BITS 4 | ||
46 | #define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1) | ||
47 | #define CQ_DESC_COLOR_MASK 1 | ||
48 | #define CQ_DESC_COLOR_SHIFT 7 | ||
49 | #define CQ_DESC_Q_NUM_BITS 10 | ||
50 | #define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1) | ||
51 | #define CQ_DESC_COMP_NDX_BITS 12 | ||
52 | #define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1) | ||
53 | |||
54 | static inline void cq_desc_dec(const struct cq_desc *desc_arg, | ||
55 | u8 *type, u8 *color, u16 *q_number, u16 *completed_index) | ||
56 | { | ||
57 | const struct cq_desc *desc = desc_arg; | ||
58 | const u8 type_color = desc->type_color; | ||
59 | |||
60 | *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; | ||
61 | |||
62 | /* | ||
63 | * Make sure color bit is read from desc *before* other fields | ||
64 | * are read from desc. Hardware guarantees color bit is last | ||
65 | * bit (byte) written. Adding the rmb() prevents the compiler | ||
66 | * and/or CPU from reordering the reads which would potentially | ||
67 | * result in reading stale values. | ||
68 | */ | ||
69 | |||
70 | rmb(); | ||
71 | |||
72 | *type = type_color & CQ_DESC_TYPE_MASK; | ||
73 | *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK; | ||
74 | *completed_index = le16_to_cpu(desc->completed_index) & | ||
75 | CQ_DESC_COMP_NDX_MASK; | ||
76 | } | ||
77 | |||
78 | #endif /* _CQ_DESC_H_ */ | ||
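For orientation only (not part of the patch): a minimal sketch of how a consumer might walk a completion ring with the cq_desc_dec() helper above, using the color bit to detect new entries. The ring, to_clean and last_color bookkeeping names are assumptions here; the driver's own vnic_cq code is the authoritative implementation.

static int example_cq_service(struct cq_desc *ring, unsigned int ring_size,
                              unsigned int *to_clean, u8 *last_color)
{
        u8 type, color;
        u16 q_number, completed_index;
        int work_done = 0;

        cq_desc_dec(&ring[*to_clean], &type, &color,
                    &q_number, &completed_index);

        /* An entry is new only while its color bit differs from the color
         * seen on the previous pass over the ring. */
        while (color != *last_color) {
                /* hand (type, q_number, completed_index) to the queue owner */
                work_done++;

                if (++(*to_clean) == ring_size) {
                        *to_clean = 0;
                        *last_color = *last_color ? 0 : 1;
                }

                cq_desc_dec(&ring[*to_clean], &type, &color,
                            &q_number, &completed_index);
        }

        return work_done;
}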
diff --git a/drivers/scsi/fnic/cq_enet_desc.h b/drivers/scsi/fnic/cq_enet_desc.h
new file mode 100644
index 000000000000..a9fa26f82ddd
--- /dev/null
+++ b/drivers/scsi/fnic/cq_enet_desc.h
@@ -0,0 +1,167 @@
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _CQ_ENET_DESC_H_ | ||
19 | #define _CQ_ENET_DESC_H_ | ||
20 | |||
21 | #include "cq_desc.h" | ||
22 | |||
23 | /* Ethernet completion queue descriptor: 16B */ | ||
24 | struct cq_enet_wq_desc { | ||
25 | __le16 completed_index; | ||
26 | __le16 q_number; | ||
27 | u8 reserved[11]; | ||
28 | u8 type_color; | ||
29 | }; | ||
30 | |||
31 | static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc, | ||
32 | u8 *type, u8 *color, u16 *q_number, u16 *completed_index) | ||
33 | { | ||
34 | cq_desc_dec((struct cq_desc *)desc, type, | ||
35 | color, q_number, completed_index); | ||
36 | } | ||
37 | |||
38 | /* Completion queue descriptor: Ethernet receive queue, 16B */ | ||
39 | struct cq_enet_rq_desc { | ||
40 | __le16 completed_index_flags; | ||
41 | __le16 q_number_rss_type_flags; | ||
42 | __le32 rss_hash; | ||
43 | __le16 bytes_written_flags; | ||
44 | __le16 vlan; | ||
45 | __le16 checksum_fcoe; | ||
46 | u8 flags; | ||
47 | u8 type_color; | ||
48 | }; | ||
49 | |||
50 | #define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12) | ||
51 | #define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13) | ||
52 | #define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14) | ||
53 | #define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15) | ||
54 | |||
55 | #define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4 | ||
56 | #define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \ | ||
57 | ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1) | ||
58 | #define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0 | ||
59 | #define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1 | ||
60 | #define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2 | ||
61 | #define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3 | ||
62 | #define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4 | ||
63 | #define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5 | ||
64 | #define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6 | ||
65 | |||
66 | #define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14) | ||
67 | |||
68 | #define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14 | ||
69 | #define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \ | ||
70 | ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1) | ||
71 | #define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14) | ||
72 | #define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15) | ||
73 | |||
74 | #define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4 | ||
75 | #define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \ | ||
76 | ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1) | ||
77 | #define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8 | ||
78 | #define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \ | ||
79 | ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1) | ||
80 | #define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8 | ||
81 | |||
82 | #define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0) | ||
83 | #define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0) | ||
84 | #define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1) | ||
85 | #define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1) | ||
86 | #define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2) | ||
87 | #define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3) | ||
88 | #define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4) | ||
89 | #define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5) | ||
90 | #define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6) | ||
91 | #define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7) | ||
92 | |||
93 | static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, | ||
94 | u8 *type, u8 *color, u16 *q_number, u16 *completed_index, | ||
95 | u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type, | ||
96 | u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error, | ||
97 | u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof, | ||
98 | u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof, | ||
99 | u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok, | ||
100 | u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok) | ||
101 | { | ||
102 | u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags); | ||
103 | u16 q_number_rss_type_flags = | ||
104 | le16_to_cpu(desc->q_number_rss_type_flags); | ||
105 | u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); | ||
106 | |||
107 | cq_desc_dec((struct cq_desc *)desc, type, | ||
108 | color, q_number, completed_index); | ||
109 | |||
110 | *ingress_port = (completed_index_flags & | ||
111 | CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0; | ||
112 | *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ? | ||
113 | 1 : 0; | ||
114 | *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ? | ||
115 | 1 : 0; | ||
116 | *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ? | ||
117 | 1 : 0; | ||
118 | |||
119 | *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) & | ||
120 | CQ_ENET_RQ_DESC_RSS_TYPE_MASK); | ||
121 | *csum_not_calc = (q_number_rss_type_flags & | ||
122 | CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0; | ||
123 | |||
124 | *rss_hash = le32_to_cpu(desc->rss_hash); | ||
125 | |||
126 | *bytes_written = bytes_written_flags & | ||
127 | CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; | ||
128 | *packet_error = (bytes_written_flags & | ||
129 | CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0; | ||
130 | *vlan_stripped = (bytes_written_flags & | ||
131 | CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0; | ||
132 | |||
133 | *vlan = le16_to_cpu(desc->vlan); | ||
134 | |||
135 | if (*fcoe) { | ||
136 | *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & | ||
137 | CQ_ENET_RQ_DESC_FCOE_SOF_MASK); | ||
138 | *fcoe_fc_crc_ok = (desc->flags & | ||
139 | CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0; | ||
140 | *fcoe_enc_error = (desc->flags & | ||
141 | CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0; | ||
142 | *fcoe_eof = (u8)((desc->checksum_fcoe >> | ||
143 | CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) & | ||
144 | CQ_ENET_RQ_DESC_FCOE_EOF_MASK); | ||
145 | *checksum = 0; | ||
146 | } else { | ||
147 | *fcoe_sof = 0; | ||
148 | *fcoe_fc_crc_ok = 0; | ||
149 | *fcoe_enc_error = 0; | ||
150 | *fcoe_eof = 0; | ||
151 | *checksum = le16_to_cpu(desc->checksum_fcoe); | ||
152 | } | ||
153 | |||
154 | *tcp_udp_csum_ok = | ||
155 | (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0; | ||
156 | *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0; | ||
157 | *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0; | ||
158 | *ipv4_csum_ok = | ||
159 | (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0; | ||
160 | *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0; | ||
161 | *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0; | ||
162 | *ipv4_fragment = | ||
163 | (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0; | ||
164 | *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0; | ||
165 | } | ||
166 | |||
167 | #endif /* _CQ_ENET_DESC_H_ */ | ||
diff --git a/drivers/scsi/fnic/cq_exch_desc.h b/drivers/scsi/fnic/cq_exch_desc.h
new file mode 100644
index 000000000000..501660cfe228
--- /dev/null
+++ b/drivers/scsi/fnic/cq_exch_desc.h
@@ -0,0 +1,182 @@
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _CQ_EXCH_DESC_H_ | ||
19 | #define _CQ_EXCH_DESC_H_ | ||
20 | |||
21 | #include "cq_desc.h" | ||
22 | |||
23 | /* Exchange completion queue descriptor: 16B */ | ||
24 | struct cq_exch_wq_desc { | ||
25 | u16 completed_index; | ||
26 | u16 q_number; | ||
27 | u16 exchange_id; | ||
28 | u8 tmpl; | ||
29 | u8 reserved0; | ||
30 | u32 reserved1; | ||
31 | u8 exch_status; | ||
32 | u8 reserved2[2]; | ||
33 | u8 type_color; | ||
34 | }; | ||
35 | |||
36 | #define CQ_EXCH_WQ_STATUS_BITS 2 | ||
37 | #define CQ_EXCH_WQ_STATUS_MASK ((1 << CQ_EXCH_WQ_STATUS_BITS) - 1) | ||
38 | |||
39 | enum cq_exch_status_types { | ||
40 | CQ_EXCH_WQ_STATUS_TYPE_COMPLETE = 0, | ||
41 | CQ_EXCH_WQ_STATUS_TYPE_ABORT = 1, | ||
42 | CQ_EXCH_WQ_STATUS_TYPE_SGL_EOF = 2, | ||
43 | CQ_EXCH_WQ_STATUS_TYPE_TMPL_ERR = 3, | ||
44 | }; | ||
45 | |||
46 | static inline void cq_exch_wq_desc_dec(struct cq_exch_wq_desc *desc_ptr, | ||
47 | u8 *type, | ||
48 | u8 *color, | ||
49 | u16 *q_number, | ||
50 | u16 *completed_index, | ||
51 | u8 *exch_status) | ||
52 | { | ||
53 | cq_desc_dec((struct cq_desc *)desc_ptr, type, | ||
54 | color, q_number, completed_index); | ||
55 | *exch_status = desc_ptr->exch_status & CQ_EXCH_WQ_STATUS_MASK; | ||
56 | } | ||
57 | |||
58 | struct cq_fcp_rq_desc { | ||
59 | u16 completed_index_eop_sop_prt; | ||
60 | u16 q_number; | ||
61 | u16 exchange_id; | ||
62 | u16 tmpl; | ||
63 | u16 bytes_written; | ||
64 | u16 vlan; | ||
65 | u8 sof; | ||
66 | u8 eof; | ||
67 | u8 fcs_fer_fck; | ||
68 | u8 type_color; | ||
69 | }; | ||
70 | |||
71 | #define CQ_FCP_RQ_DESC_FLAGS_SOP (1 << 15) | ||
72 | #define CQ_FCP_RQ_DESC_FLAGS_EOP (1 << 14) | ||
73 | #define CQ_FCP_RQ_DESC_FLAGS_PRT (1 << 12) | ||
74 | #define CQ_FCP_RQ_DESC_TMPL_MASK 0x1f | ||
75 | #define CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK 0x3fff | ||
76 | #define CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT 14 | ||
77 | #define CQ_FCP_RQ_DESC_PACKET_ERR_MASK (1 << CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT) | ||
78 | #define CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT 15 | ||
79 | #define CQ_FCP_RQ_DESC_VS_STRIPPED_MASK (1 << CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT) | ||
80 | #define CQ_FCP_RQ_DESC_FC_CRC_OK_MASK 0x1 | ||
81 | #define CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT 1 | ||
82 | #define CQ_FCP_RQ_DESC_FCOE_ERR_MASK (1 << CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT) | ||
83 | #define CQ_FCP_RQ_DESC_FCS_OK_SHIFT 7 | ||
84 | #define CQ_FCP_RQ_DESC_FCS_OK_MASK (1 << CQ_FCP_RQ_DESC_FCS_OK_SHIFT) | ||
85 | |||
86 | static inline void cq_fcp_rq_desc_dec(struct cq_fcp_rq_desc *desc_ptr, | ||
87 | u8 *type, | ||
88 | u8 *color, | ||
89 | u16 *q_number, | ||
90 | u16 *completed_index, | ||
91 | u8 *eop, | ||
92 | u8 *sop, | ||
93 | u8 *fck, | ||
94 | u16 *exchange_id, | ||
95 | u16 *tmpl, | ||
96 | u32 *bytes_written, | ||
97 | u8 *sof, | ||
98 | u8 *eof, | ||
99 | u8 *ingress_port, | ||
100 | u8 *packet_err, | ||
101 | u8 *fcoe_err, | ||
102 | u8 *fcs_ok, | ||
103 | u8 *vlan_stripped, | ||
104 | u16 *vlan) | ||
105 | { | ||
106 | cq_desc_dec((struct cq_desc *)desc_ptr, type, | ||
107 | color, q_number, completed_index); | ||
108 | *eop = (desc_ptr->completed_index_eop_sop_prt & | ||
109 | CQ_FCP_RQ_DESC_FLAGS_EOP) ? 1 : 0; | ||
110 | *sop = (desc_ptr->completed_index_eop_sop_prt & | ||
111 | CQ_FCP_RQ_DESC_FLAGS_SOP) ? 1 : 0; | ||
112 | *ingress_port = | ||
113 | (desc_ptr->completed_index_eop_sop_prt & | ||
114 | CQ_FCP_RQ_DESC_FLAGS_PRT) ? 1 : 0; | ||
115 | *exchange_id = desc_ptr->exchange_id; | ||
116 | *tmpl = desc_ptr->tmpl & CQ_FCP_RQ_DESC_TMPL_MASK; | ||
117 | *bytes_written = | ||
118 | desc_ptr->bytes_written & CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK; | ||
119 | *packet_err = | ||
120 | (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_PACKET_ERR_MASK) >> | ||
121 | CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT; | ||
122 | *vlan_stripped = | ||
123 | (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_VS_STRIPPED_MASK) >> | ||
124 | CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT; | ||
125 | *vlan = desc_ptr->vlan; | ||
126 | *sof = desc_ptr->sof; | ||
127 | *fck = desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FC_CRC_OK_MASK; | ||
128 | *fcoe_err = (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCOE_ERR_MASK) >> | ||
129 | CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT; | ||
130 | *eof = desc_ptr->eof; | ||
131 | *fcs_ok = | ||
132 | (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCS_OK_MASK) >> | ||
133 | CQ_FCP_RQ_DESC_FCS_OK_SHIFT; | ||
134 | } | ||
135 | |||
136 | struct cq_sgl_desc { | ||
137 | u16 exchange_id; | ||
138 | u16 q_number; | ||
139 | u32 active_burst_offset; | ||
140 | u32 tot_data_bytes; | ||
141 | u16 tmpl; | ||
142 | u8 sgl_err; | ||
143 | u8 type_color; | ||
144 | }; | ||
145 | |||
146 | enum cq_sgl_err_types { | ||
147 | CQ_SGL_ERR_NO_ERROR = 0, | ||
148 | CQ_SGL_ERR_OVERFLOW, /* data ran beyond end of SGL */ | ||
149 | CQ_SGL_ERR_SGL_LCL_ADDR_ERR, /* sgl access to local vnic addr illegal*/ | ||
150 | CQ_SGL_ERR_ADDR_RSP_ERR, /* sgl address error */ | ||
151 | CQ_SGL_ERR_DATA_RSP_ERR, /* sgl data rsp error */ | ||
152 | CQ_SGL_ERR_CNT_ZERO_ERR, /* SGL count is 0 */ | ||
153 | CQ_SGL_ERR_CNT_MAX_ERR, /* SGL count is larger than supported */ | ||
154 | CQ_SGL_ERR_ORDER_ERR, /* frames recv on both ports, order err */ | ||
155 | CQ_SGL_ERR_DATA_LCL_ADDR_ERR,/* sgl data buf to local vnic addr ill */ | ||
156 | CQ_SGL_ERR_HOST_CQ_ERR, /* host cq entry to local vnic addr ill */ | ||
157 | }; | ||
158 | |||
159 | #define CQ_SGL_SGL_ERR_MASK 0x1f | ||
160 | #define CQ_SGL_TMPL_MASK 0x1f | ||
161 | |||
162 | static inline void cq_sgl_desc_dec(struct cq_sgl_desc *desc_ptr, | ||
163 | u8 *type, | ||
164 | u8 *color, | ||
165 | u16 *q_number, | ||
166 | u16 *exchange_id, | ||
167 | u32 *active_burst_offset, | ||
168 | u32 *tot_data_bytes, | ||
169 | u16 *tmpl, | ||
170 | u8 *sgl_err) | ||
171 | { | ||
172 | /* Cheat a little by assuming exchange_id is the same as completed | ||
173 | index */ | ||
174 | cq_desc_dec((struct cq_desc *)desc_ptr, type, color, q_number, | ||
175 | exchange_id); | ||
176 | *active_burst_offset = desc_ptr->active_burst_offset; | ||
177 | *tot_data_bytes = desc_ptr->tot_data_bytes; | ||
178 | *tmpl = desc_ptr->tmpl & CQ_SGL_TMPL_MASK; | ||
179 | *sgl_err = desc_ptr->sgl_err & CQ_SGL_SGL_ERR_MASK; | ||
180 | } | ||
181 | |||
182 | #endif /* _CQ_EXCH_DESC_H_ */ | ||
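Again purely illustrative (not part of the patch): decoding an SGL completion with the cq_sgl_desc_dec() helper above and checking sgl_err against the error codes it defines; the handler name and what is done on error are assumptions.

static void example_handle_sgl_cmpl(struct cq_sgl_desc *desc)
{
        u8 type, color, sgl_err;
        u16 q_number, exchange_id, tmpl;
        u32 active_burst_offset, tot_data_bytes;

        cq_sgl_desc_dec(desc, &type, &color, &q_number, &exchange_id,
                        &active_burst_offset, &tot_data_bytes, &tmpl, &sgl_err);

        if (sgl_err != CQ_SGL_ERR_NO_ERROR) {
                /* report the failure against exchange_id, e.g. terminate the
                 * exchange or account it in the error statistics */
        }
}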
diff --git a/drivers/scsi/fnic/fcpio.h b/drivers/scsi/fnic/fcpio.h
new file mode 100644
index 000000000000..12d770d885c5
--- /dev/null
+++ b/drivers/scsi/fnic/fcpio.h
@@ -0,0 +1,780 @@
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _FCPIO_H_ | ||
19 | #define _FCPIO_H_ | ||
20 | |||
21 | #include <linux/if_ether.h> | ||
22 | |||
23 | /* | ||
24 | * This header file includes all of the data structures used for | ||
25 | * communication by the host driver to the fcp firmware. | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * Exchange and sequence id space allocated to the host driver | ||
30 | */ | ||
31 | #define FCPIO_HOST_EXCH_RANGE_START 0x1000 | ||
32 | #define FCPIO_HOST_EXCH_RANGE_END 0x1fff | ||
33 | #define FCPIO_HOST_SEQ_ID_RANGE_START 0x80 | ||
34 | #define FCPIO_HOST_SEQ_ID_RANGE_END 0xff | ||
35 | |||
36 | /* | ||
37 | * Command entry type | ||
38 | */ | ||
39 | enum fcpio_type { | ||
40 | /* | ||
41 | * Initiator request types | ||
42 | */ | ||
43 | FCPIO_ICMND_16 = 0x1, | ||
44 | FCPIO_ICMND_32, | ||
45 | FCPIO_ICMND_CMPL, | ||
46 | FCPIO_ITMF, | ||
47 | FCPIO_ITMF_CMPL, | ||
48 | |||
49 | /* | ||
50 | * Target request types | ||
51 | */ | ||
52 | FCPIO_TCMND_16 = 0x11, | ||
53 | FCPIO_TCMND_32, | ||
54 | FCPIO_TDATA, | ||
55 | FCPIO_TXRDY, | ||
56 | FCPIO_TRSP, | ||
57 | FCPIO_TDRSP_CMPL, | ||
58 | FCPIO_TTMF, | ||
59 | FCPIO_TTMF_ACK, | ||
60 | FCPIO_TABORT, | ||
61 | FCPIO_TABORT_CMPL, | ||
62 | |||
63 | /* | ||
64 | * Misc request types | ||
65 | */ | ||
66 | FCPIO_ACK = 0x20, | ||
67 | FCPIO_RESET, | ||
68 | FCPIO_RESET_CMPL, | ||
69 | FCPIO_FLOGI_REG, | ||
70 | FCPIO_FLOGI_REG_CMPL, | ||
71 | FCPIO_ECHO, | ||
72 | FCPIO_ECHO_CMPL, | ||
73 | FCPIO_LUNMAP_CHNG, | ||
74 | FCPIO_LUNMAP_REQ, | ||
75 | FCPIO_LUNMAP_REQ_CMPL, | ||
76 | FCPIO_FLOGI_FIP_REG, | ||
77 | FCPIO_FLOGI_FIP_REG_CMPL, | ||
78 | }; | ||
79 | |||
80 | /* | ||
81 | * Header status codes from the firmware | ||
82 | */ | ||
83 | enum fcpio_status { | ||
84 | FCPIO_SUCCESS = 0, /* request was successful */ | ||
85 | |||
86 | /* | ||
87 | * If a request to the firmware is rejected, the original request | ||
88 | * header will be returned with the status set to one of the following: | ||
89 | */ | ||
90 | FCPIO_INVALID_HEADER, /* header contains invalid data */ | ||
91 | FCPIO_OUT_OF_RESOURCE, /* out of resources to complete request */ | ||
92 | FCPIO_INVALID_PARAM, /* some parameter in request is invalid */ | ||
93 | FCPIO_REQ_NOT_SUPPORTED, /* request type is not supported */ | ||
94 | FCPIO_IO_NOT_FOUND, /* requested I/O was not found */ | ||
95 | |||
96 | /* | ||
97 | * Once a request is processed, the firmware will usually return | ||
98 | * a cmpl message type. In cases where errors occurred, | ||
99 | * the header status field would be filled in with one of the following: | ||
100 | */ | ||
101 | FCPIO_ABORTED = 0x41, /* request was aborted */ | ||
102 | FCPIO_TIMEOUT, /* request was timed out */ | ||
103 | FCPIO_SGL_INVALID, /* request was aborted due to sgl error */ | ||
104 | FCPIO_MSS_INVALID, /* request was aborted due to mss error */ | ||
105 | FCPIO_DATA_CNT_MISMATCH, /* recv/sent more/less data than exp. */ | ||
106 | FCPIO_FW_ERR, /* request was terminated due to fw error */ | ||
107 | FCPIO_ITMF_REJECTED, /* itmf req was rejected by remote node */ | ||
108 | FCPIO_ITMF_FAILED, /* itmf req was failed by remote node */ | ||
109 | FCPIO_ITMF_INCORRECT_LUN, /* itmf req targeted incorrect LUN */ | ||
110 | FCPIO_CMND_REJECTED, /* request was invalid and rejected */ | ||
111 | FCPIO_NO_PATH_AVAIL, /* no paths to the lun was available */ | ||
112 | FCPIO_PATH_FAILED, /* i/o sent to current path failed */ | ||
113 | FCPIO_LUNMAP_CHNG_PEND, /* i/o rejected due to lunmap change */ | ||
114 | }; | ||
115 | |||
116 | /* | ||
117 | * The header command tag. All host requests will use the "tag" field | ||
118 | * to mark commands with a unique tag. When the firmware responds to | ||
119 | * a host request, it will copy the tag field into the response. | ||
120 | * | ||
121 | * The only firmware requests that will use the rx_id/ox_id fields instead | ||
122 | * of the tag field will be the target command and target task management | ||
123 | * requests. These two requests do not have corresponding host requests | ||
124 | * since they come directly from the FC initiator on the network. | ||
125 | */ | ||
126 | struct fcpio_tag { | ||
127 | union { | ||
128 | u32 req_id; | ||
129 | struct { | ||
130 | u16 rx_id; | ||
131 | u16 ox_id; | ||
132 | } ex_id; | ||
133 | } u; | ||
134 | }; | ||
135 | |||
136 | static inline void | ||
137 | fcpio_tag_id_enc(struct fcpio_tag *tag, u32 id) | ||
138 | { | ||
139 | tag->u.req_id = id; | ||
140 | } | ||
141 | |||
142 | static inline void | ||
143 | fcpio_tag_id_dec(struct fcpio_tag *tag, u32 *id) | ||
144 | { | ||
145 | *id = tag->u.req_id; | ||
146 | } | ||
147 | |||
148 | static inline void | ||
149 | fcpio_tag_exid_enc(struct fcpio_tag *tag, u16 ox_id, u16 rx_id) | ||
150 | { | ||
151 | tag->u.ex_id.rx_id = rx_id; | ||
152 | tag->u.ex_id.ox_id = ox_id; | ||
153 | } | ||
154 | |||
155 | static inline void | ||
156 | fcpio_tag_exid_dec(struct fcpio_tag *tag, u16 *ox_id, u16 *rx_id) | ||
157 | { | ||
158 | *rx_id = tag->u.ex_id.rx_id; | ||
159 | *ox_id = tag->u.ex_id.ox_id; | ||
160 | } | ||
161 | |||
162 | /* | ||
163 | * The header for an fcpio request, whether from the firmware or from the | ||
164 | * host driver | ||
165 | */ | ||
166 | struct fcpio_header { | ||
167 | u8 type; /* enum fcpio_type */ | ||
168 | u8 status; /* header status entry */ | ||
169 | u16 _resvd; /* reserved */ | ||
170 | struct fcpio_tag tag; /* header tag */ | ||
171 | }; | ||
172 | |||
173 | static inline void | ||
174 | fcpio_header_enc(struct fcpio_header *hdr, | ||
175 | u8 type, u8 status, | ||
176 | struct fcpio_tag tag) | ||
177 | { | ||
178 | hdr->type = type; | ||
179 | hdr->status = status; | ||
180 | hdr->_resvd = 0; | ||
181 | hdr->tag = tag; | ||
182 | } | ||
183 | |||
184 | static inline void | ||
185 | fcpio_header_dec(struct fcpio_header *hdr, | ||
186 | u8 *type, u8 *status, | ||
187 | struct fcpio_tag *tag) | ||
188 | { | ||
189 | *type = hdr->type; | ||
190 | *status = hdr->status; | ||
191 | *tag = hdr->tag; | ||
192 | } | ||
193 | |||
194 | #define CDB_16 16 | ||
195 | #define CDB_32 32 | ||
196 | #define LUN_ADDRESS 8 | ||
197 | |||
198 | /* | ||
199 | * fcpio_icmnd_16: host -> firmware request | ||
200 | * | ||
201 | * used for sending out an initiator SCSI 16-byte command | ||
202 | */ | ||
203 | struct fcpio_icmnd_16 { | ||
204 | u32 lunmap_id; /* index into lunmap table */ | ||
205 | u8 special_req_flags; /* special exchange request flags */ | ||
206 | u8 _resvd0[3]; /* reserved */ | ||
207 | u32 sgl_cnt; /* scatter-gather list count */ | ||
208 | u32 sense_len; /* sense buffer length */ | ||
209 | u64 sgl_addr; /* scatter-gather list addr */ | ||
210 | u64 sense_addr; /* sense buffer address */ | ||
211 | u8 crn; /* SCSI Command Reference No. */ | ||
212 | u8 pri_ta; /* SCSI Priority and Task attribute */ | ||
213 | u8 _resvd1; /* reserved: should be 0 */ | ||
214 | u8 flags; /* command flags */ | ||
215 | u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */ | ||
216 | u32 data_len; /* length of data expected */ | ||
217 | u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ | ||
218 | u8 _resvd2; /* reserved */ | ||
219 | u8 d_id[3]; /* FC vNIC only: Target D_ID */ | ||
220 | u16 mss; /* FC vNIC only: max burst */ | ||
221 | u16 _resvd3; /* reserved */ | ||
222 | u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */ | ||
223 | u32 e_d_tov; /* FC vNIC only: Err Detect Timeout */ | ||
224 | }; | ||
225 | |||
226 | /* | ||
227 | * Special request flags | ||
228 | */ | ||
229 | #define FCPIO_ICMND_SRFLAG_RETRY 0x01 /* Enable Retry handling on exchange */ | ||
230 | |||
231 | /* | ||
232 | * Priority/Task Attribute settings | ||
233 | */ | ||
234 | #define FCPIO_ICMND_PTA_SIMPLE 0 /* simple task attribute */ | ||
235 | #define FCPIO_ICMND_PTA_HEADQ 1 /* head of queue task attribute */ | ||
236 | #define FCPIO_ICMND_PTA_ORDERED 2 /* ordered task attribute */ | ||
237 | #define FCPIO_ICMND_PTA_ACA 4 /* auto contingent allegiance */ | ||
238 | #define FCPIO_ICMND_PRI_SHIFT 3 /* priority field starts in bit 3 */ | ||
239 | |||
240 | /* | ||
241 | * Command flags | ||
242 | */ | ||
243 | #define FCPIO_ICMND_RDDATA 0x02 /* read data */ | ||
244 | #define FCPIO_ICMND_WRDATA 0x01 /* write data */ | ||
245 | |||
246 | /* | ||
247 | * fcpio_icmnd_32: host -> firmware request | ||
248 | * | ||
249 | * used for sending out an initiator SCSI 32-byte command | ||
250 | */ | ||
251 | struct fcpio_icmnd_32 { | ||
252 | u32 lunmap_id; /* index into lunmap table */ | ||
253 | u8 special_req_flags; /* special exchange request flags */ | ||
254 | u8 _resvd0[3]; /* reserved */ | ||
255 | u32 sgl_cnt; /* scatter-gather list count */ | ||
256 | u32 sense_len; /* sense buffer length */ | ||
257 | u64 sgl_addr; /* scatter-gather list addr */ | ||
258 | u64 sense_addr; /* sense buffer address */ | ||
259 | u8 crn; /* SCSI Command Reference No. */ | ||
260 | u8 pri_ta; /* SCSI Priority and Task attribute */ | ||
261 | u8 _resvd1; /* reserved: should be 0 */ | ||
262 | u8 flags; /* command flags */ | ||
263 | u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */ | ||
264 | u32 data_len; /* length of data expected */ | ||
265 | u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ | ||
266 | u8 _resvd2; /* reserved */ | ||
267 | u8 d_id[3]; /* FC vNIC only: Target D_ID */ | ||
268 | u16 mss; /* FC vNIC only: max burst */ | ||
269 | u16 _resvd3; /* reserved */ | ||
270 | u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */ | ||
271 | u32 e_d_tov; /* FC vNIC only: Error Detect Timeout */ | ||
272 | }; | ||
273 | |||
274 | /* | ||
275 | * fcpio_itmf: host -> firmware request | ||
276 | * | ||
277 | * used for requesting the firmware to abort a request and/or send out | ||
278 | * a task management function | ||
279 | * | ||
280 | * The t_tag field is only needed when the request type is ABT_TASK. | ||
281 | */ | ||
282 | struct fcpio_itmf { | ||
283 | u32 lunmap_id; /* index into lunmap table */ | ||
284 | u32 tm_req; /* SCSI Task Management request */ | ||
285 | u32 t_tag; /* header tag of fcpio to be aborted */ | ||
286 | u32 _resvd; /* _reserved */ | ||
287 | u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ | ||
288 | u8 _resvd1; /* reserved */ | ||
289 | u8 d_id[3]; /* FC vNIC only: Target D_ID */ | ||
290 | u32 r_a_tov; /* FC vNIC only: R_A_TOV in msec */ | ||
291 | u32 e_d_tov; /* FC vNIC only: E_D_TOV in msec */ | ||
292 | }; | ||
293 | |||
294 | /* | ||
295 | * Task Management request | ||
296 | */ | ||
297 | enum fcpio_itmf_tm_req_type { | ||
298 | FCPIO_ITMF_ABT_TASK_TERM = 0x01, /* abort task and terminate */ | ||
299 | FCPIO_ITMF_ABT_TASK, /* abort task and issue abts */ | ||
300 | FCPIO_ITMF_ABT_TASK_SET, /* abort task set */ | ||
301 | FCPIO_ITMF_CLR_TASK_SET, /* clear task set */ | ||
302 | FCPIO_ITMF_LUN_RESET, /* logical unit reset task mgmt */ | ||
303 | FCPIO_ITMF_CLR_ACA, /* Clear ACA condition */ | ||
304 | }; | ||
305 | |||
306 | /* | ||
307 | * fcpio_tdata: host -> firmware request | ||
308 | * | ||
309 | * used for requesting the firmware to send out a read data transfer for a | ||
310 | * target command | ||
311 | */ | ||
312 | struct fcpio_tdata { | ||
313 | u16 rx_id; /* FC rx_id of target command */ | ||
314 | u16 flags; /* command flags */ | ||
315 | u32 rel_offset; /* data sequence relative offset */ | ||
316 | u32 sgl_cnt; /* scatter-gather list count */ | ||
317 | u32 data_len; /* length of data expected to send */ | ||
318 | u64 sgl_addr; /* scatter-gather list address */ | ||
319 | }; | ||
320 | |||
321 | /* | ||
322 | * Command flags | ||
323 | */ | ||
324 | #define FCPIO_TDATA_SCSI_RSP 0x01 /* send a scsi resp. after last frame */ | ||
325 | |||
326 | /* | ||
327 | * fcpio_txrdy: host -> firmware request | ||
328 | * | ||
329 | * used for requesting the firmware to send out a write data transfer for a | ||
330 | * target command | ||
331 | */ | ||
332 | struct fcpio_txrdy { | ||
333 | u16 rx_id; /* FC rx_id of target command */ | ||
334 | u16 _resvd0; /* reserved */ | ||
335 | u32 rel_offset; /* data sequence relative offset */ | ||
336 | u32 sgl_cnt; /* scatter-gather list count */ | ||
337 | u32 data_len; /* length of data expected to send */ | ||
338 | u64 sgl_addr; /* scatter-gather list address */ | ||
339 | }; | ||
340 | |||
341 | /* | ||
342 | * fcpio_trsp: host -> firmware request | ||
343 | * | ||
344 | * used for requesting the firmware to send out a response for a target | ||
345 | * command | ||
346 | */ | ||
347 | struct fcpio_trsp { | ||
348 | u16 rx_id; /* FC rx_id of target command */ | ||
349 | u16 _resvd0; /* reserved */ | ||
350 | u32 sense_len; /* sense data buffer length */ | ||
351 | u64 sense_addr; /* sense data buffer address */ | ||
352 | u16 _resvd1; /* reserved */ | ||
353 | u8 flags; /* response request flags */ | ||
354 | u8 scsi_status; /* SCSI status */ | ||
355 | u32 residual; /* SCSI data residual value of I/O */ | ||
356 | }; | ||
357 | |||
358 | /* | ||
359 | * resposnse request flags | ||
360 | */ | ||
361 | #define FCPIO_TRSP_RESID_UNDER 0x08 /* residual is valid and is underflow */ | ||
362 | #define FCPIO_TRSP_RESID_OVER 0x04 /* residual is valid and is overflow */ | ||
363 | |||
364 | /* | ||
365 | * fcpio_ttmf_ack: host -> firmware response | ||
366 | * | ||
367 | * used by the host to indicate to the firmware it has received and processed | ||
368 | * the target tmf request | ||
369 | */ | ||
370 | struct fcpio_ttmf_ack { | ||
371 | u16 rx_id; /* FC rx_id of target command */ | ||
372 | u16 _resvd0; /* reserved */ | ||
373 | u32 tmf_status; /* SCSI task management status */ | ||
374 | }; | ||
375 | |||
376 | /* | ||
377 | * fcpio_tabort: host -> firmware request | ||
378 | * | ||
379 | * used by the host to request the firmware to abort a target request that was | ||
380 | * received by the firmware | ||
381 | */ | ||
382 | struct fcpio_tabort { | ||
383 | u16 rx_id; /* rx_id of the target request */ | ||
384 | }; | ||
385 | |||
386 | /* | ||
387 | * fcpio_reset: host -> firmware request | ||
388 | * | ||
389 | * used by the host to signal a reset of the driver to the firmware | ||
390 | * and to request firmware to clean up all outstanding I/O | ||
391 | */ | ||
392 | struct fcpio_reset { | ||
393 | u32 _resvd; | ||
394 | }; | ||
395 | |||
396 | enum fcpio_flogi_reg_format_type { | ||
397 | FCPIO_FLOGI_REG_DEF_DEST = 0, /* Use the oui | s_id mac format */ | ||
398 | FCPIO_FLOGI_REG_GW_DEST, /* Use the fixed gateway mac */ | ||
399 | }; | ||
400 | |||
401 | /* | ||
402 | * fcpio_flogi_reg: host -> firmware request | ||
403 | * | ||
404 | * fc vnic only | ||
405 | * used by the host to notify the firmware of the lif's s_id | ||
406 | * and destination mac address format | ||
407 | */ | ||
408 | struct fcpio_flogi_reg { | ||
409 | u8 format; | ||
410 | u8 s_id[3]; /* FC vNIC only: Source S_ID */ | ||
411 | u8 gateway_mac[ETH_ALEN]; /* Destination gateway mac */ | ||
412 | u16 _resvd; | ||
413 | u32 r_a_tov; /* R_A_TOV in msec */ | ||
414 | u32 e_d_tov; /* E_D_TOV in msec */ | ||
415 | }; | ||
416 | |||
417 | /* | ||
418 | * fcpio_echo: host -> firmware request | ||
419 | * | ||
420 | * sends a heartbeat echo request to the firmware | ||
421 | */ | ||
422 | struct fcpio_echo { | ||
423 | u32 _resvd; | ||
424 | }; | ||
425 | |||
426 | /* | ||
427 | * fcpio_lunmap_req: host -> firmware request | ||
428 | * | ||
429 | * scsi vnic only | ||
430 | * sends a request to retrieve the lunmap table for scsi vnics | ||
431 | */ | ||
432 | struct fcpio_lunmap_req { | ||
433 | u64 addr; /* address of the buffer */ | ||
434 | u32 len; /* len of the buffer */ | ||
435 | }; | ||
436 | |||
437 | /* | ||
438 | * fcpio_flogi_fip_reg: host -> firmware request | ||
439 | * | ||
440 | * fc vnic only | ||
441 | * used by the host to notify the firmware of the lif's s_id | ||
442 | * and destination mac address format | ||
443 | */ | ||
444 | struct fcpio_flogi_fip_reg { | ||
445 | u8 _resvd0; | ||
446 | u8 s_id[3]; /* FC vNIC only: Source S_ID */ | ||
447 | u8 fcf_mac[ETH_ALEN]; /* FCF Target destination mac */ | ||
448 | u16 _resvd1; | ||
449 | u32 r_a_tov; /* R_A_TOV in msec */ | ||
450 | u32 e_d_tov; /* E_D_TOV in msec */ | ||
451 | u8 ha_mac[ETH_ALEN]; /* Host adapter source mac */ | ||
452 | u16 _resvd2; | ||
453 | }; | ||
454 | |||
455 | /* | ||
456 | * Basic structure for all fcpio structures that are sent from the host to the | ||
457 | * firmware. They are 128 bytes per structure. | ||
458 | */ | ||
459 | #define FCPIO_HOST_REQ_LEN 128 /* expected length of host requests */ | ||
460 | |||
461 | struct fcpio_host_req { | ||
462 | struct fcpio_header hdr; | ||
463 | |||
464 | union { | ||
465 | /* | ||
466 | * Defines space needed for request | ||
467 | */ | ||
468 | u8 buf[FCPIO_HOST_REQ_LEN - sizeof(struct fcpio_header)]; | ||
469 | |||
470 | /* | ||
471 | * Initiator host requests | ||
472 | */ | ||
473 | struct fcpio_icmnd_16 icmnd_16; | ||
474 | struct fcpio_icmnd_32 icmnd_32; | ||
475 | struct fcpio_itmf itmf; | ||
476 | |||
477 | /* | ||
478 | * Target host requests | ||
479 | */ | ||
480 | struct fcpio_tdata tdata; | ||
481 | struct fcpio_txrdy txrdy; | ||
482 | struct fcpio_trsp trsp; | ||
483 | struct fcpio_ttmf_ack ttmf_ack; | ||
484 | struct fcpio_tabort tabort; | ||
485 | |||
486 | /* | ||
487 | * Misc requests | ||
488 | */ | ||
489 | struct fcpio_reset reset; | ||
490 | struct fcpio_flogi_reg flogi_reg; | ||
491 | struct fcpio_echo echo; | ||
492 | struct fcpio_lunmap_req lunmap_req; | ||
493 | struct fcpio_flogi_fip_reg flogi_fip_reg; | ||
494 | } u; | ||
495 | }; | ||
496 | |||
497 | /* | ||
498 | * fcpio_icmnd_cmpl: firmware -> host response | ||
499 | * | ||
500 | * used for sending the host a response to an initiator command | ||
501 | */ | ||
502 | struct fcpio_icmnd_cmpl { | ||
503 | u8 _resvd0[6]; /* reserved */ | ||
504 | u8 flags; /* response flags */ | ||
505 | u8 scsi_status; /* SCSI status */ | ||
506 | u32 residual; /* SCSI data residual length */ | ||
507 | u32 sense_len; /* SCSI sense length */ | ||
508 | }; | ||
509 | |||
510 | /* | ||
511 | * response flags | ||
512 | */ | ||
513 | #define FCPIO_ICMND_CMPL_RESID_UNDER 0x08 /* resid under and valid */ | ||
514 | #define FCPIO_ICMND_CMPL_RESID_OVER 0x04 /* resid over and valid */ | ||
515 | |||
516 | /* | ||
517 | * fcpio_itmf_cmpl: firmware -> host response | ||
518 | * | ||
519 | * used for sending the host a response for a itmf request | ||
520 | */ | ||
521 | struct fcpio_itmf_cmpl { | ||
522 | u32 _resvd; /* reserved */ | ||
523 | }; | ||
524 | |||
525 | /* | ||
526 | * fcpio_tcmnd_16: firmware -> host request | ||
527 | * | ||
528 | * used by the firmware to notify the host of an incoming target SCSI 16-Byte | ||
529 | * request | ||
530 | */ | ||
531 | struct fcpio_tcmnd_16 { | ||
532 | u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ | ||
533 | u8 crn; /* SCSI Command Reference No. */ | ||
534 | u8 pri_ta; /* SCSI Priority and Task attribute */ | ||
535 | u8 _resvd2; /* reserved: should be 0 */ | ||
536 | u8 flags; /* command flags */ | ||
537 | u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */ | ||
538 | u32 data_len; /* length of data expected */ | ||
539 | u8 _resvd1; /* reserved */ | ||
540 | u8 s_id[3]; /* FC vNIC only: Source S_ID */ | ||
541 | }; | ||
542 | |||
543 | /* | ||
544 | * Priority/Task Attribute settings | ||
545 | */ | ||
546 | #define FCPIO_TCMND_PTA_SIMPLE 0 /* simple task attribute */ | ||
547 | #define FCPIO_TCMND_PTA_HEADQ 1 /* head of queue task attribute */ | ||
548 | #define FCPIO_TCMND_PTA_ORDERED 2 /* ordered task attribute */ | ||
549 | #define FCPIO_TCMND_PTA_ACA 4 /* auto contingent allegiance */ | ||
550 | #define FCPIO_TCMND_PRI_SHIFT 3 /* priority field starts in bit 3 */ | ||
551 | |||
552 | /* | ||
553 | * Command flags | ||
554 | */ | ||
555 | #define FCPIO_TCMND_RDDATA 0x02 /* read data */ | ||
556 | #define FCPIO_TCMND_WRDATA 0x01 /* write data */ | ||
557 | |||
558 | /* | ||
559 | * fcpio_tcmnd_32: firmware -> host request | ||
560 | * | ||
561 | * used by the firmware to notify the host of an incoming target SCSI 32-Byte | ||
562 | * request | ||
563 | */ | ||
564 | struct fcpio_tcmnd_32 { | ||
565 | u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ | ||
566 | u8 crn; /* SCSI Command Reference No. */ | ||
567 | u8 pri_ta; /* SCSI Priority and Task attribute */ | ||
568 | u8 _resvd2; /* reserved: should be 0 */ | ||
569 | u8 flags; /* command flags */ | ||
570 | u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */ | ||
571 | u32 data_len; /* length of data expected */ | ||
572 | u8 _resvd0; /* reserved */ | ||
573 | u8 s_id[3]; /* FC vNIC only: Source S_ID */ | ||
574 | }; | ||
575 | |||
576 | /* | ||
577 | * fcpio_tdrsp_cmpl: firmware -> host response | ||
578 | * | ||
579 | * used by the firmware to notify the host of a response to a host target | ||
580 | * command | ||
581 | */ | ||
582 | struct fcpio_tdrsp_cmpl { | ||
583 | u16 rx_id; /* rx_id of the target request */ | ||
584 | u16 _resvd0; /* reserved */ | ||
585 | }; | ||
586 | |||
587 | /* | ||
588 | * fcpio_ttmf: firmware -> host request | ||
589 | * | ||
590 | * used by the firmware to notify the host of an incoming task management | ||
591 | * function request | ||
592 | */ | ||
593 | struct fcpio_ttmf { | ||
594 | u8 _resvd0; /* reserved */ | ||
595 | u8 s_id[3]; /* FC vNIC only: Source S_ID */ | ||
596 | u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */ | ||
597 | u8 crn; /* SCSI Command Reference No. */ | ||
598 | u8 _resvd2[3]; /* reserved */ | ||
599 | u32 tmf_type; /* task management request type */ | ||
600 | }; | ||
601 | |||
602 | /* | ||
603 | * Task Management request | ||
604 | */ | ||
605 | #define FCPIO_TTMF_CLR_ACA 0x40 /* Clear ACA condition */ | ||
606 | #define FCPIO_TTMF_LUN_RESET 0x10 /* logical unit reset task mgmt */ | ||
607 | #define FCPIO_TTMF_CLR_TASK_SET 0x04 /* clear task set */ | ||
608 | #define FCPIO_TTMF_ABT_TASK_SET 0x02 /* abort task set */ | ||
609 | #define FCPIO_TTMF_ABT_TASK 0x01 /* abort task */ | ||
610 | |||
611 | /* | ||
612 | * fcpio_tabort_cmpl: firmware -> host response | ||
613 | * | ||
614 | * used by the firmware to respond to a host's tabort request | ||
615 | */ | ||
616 | struct fcpio_tabort_cmpl { | ||
617 | u16 rx_id; /* rx_id of the target request */ | ||
618 | u16 _resvd0; /* reserved */ | ||
619 | }; | ||
620 | |||
621 | /* | ||
622 | * fcpio_ack: firmware -> host response | ||
623 | * | ||
624 | * used by firmware to notify the host of the last work request received | ||
625 | */ | ||
626 | struct fcpio_ack { | ||
627 | u16 request_out; /* last host entry received */ | ||
628 | u16 _resvd; | ||
629 | }; | ||
630 | |||
631 | /* | ||
632 | * fcpio_reset_cmpl: firmware -> host response | ||
633 | * | ||
634 | * use by firmware to respond to the host's reset request | ||
635 | */ | ||
636 | struct fcpio_reset_cmpl { | ||
637 | u16 vnic_id; | ||
638 | }; | ||
639 | |||
640 | /* | ||
641 | * fcpio_flogi_reg_cmpl: firmware -> host response | ||
642 | * | ||
643 | * fc vnic only | ||
644 | * response to the fcpio_flogi_reg request | ||
645 | */ | ||
646 | struct fcpio_flogi_reg_cmpl { | ||
647 | u32 _resvd; | ||
648 | }; | ||
649 | |||
650 | /* | ||
651 | * fcpio_echo_cmpl: firmware -> host response | ||
652 | * | ||
653 | * response to the fcpio_echo request | ||
654 | */ | ||
655 | struct fcpio_echo_cmpl { | ||
656 | u32 _resvd; | ||
657 | }; | ||
658 | |||
659 | /* | ||
660 | * fcpio_lunmap_chng: firmware -> host notification | ||
661 | * | ||
662 | * scsi vnic only | ||
663 | * notifies the host that the lunmap tables have changed | ||
664 | */ | ||
665 | struct fcpio_lunmap_chng { | ||
666 | u32 _resvd; | ||
667 | }; | ||
668 | |||
669 | /* | ||
670 | * fcpio_lunmap_req_cmpl: firmware -> host response | ||
671 | * | ||
672 | * scsi vnic only | ||
673 | * response for lunmap table request from the host | ||
674 | */ | ||
675 | struct fcpio_lunmap_req_cmpl { | ||
676 | u32 _resvd; | ||
677 | }; | ||
678 | |||
679 | /* | ||
680 | * Basic structure for all fcpio structures that are sent from the firmware to | ||
681 | * the host. They are 64 bytes per structure. | ||
682 | */ | ||
683 | #define FCPIO_FW_REQ_LEN 64 /* expected length of fw requests */ | ||
684 | struct fcpio_fw_req { | ||
685 | struct fcpio_header hdr; | ||
686 | |||
687 | union { | ||
688 | /* | ||
689 | * Defines space needed for request | ||
690 | */ | ||
691 | u8 buf[FCPIO_FW_REQ_LEN - sizeof(struct fcpio_header)]; | ||
692 | |||
693 | /* | ||
694 | * Initiator firmware responses | ||
695 | */ | ||
696 | struct fcpio_icmnd_cmpl icmnd_cmpl; | ||
697 | struct fcpio_itmf_cmpl itmf_cmpl; | ||
698 | |||
699 | /* | ||
700 | * Target firmware new requests | ||
701 | */ | ||
702 | struct fcpio_tcmnd_16 tcmnd_16; | ||
703 | struct fcpio_tcmnd_32 tcmnd_32; | ||
704 | |||
705 | /* | ||
706 | * Target firmware responses | ||
707 | */ | ||
708 | struct fcpio_tdrsp_cmpl tdrsp_cmpl; | ||
709 | struct fcpio_ttmf ttmf; | ||
710 | struct fcpio_tabort_cmpl tabort_cmpl; | ||
711 | |||
712 | /* | ||
713 | * Firmware response to work received | ||
714 | */ | ||
715 | struct fcpio_ack ack; | ||
716 | |||
717 | /* | ||
718 | * Misc requests | ||
719 | */ | ||
720 | struct fcpio_reset_cmpl reset_cmpl; | ||
721 | struct fcpio_flogi_reg_cmpl flogi_reg_cmpl; | ||
722 | struct fcpio_echo_cmpl echo_cmpl; | ||
723 | struct fcpio_lunmap_chng lunmap_chng; | ||
724 | struct fcpio_lunmap_req_cmpl lunmap_req_cmpl; | ||
725 | } u; | ||
726 | }; | ||
727 | |||
728 | /* | ||
729 | * Access routines to encode and decode the color bit, which is the most | ||
730 | * significant bit of the MSB of the structure | ||
731 | */ | ||
732 | static inline void fcpio_color_enc(struct fcpio_fw_req *fw_req, u8 color) | ||
733 | { | ||
734 | u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1; | ||
735 | |||
736 | if (color) | ||
737 | *c |= 0x80; | ||
738 | else | ||
739 | *c &= ~0x80; | ||
740 | } | ||
741 | |||
742 | static inline void fcpio_color_dec(struct fcpio_fw_req *fw_req, u8 *color) | ||
743 | { | ||
744 | u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1; | ||
745 | |||
746 | *color = *c >> 7; | ||
747 | |||
748 | /* | ||
749 | * Make sure color bit is read from desc *before* other fields | ||
750 | * are read from desc. Hardware guarantees color bit is last | ||
751 | * bit (byte) written. Adding the rmb() prevents the compiler | ||
752 | * and/or CPU from reordering the reads which would potentially | ||
753 | * result in reading stale values. | ||
754 | */ | ||
755 | |||
756 | rmb(); | ||
757 | |||
758 | } | ||
759 | |||
760 | /* | ||
761 | * Lunmap table entry for scsi vnics | ||
762 | */ | ||
763 | #define FCPIO_LUNMAP_TABLE_SIZE 256 | ||
764 | #define FCPIO_FLAGS_LUNMAP_VALID 0x80 | ||
765 | #define FCPIO_FLAGS_BOOT 0x01 | ||
766 | struct fcpio_lunmap_entry { | ||
767 | u8 bus; | ||
768 | u8 target; | ||
769 | u8 lun; | ||
770 | u8 path_cnt; | ||
771 | u16 flags; | ||
772 | u16 update_cnt; | ||
773 | }; | ||
774 | |||
775 | struct fcpio_lunmap_tbl { | ||
776 | u32 update_cnt; | ||
777 | struct fcpio_lunmap_entry lunmaps[FCPIO_LUNMAP_TABLE_SIZE]; | ||
778 | }; | ||
779 | |||
780 | #endif /* _FCPIO_H_ */ | ||
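A hedged sketch (not part of the patch) of how a host-to-firmware request header could be assembled with the tag and header helpers above before the 128-byte fcpio_host_req is handed to the firmware; the function name is hypothetical, and the real driver wraps this logic in its own queueing helpers.

static void example_build_icmnd_hdr(struct fcpio_host_req *req, u32 cmd_tag)
{
        struct fcpio_tag tag;

        fcpio_tag_id_enc(&tag, cmd_tag);        /* driver-unique command tag */
        fcpio_header_enc(&req->hdr, FCPIO_ICMND_16, 0, tag);

        /* req->u.icmnd_16 (CDB, SGL/sense addresses, LUN, flags, ...) would
         * be filled in next, then the request posted on the copy WQ. */
}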
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
new file mode 100644
index 000000000000..e4c0a3d7d87b
--- /dev/null
+++ b/drivers/scsi/fnic/fnic.h
@@ -0,0 +1,265 @@
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _FNIC_H_ | ||
19 | #define _FNIC_H_ | ||
20 | |||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/netdevice.h> | ||
23 | #include <linux/workqueue.h> | ||
24 | #include <scsi/libfc.h> | ||
25 | #include "fnic_io.h" | ||
26 | #include "fnic_res.h" | ||
27 | #include "vnic_dev.h" | ||
28 | #include "vnic_wq.h" | ||
29 | #include "vnic_rq.h" | ||
30 | #include "vnic_cq.h" | ||
31 | #include "vnic_wq_copy.h" | ||
32 | #include "vnic_intr.h" | ||
33 | #include "vnic_stats.h" | ||
34 | #include "vnic_scsi.h" | ||
35 | |||
36 | #define DRV_NAME "fnic" | ||
37 | #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" | ||
38 | #define DRV_VERSION "1.0.0.1121" | ||
39 | #define PFX DRV_NAME ": " | ||
40 | #define DFX DRV_NAME "%d: " | ||
41 | |||
42 | #define DESC_CLEAN_LOW_WATERMARK 8 | ||
43 | #define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */ | ||
44 | #define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ | ||
45 | #define FNIC_DFLT_QUEUE_DEPTH 32 | ||
46 | #define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ | ||
47 | |||
48 | /* | ||
49 | * Tag bits used for special requests. | ||
50 | */ | ||
51 | #define BIT(nr) (1UL << (nr)) | ||
52 | #define FNIC_TAG_ABORT BIT(30) /* tag bit indicating abort */ | ||
53 | #define FNIC_TAG_DEV_RST BIT(29) /* indicates device reset */ | ||
54 | #define FNIC_TAG_MASK (BIT(24) - 1) /* mask for lookup */ | ||
55 | #define FNIC_NO_TAG -1 | ||
56 | |||
57 | /* | ||
58 | * Usage of the scsi_cmnd scratchpad. | ||
59 | * These fields are locked by the hashed io_req_lock. | ||
60 | */ | ||
61 | #define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) | ||
62 | #define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase) | ||
63 | #define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message) | ||
64 | #define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) | ||
65 | #define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command) | ||
66 | |||
67 | #define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */ | ||
68 | |||
69 | #define FNIC_LUN_RESET_TIMEOUT 10000 /* mSec */ | ||
70 | #define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */ | ||
71 | #define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */ | ||
72 | #define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */ | ||
73 | |||
74 | #define FNIC_MAX_FCP_TARGET 256 | ||
75 | |||
76 | extern unsigned int fnic_log_level; | ||
77 | |||
78 | #define FNIC_MAIN_LOGGING 0x01 | ||
79 | #define FNIC_FCS_LOGGING 0x02 | ||
80 | #define FNIC_SCSI_LOGGING 0x04 | ||
81 | #define FNIC_ISR_LOGGING 0x08 | ||
82 | |||
83 | #define FNIC_CHECK_LOGGING(LEVEL, CMD) \ | ||
84 | do { \ | ||
85 | if (unlikely(fnic_log_level & LEVEL)) \ | ||
86 | do { \ | ||
87 | CMD; \ | ||
88 | } while (0); \ | ||
89 | } while (0) | ||
90 | |||
91 | #define FNIC_MAIN_DBG(kern_level, host, fmt, args...) \ | ||
92 | FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING, \ | ||
93 | shost_printk(kern_level, host, fmt, ##args);) | ||
94 | |||
95 | #define FNIC_FCS_DBG(kern_level, host, fmt, args...) \ | ||
96 | FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \ | ||
97 | shost_printk(kern_level, host, fmt, ##args);) | ||
98 | |||
99 | #define FNIC_SCSI_DBG(kern_level, host, fmt, args...) \ | ||
100 | FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \ | ||
101 | shost_printk(kern_level, host, fmt, ##args);) | ||
102 | |||
103 | #define FNIC_ISR_DBG(kern_level, host, fmt, args...) \ | ||
104 | FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \ | ||
105 | shost_printk(kern_level, host, fmt, ##args);) | ||
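| /* | ||
| * Example (as used in fnic_fcs.c below): | ||
| * FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); | ||
| * prints only when the FNIC_FCS_LOGGING bit is set in the | ||
| * fnic_log_level module parameter. | ||
| */ | ||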
106 | |||
107 | extern const char *fnic_state_str[]; | ||
108 | |||
109 | enum fnic_intx_intr_index { | ||
110 | FNIC_INTX_WQ_RQ_COPYWQ, | ||
111 | FNIC_INTX_ERR, | ||
112 | FNIC_INTX_NOTIFY, | ||
113 | FNIC_INTX_INTR_MAX, | ||
114 | }; | ||
115 | |||
116 | enum fnic_msix_intr_index { | ||
117 | FNIC_MSIX_RQ, | ||
118 | FNIC_MSIX_WQ, | ||
119 | FNIC_MSIX_WQ_COPY, | ||
120 | FNIC_MSIX_ERR_NOTIFY, | ||
121 | FNIC_MSIX_INTR_MAX, | ||
122 | }; | ||
123 | |||
124 | struct fnic_msix_entry { | ||
125 | int requested; | ||
126 | char devname[IFNAMSIZ]; | ||
127 | irqreturn_t (*isr)(int, void *); | ||
128 | void *devid; | ||
129 | }; | ||
130 | |||
131 | enum fnic_state { | ||
132 | FNIC_IN_FC_MODE = 0, | ||
133 | FNIC_IN_FC_TRANS_ETH_MODE, | ||
134 | FNIC_IN_ETH_MODE, | ||
135 | FNIC_IN_ETH_TRANS_FC_MODE, | ||
136 | }; | ||
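| /* | ||
| * Typical FLOGI bring-up (see fnic_send() and fnic_handle_flogi_resp() | ||
| * in fnic_fcs.c): a FLOGI sent while in FC mode first triggers a | ||
| * firmware reset (FNIC_IN_FC_TRANS_ETH_MODE -> FNIC_IN_ETH_MODE); a | ||
| * matching FLOGI accept then causes a flogi_reg request | ||
| * (FNIC_IN_ETH_TRANS_FC_MODE), which puts the fnic back in FC mode. | ||
| */ | ||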
137 | |||
138 | #define FNIC_WQ_COPY_MAX 1 | ||
139 | #define FNIC_WQ_MAX 1 | ||
140 | #define FNIC_RQ_MAX 1 | ||
141 | #define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX) | ||
142 | |||
143 | struct mempool; | ||
144 | |||
145 | /* Per-instance private data structure */ | ||
146 | struct fnic { | ||
147 | struct fc_lport *lport; | ||
148 | struct vnic_dev_bar bar0; | ||
149 | |||
150 | struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX]; | ||
151 | struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX]; | ||
152 | |||
153 | struct vnic_stats *stats; | ||
154 | unsigned long stats_time; /* time of stats update */ | ||
155 | struct vnic_nic_cfg *nic_cfg; | ||
156 | char name[IFNAMSIZ]; | ||
157 | struct timer_list notify_timer; /* used for MSI interrupts */ | ||
158 | |||
159 | unsigned int err_intr_offset; | ||
160 | unsigned int link_intr_offset; | ||
161 | |||
162 | unsigned int wq_count; | ||
163 | unsigned int cq_count; | ||
164 | |||
165 | u32 fcoui_mode:1; /* use fcoui address */ | ||
166 | u32 vlan_hw_insert:1; /* let hw insert the tag */ | ||
167 | u32 in_remove:1; /* fnic device in removal */ | ||
168 | u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */ | ||
169 | |||
170 | struct completion *remove_wait; /* device remove thread blocks */ | ||
171 | |||
172 | struct fc_frame *flogi; | ||
173 | struct fc_frame *flogi_resp; | ||
174 | u16 flogi_oxid; | ||
175 | unsigned long s_id; | ||
176 | enum fnic_state state; | ||
177 | spinlock_t fnic_lock; | ||
178 | |||
179 | u16 vlan_id; /* VLAN tag including priority */ | ||
180 | u8 mac_addr[ETH_ALEN]; | ||
181 | u8 dest_addr[ETH_ALEN]; | ||
182 | u8 data_src_addr[ETH_ALEN]; | ||
183 | u64 fcp_input_bytes; /* internal statistic */ | ||
184 | u64 fcp_output_bytes; /* internal statistic */ | ||
185 | u32 link_down_cnt; | ||
186 | int link_status; | ||
187 | |||
188 | struct list_head list; | ||
189 | struct pci_dev *pdev; | ||
190 | struct vnic_fc_config config; | ||
191 | struct vnic_dev *vdev; | ||
192 | unsigned int raw_wq_count; | ||
193 | unsigned int wq_copy_count; | ||
194 | unsigned int rq_count; | ||
195 | int fw_ack_index[FNIC_WQ_COPY_MAX]; | ||
196 | unsigned short fw_ack_recd[FNIC_WQ_COPY_MAX]; | ||
197 | unsigned short wq_copy_desc_low[FNIC_WQ_COPY_MAX]; | ||
198 | unsigned int intr_count; | ||
199 | u32 __iomem *legacy_pba; | ||
200 | struct fnic_host_tag *tags; | ||
201 | mempool_t *io_req_pool; | ||
202 | mempool_t *io_sgl_pool[FNIC_SGL_NUM_CACHES]; | ||
203 | spinlock_t io_req_lock[FNIC_IO_LOCKS]; /* locks for scsi cmnds */ | ||
204 | |||
205 | struct work_struct link_work; | ||
206 | struct work_struct frame_work; | ||
207 | struct sk_buff_head frame_queue; | ||
208 | |||
209 | /* copy work queue cache line section */ | ||
210 | ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; | ||
211 | /* completion queue cache line section */ | ||
212 | ____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX]; | ||
213 | |||
214 | spinlock_t wq_copy_lock[FNIC_WQ_COPY_MAX]; | ||
215 | |||
216 | /* work queue cache line section */ | ||
217 | ____cacheline_aligned struct vnic_wq wq[FNIC_WQ_MAX]; | ||
218 | spinlock_t wq_lock[FNIC_WQ_MAX]; | ||
219 | |||
220 | /* receive queue cache line section */ | ||
221 | ____cacheline_aligned struct vnic_rq rq[FNIC_RQ_MAX]; | ||
222 | |||
223 | /* interrupt resource cache line section */ | ||
224 | ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX]; | ||
225 | }; | ||
226 | |||
227 | extern struct workqueue_struct *fnic_event_queue; | ||
228 | extern struct device_attribute *fnic_attrs[]; | ||
229 | |||
230 | void fnic_clear_intr_mode(struct fnic *fnic); | ||
231 | int fnic_set_intr_mode(struct fnic *fnic); | ||
232 | void fnic_free_intr(struct fnic *fnic); | ||
233 | int fnic_request_intr(struct fnic *fnic); | ||
234 | |||
235 | int fnic_send(struct fc_lport *, struct fc_frame *); | ||
236 | void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); | ||
237 | void fnic_handle_frame(struct work_struct *work); | ||
238 | void fnic_handle_link(struct work_struct *work); | ||
239 | int fnic_rq_cmpl_handler(struct fnic *fnic, int); | ||
240 | int fnic_alloc_rq_frame(struct vnic_rq *rq); | ||
241 | void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); | ||
242 | int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp); | ||
243 | |||
244 | int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); | ||
245 | int fnic_abort_cmd(struct scsi_cmnd *); | ||
246 | int fnic_device_reset(struct scsi_cmnd *); | ||
247 | int fnic_host_reset(struct scsi_cmnd *); | ||
248 | int fnic_reset(struct Scsi_Host *); | ||
249 | void fnic_scsi_cleanup(struct fc_lport *); | ||
250 | void fnic_scsi_abort_io(struct fc_lport *); | ||
251 | void fnic_empty_scsi_cleanup(struct fc_lport *); | ||
252 | void fnic_exch_mgr_reset(struct fc_lport *, u32, u32); | ||
253 | int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int); | ||
254 | int fnic_wq_cmpl_handler(struct fnic *fnic, int); | ||
255 | int fnic_flogi_reg_handler(struct fnic *fnic); | ||
256 | void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, | ||
257 | struct fcpio_host_req *desc); | ||
258 | int fnic_fw_reset_handler(struct fnic *fnic); | ||
259 | void fnic_terminate_rport_io(struct fc_rport *); | ||
260 | const char *fnic_state_to_str(unsigned int state); | ||
261 | |||
262 | void fnic_log_q_error(struct fnic *fnic); | ||
263 | void fnic_handle_link_event(struct fnic *fnic); | ||
264 | |||
265 | #endif /* _FNIC_H_ */ | ||
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c new file mode 100644 index 000000000000..aea0c3becfd4 --- /dev/null +++ b/drivers/scsi/fnic/fnic_attrs.c | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/device.h> | ||
20 | #include <scsi/scsi_host.h> | ||
21 | #include "fnic.h" | ||
22 | |||
23 | static ssize_t fnic_show_state(struct device *dev, | ||
24 | struct device_attribute *attr, char *buf) | ||
25 | { | ||
26 | struct fc_lport *lp = shost_priv(class_to_shost(dev)); | ||
27 | struct fnic *fnic = lport_priv(lp); | ||
28 | |||
29 | return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]); | ||
30 | } | ||
31 | |||
32 | static ssize_t fnic_show_drv_version(struct device *dev, | ||
33 | struct device_attribute *attr, char *buf) | ||
34 | { | ||
35 | return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION); | ||
36 | } | ||
37 | |||
38 | static ssize_t fnic_show_link_state(struct device *dev, | ||
39 | struct device_attribute *attr, char *buf) | ||
40 | { | ||
41 | struct fc_lport *lp = shost_priv(class_to_shost(dev)); | ||
42 | |||
43 | return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up) | ||
44 | ? "Link Up" : "Link Down"); | ||
45 | } | ||
46 | |||
47 | static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL); | ||
48 | static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL); | ||
49 | static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL); | ||
50 | |||
51 | struct device_attribute *fnic_attrs[] = { | ||
52 | &dev_attr_fnic_state, | ||
53 | &dev_attr_drv_version, | ||
54 | &dev_attr_link_state, | ||
55 | NULL, | ||
56 | }; | ||
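| /* | ||
| * Hooked up via the shost_attrs field of the SCSI host template in | ||
| * fnic_main.c; these read-only files appear under the Scsi_Host sysfs | ||
| * directory (e.g. /sys/class/scsi_host/hostN/fnic_state). | ||
| */ | ||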
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c new file mode 100644 index 000000000000..07e6eedb83ce --- /dev/null +++ b/drivers/scsi/fnic/fnic_fcs.c | |||
@@ -0,0 +1,742 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/pci.h> | ||
20 | #include <linux/skbuff.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/spinlock.h> | ||
23 | #include <linux/if_ether.h> | ||
24 | #include <linux/if_vlan.h> | ||
25 | #include <linux/workqueue.h> | ||
26 | #include <scsi/fc/fc_els.h> | ||
27 | #include <scsi/fc/fc_fcoe.h> | ||
28 | #include <scsi/fc_frame.h> | ||
29 | #include <scsi/libfc.h> | ||
30 | #include "fnic_io.h" | ||
31 | #include "fnic.h" | ||
32 | #include "cq_enet_desc.h" | ||
33 | #include "cq_exch_desc.h" | ||
34 | |||
35 | struct workqueue_struct *fnic_event_queue; | ||
36 | |||
37 | void fnic_handle_link(struct work_struct *work) | ||
38 | { | ||
39 | struct fnic *fnic = container_of(work, struct fnic, link_work); | ||
40 | unsigned long flags; | ||
41 | int old_link_status; | ||
42 | u32 old_link_down_cnt; | ||
43 | |||
44 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
45 | |||
46 | if (fnic->stop_rx_link_events) { | ||
47 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
48 | return; | ||
49 | } | ||
50 | |||
51 | old_link_down_cnt = fnic->link_down_cnt; | ||
52 | old_link_status = fnic->link_status; | ||
53 | fnic->link_status = vnic_dev_link_status(fnic->vdev); | ||
54 | fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); | ||
55 | |||
56 | if (old_link_status == fnic->link_status) { | ||
57 | if (!fnic->link_status) | ||
58 | /* DOWN -> DOWN */ | ||
59 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
60 | else { | ||
61 | if (old_link_down_cnt != fnic->link_down_cnt) { | ||
62 | /* UP -> DOWN -> UP */ | ||
63 | fnic->lport->host_stats.link_failure_count++; | ||
64 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
65 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | ||
66 | "link down\n"); | ||
67 | fc_linkdown(fnic->lport); | ||
68 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | ||
69 | "link up\n"); | ||
70 | fc_linkup(fnic->lport); | ||
71 | } else | ||
72 | /* UP -> UP */ | ||
73 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
74 | } | ||
75 | } else if (fnic->link_status) { | ||
76 | /* DOWN -> UP */ | ||
77 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
78 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); | ||
79 | fc_linkup(fnic->lport); | ||
80 | } else { | ||
81 | /* UP -> DOWN */ | ||
82 | fnic->lport->host_stats.link_failure_count++; | ||
83 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
84 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); | ||
85 | fc_linkdown(fnic->lport); | ||
86 | } | ||
87 | |||
88 | } | ||
89 | |||
90 | /* | ||
91 | * This function passes incoming fabric frames to libFC | ||
92 | */ | ||
93 | void fnic_handle_frame(struct work_struct *work) | ||
94 | { | ||
95 | struct fnic *fnic = container_of(work, struct fnic, frame_work); | ||
96 | struct fc_lport *lp = fnic->lport; | ||
97 | unsigned long flags; | ||
98 | struct sk_buff *skb; | ||
99 | struct fc_frame *fp; | ||
100 | |||
101 | while ((skb = skb_dequeue(&fnic->frame_queue))) { | ||
102 | |||
103 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
104 | if (fnic->stop_rx_link_events) { | ||
105 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
106 | dev_kfree_skb(skb); | ||
107 | return; | ||
108 | } | ||
109 | fp = (struct fc_frame *)skb; | ||
110 | /* if Flogi resp frame, register the address */ | ||
111 | if (fr_flags(fp)) { | ||
112 | vnic_dev_add_addr(fnic->vdev, | ||
113 | fnic->data_src_addr); | ||
114 | fr_flags(fp) = 0; | ||
115 | } | ||
116 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
117 | |||
118 | fc_exch_recv(lp, lp->emp, fp); | ||
119 | } | ||
120 | |||
121 | } | ||
122 | |||
123 | static inline void fnic_import_rq_fc_frame(struct sk_buff *skb, | ||
124 | u32 len, u8 sof, u8 eof) | ||
125 | { | ||
126 | struct fc_frame *fp = (struct fc_frame *)skb; | ||
127 | |||
128 | skb_trim(skb, len); | ||
129 | fr_eof(fp) = eof; | ||
130 | fr_sof(fp) = sof; | ||
131 | } | ||
132 | |||
133 | |||
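| /* | ||
| * fnic_import_rq_eth_pkt: strip the Ethernet (or 802.1Q VLAN) header | ||
| * and the FCoE header from a received packet, set the frame SOF/EOF | ||
| * from the FCoE header and trailer, and trim the trailing CRC/EOF | ||
| * bytes. Returns -1 if the packet is not a recognized FCoE frame, | ||
| * 0 otherwise. | ||
| */ | ||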
134 | static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len) | ||
135 | { | ||
136 | struct fc_frame *fp; | ||
137 | struct ethhdr *eh; | ||
138 | struct vlan_ethhdr *vh; | ||
139 | struct fcoe_hdr *fcoe_hdr; | ||
140 | struct fcoe_crc_eof *ft; | ||
141 | u32 transport_len = 0; | ||
142 | |||
143 | eh = (struct ethhdr *)skb->data; | ||
144 | vh = (struct vlan_ethhdr *)skb->data; | ||
145 | if (vh->h_vlan_proto == htons(ETH_P_8021Q) && | ||
146 | vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) { | ||
147 | skb_pull(skb, sizeof(struct vlan_ethhdr)); | ||
148 | transport_len += sizeof(struct vlan_ethhdr); | ||
149 | } else if (eh->h_proto == htons(ETH_P_FCOE)) { | ||
150 | transport_len += sizeof(struct ethhdr); | ||
151 | skb_pull(skb, sizeof(struct ethhdr)); | ||
152 | } else | ||
153 | return -1; | ||
154 | |||
155 | fcoe_hdr = (struct fcoe_hdr *)skb->data; | ||
156 | if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER) | ||
157 | return -1; | ||
158 | |||
159 | fp = (struct fc_frame *)skb; | ||
160 | fc_frame_init(fp); | ||
161 | fr_sof(fp) = fcoe_hdr->fcoe_sof; | ||
162 | skb_pull(skb, sizeof(struct fcoe_hdr)); | ||
163 | transport_len += sizeof(struct fcoe_hdr); | ||
164 | |||
165 | ft = (struct fcoe_crc_eof *)(skb->data + len - | ||
166 | transport_len - sizeof(*ft)); | ||
167 | fr_eof(fp) = ft->fcoe_eof; | ||
168 | skb_trim(skb, len - transport_len - sizeof(*ft)); | ||
169 | return 0; | ||
170 | } | ||
171 | |||
172 | static inline int fnic_handle_flogi_resp(struct fnic *fnic, | ||
173 | struct fc_frame *fp) | ||
174 | { | ||
175 | u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC; | ||
176 | struct ethhdr *eth_hdr; | ||
177 | struct fc_frame_header *fh; | ||
178 | int ret = 0; | ||
179 | unsigned long flags; | ||
180 | struct fc_frame *old_flogi_resp = NULL; | ||
181 | |||
182 | fh = (struct fc_frame_header *)fr_hdr(fp); | ||
183 | |||
184 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
185 | |||
186 | if (fnic->state == FNIC_IN_ETH_MODE) { | ||
187 | |||
188 | /* | ||
189 | * Check if oxid matches on taking the lock. A new Flogi | ||
190 | * issued by libFC might have changed the fnic cached oxid | ||
191 | */ | ||
192 | if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) { | ||
193 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | ||
194 | "Flogi response oxid not" | ||
195 | " matching cached oxid, dropping frame" | ||
196 | "\n"); | ||
197 | ret = -1; | ||
198 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
199 | dev_kfree_skb_irq(fp_skb(fp)); | ||
200 | goto handle_flogi_resp_end; | ||
201 | } | ||
202 | |||
203 | /* Drop older cached flogi response frame, cache this frame */ | ||
204 | old_flogi_resp = fnic->flogi_resp; | ||
205 | fnic->flogi_resp = fp; | ||
206 | fnic->flogi_oxid = FC_XID_UNKNOWN; | ||
207 | |||
208 | /* | ||
209 | * This frame is part of FLOGI; get the source MAC address from | ||
210 | * it. If the source MAC is FC-OUI based, set the address mode | ||
211 | * flag to use the FC-OUI base for the destination MAC address; | ||
212 | * otherwise store the FCoE gateway address. | ||
213 | */ | ||
214 | eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp)); | ||
215 | memcpy(mac, eth_hdr->h_source, ETH_ALEN); | ||
216 | |||
217 | if (ntoh24(mac) == FC_FCOE_OUI) | ||
218 | fnic->fcoui_mode = 1; | ||
219 | else { | ||
220 | fnic->fcoui_mode = 0; | ||
221 | memcpy(fnic->dest_addr, mac, ETH_ALEN); | ||
222 | } | ||
223 | |||
224 | /* | ||
225 | * Except for the FLOGI frame, all outbound frames from us use the | ||
226 | * FC-OUI prefix plus our S_ID (fc_fcoe_set_mac()) as the Eth Src | ||
227 | * address; the FLOGI frame uses the vnic MAC address instead. | ||
228 | */ | ||
229 | fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id); | ||
230 | |||
231 | /* We get our s_id from the d_id of the flogi resp frame */ | ||
232 | fnic->s_id = ntoh24(fh->fh_d_id); | ||
233 | |||
234 | /* Change state to reflect transition from Eth to FC mode */ | ||
235 | fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; | ||
236 | |||
237 | } else { | ||
238 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | ||
239 | "Unexpected fnic state %s while" | ||
240 | " processing flogi resp\n", | ||
241 | fnic_state_to_str(fnic->state)); | ||
242 | ret = -1; | ||
243 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
244 | dev_kfree_skb_irq(fp_skb(fp)); | ||
245 | goto handle_flogi_resp_end; | ||
246 | } | ||
247 | |||
248 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
249 | |||
250 | /* Drop older cached frame */ | ||
251 | if (old_flogi_resp) | ||
252 | dev_kfree_skb_irq(fp_skb(old_flogi_resp)); | ||
253 | |||
254 | /* | ||
255 | * Send the FLOGI registration request to firmware; this will put | ||
256 | * the fnic in FC mode. | ||
257 | */ | ||
258 | ret = fnic_flogi_reg_handler(fnic); | ||
259 | |||
260 | if (ret < 0) { | ||
261 | int free_fp = 1; | ||
262 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
263 | /* | ||
264 | * Free the frame if some other thread is not | ||
265 | * pointing to it. | ||
266 | */ | ||
267 | if (fnic->flogi_resp != fp) | ||
268 | free_fp = 0; | ||
269 | else | ||
270 | fnic->flogi_resp = NULL; | ||
271 | |||
272 | if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) | ||
273 | fnic->state = FNIC_IN_ETH_MODE; | ||
274 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
275 | if (free_fp) | ||
276 | dev_kfree_skb_irq(fp_skb(fp)); | ||
277 | } | ||
278 | |||
279 | handle_flogi_resp_end: | ||
280 | return ret; | ||
281 | } | ||
282 | |||
283 | /* Returns 1 for a response that matches cached flogi oxid */ | ||
284 | static inline int is_matching_flogi_resp_frame(struct fnic *fnic, | ||
285 | struct fc_frame *fp) | ||
286 | { | ||
287 | struct fc_frame_header *fh; | ||
288 | int ret = 0; | ||
289 | u32 f_ctl; | ||
290 | |||
291 | fh = fc_frame_header_get(fp); | ||
292 | f_ctl = ntoh24(fh->fh_f_ctl); | ||
293 | |||
294 | if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) && | ||
295 | fh->fh_r_ctl == FC_RCTL_ELS_REP && | ||
296 | (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX && | ||
297 | fh->fh_type == FC_TYPE_ELS) | ||
298 | ret = 1; | ||
299 | |||
300 | return ret; | ||
301 | } | ||
302 | |||
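| /* | ||
| * fnic_rq_cmpl_frame_recv: per-descriptor receive completion handler. | ||
| * Unmaps the receive buffer, decodes the completion (FCP or ENET | ||
| * type), drops frames with CRC or encapsulation errors, hands a | ||
| * matching FLOGI accept to fnic_handle_flogi_resp(), and queues all | ||
| * other frames onto frame_queue for fnic_handle_frame() to deliver | ||
| * to libFC. | ||
| */ | ||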
303 | static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc | ||
304 | *cq_desc, struct vnic_rq_buf *buf, | ||
305 | int skipped __attribute__((unused)), | ||
306 | void *opaque) | ||
307 | { | ||
308 | struct fnic *fnic = vnic_dev_priv(rq->vdev); | ||
309 | struct sk_buff *skb; | ||
310 | struct fc_frame *fp; | ||
311 | unsigned int eth_hdrs_stripped; | ||
312 | u8 type, color, eop, sop, ingress_port, vlan_stripped; | ||
313 | u8 fcoe = 0, fcoe_sof, fcoe_eof; | ||
314 | u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0; | ||
315 | u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; | ||
316 | u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc; | ||
317 | u8 fcs_ok = 1, packet_error = 0; | ||
318 | u16 q_number, completed_index, bytes_written = 0, vlan, checksum; | ||
319 | u32 rss_hash; | ||
320 | u16 exchange_id, tmpl; | ||
321 | u8 sof = 0; | ||
322 | u8 eof = 0; | ||
323 | u32 fcp_bytes_written = 0; | ||
324 | unsigned long flags; | ||
325 | |||
326 | pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, | ||
327 | PCI_DMA_FROMDEVICE); | ||
328 | skb = buf->os_buf; | ||
329 | buf->os_buf = NULL; | ||
330 | |||
331 | cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); | ||
332 | if (type == CQ_DESC_TYPE_RQ_FCP) { | ||
333 | cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc, | ||
334 | &type, &color, &q_number, &completed_index, | ||
335 | &eop, &sop, &fcoe_fc_crc_ok, &exchange_id, | ||
336 | &tmpl, &fcp_bytes_written, &sof, &eof, | ||
337 | &ingress_port, &packet_error, | ||
338 | &fcoe_enc_error, &fcs_ok, &vlan_stripped, | ||
339 | &vlan); | ||
340 | eth_hdrs_stripped = 1; | ||
341 | |||
342 | } else if (type == CQ_DESC_TYPE_RQ_ENET) { | ||
343 | cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, | ||
344 | &type, &color, &q_number, &completed_index, | ||
345 | &ingress_port, &fcoe, &eop, &sop, | ||
346 | &rss_type, &csum_not_calc, &rss_hash, | ||
347 | &bytes_written, &packet_error, | ||
348 | &vlan_stripped, &vlan, &checksum, | ||
349 | &fcoe_sof, &fcoe_fc_crc_ok, | ||
350 | &fcoe_enc_error, &fcoe_eof, | ||
351 | &tcp_udp_csum_ok, &udp, &tcp, | ||
352 | &ipv4_csum_ok, &ipv6, &ipv4, | ||
353 | &ipv4_fragment, &fcs_ok); | ||
354 | eth_hdrs_stripped = 0; | ||
355 | |||
356 | } else { | ||
357 | /* wrong CQ type */ | ||
358 | shost_printk(KERN_ERR, fnic->lport->host, | ||
359 | "fnic rq_cmpl wrong cq type x%x\n", type); | ||
360 | goto drop; | ||
361 | } | ||
362 | |||
363 | if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) { | ||
364 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | ||
365 | "fnic rq_cmpl fcoe x%x fcsok x%x" | ||
366 | " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err" | ||
367 | " x%x\n", | ||
368 | fcoe, fcs_ok, packet_error, | ||
369 | fcoe_fc_crc_ok, fcoe_enc_error); | ||
370 | goto drop; | ||
371 | } | ||
372 | |||
373 | if (eth_hdrs_stripped) | ||
374 | fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof); | ||
375 | else if (fnic_import_rq_eth_pkt(skb, bytes_written)) | ||
376 | goto drop; | ||
377 | |||
378 | fp = (struct fc_frame *)skb; | ||
379 | |||
380 | /* | ||
381 | * If the frame is an ELS response that matches the cached FLOGI OX_ID | ||
382 | * and is an accept, issue a flogi_reg_request copy WQ request to the | ||
383 | * firmware to register the S_ID and determine FC_OUI vs gateway mode. | ||
384 | */ | ||
385 | if (is_matching_flogi_resp_frame(fnic, fp)) { | ||
386 | if (!eth_hdrs_stripped) { | ||
387 | if (fc_frame_payload_op(fp) == ELS_LS_ACC) { | ||
388 | fnic_handle_flogi_resp(fnic, fp); | ||
389 | return; | ||
390 | } | ||
391 | /* | ||
392 | * Received a FLOGI reject. No point registering | ||
393 | * with firmware, but forward it to libFC. | ||
394 | */ | ||
395 | goto forward; | ||
396 | } | ||
397 | goto drop; | ||
398 | } | ||
399 | if (!eth_hdrs_stripped) | ||
400 | goto drop; | ||
401 | |||
402 | forward: | ||
403 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
404 | if (fnic->stop_rx_link_events) { | ||
405 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
406 | goto drop; | ||
407 | } | ||
408 | /* Use fr_flags to indicate whether this is a successful flogi resp or not */ | ||
409 | fr_flags(fp) = 0; | ||
410 | fr_dev(fp) = fnic->lport; | ||
411 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
412 | |||
413 | skb_queue_tail(&fnic->frame_queue, skb); | ||
414 | queue_work(fnic_event_queue, &fnic->frame_work); | ||
415 | |||
416 | return; | ||
417 | drop: | ||
418 | dev_kfree_skb_irq(skb); | ||
419 | } | ||
420 | |||
421 | static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev, | ||
422 | struct cq_desc *cq_desc, u8 type, | ||
423 | u16 q_number, u16 completed_index, | ||
424 | void *opaque) | ||
425 | { | ||
426 | struct fnic *fnic = vnic_dev_priv(vdev); | ||
427 | |||
428 | vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index, | ||
429 | VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv, | ||
430 | NULL); | ||
431 | return 0; | ||
432 | } | ||
433 | |||
434 | int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do) | ||
435 | { | ||
436 | unsigned int tot_rq_work_done = 0, cur_work_done; | ||
437 | unsigned int i; | ||
438 | int err; | ||
439 | |||
440 | for (i = 0; i < fnic->rq_count; i++) { | ||
441 | cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do, | ||
442 | fnic_rq_cmpl_handler_cont, | ||
443 | NULL); | ||
444 | if (cur_work_done) { | ||
445 | err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); | ||
446 | if (err) | ||
447 | shost_printk(KERN_ERR, fnic->lport->host, | ||
448 | "fnic_alloc_rq_frame cant alloc" | ||
449 | " frame\n"); | ||
450 | } | ||
451 | tot_rq_work_done += cur_work_done; | ||
452 | } | ||
453 | |||
454 | return tot_rq_work_done; | ||
455 | } | ||
456 | |||
457 | /* | ||
458 | * This function is called once at init time to allocate and fill RQ | ||
459 | * buffers. Subsequently, it is called in the interrupt context after RQ | ||
460 | * buffer processing to replenish the buffers in the RQ | ||
461 | */ | ||
462 | int fnic_alloc_rq_frame(struct vnic_rq *rq) | ||
463 | { | ||
464 | struct fnic *fnic = vnic_dev_priv(rq->vdev); | ||
465 | struct sk_buff *skb; | ||
466 | u16 len; | ||
467 | dma_addr_t pa; | ||
468 | |||
469 | len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM; | ||
470 | skb = dev_alloc_skb(len); | ||
471 | if (!skb) { | ||
472 | FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, | ||
473 | "Unable to allocate RQ sk_buff\n"); | ||
474 | return -ENOMEM; | ||
475 | } | ||
476 | skb_reset_mac_header(skb); | ||
477 | skb_reset_transport_header(skb); | ||
478 | skb_reset_network_header(skb); | ||
479 | skb_put(skb, len); | ||
480 | pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE); | ||
481 | fnic_queue_rq_desc(rq, skb, pa, len); | ||
482 | return 0; | ||
483 | } | ||
484 | |||
485 | void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) | ||
486 | { | ||
487 | struct fc_frame *fp = buf->os_buf; | ||
488 | struct fnic *fnic = vnic_dev_priv(rq->vdev); | ||
489 | |||
490 | pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, | ||
491 | PCI_DMA_FROMDEVICE); | ||
492 | |||
493 | dev_kfree_skb(fp_skb(fp)); | ||
494 | buf->os_buf = NULL; | ||
495 | } | ||
496 | |||
497 | static inline int is_flogi_frame(struct fc_frame_header *fh) | ||
498 | { | ||
499 | return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI; | ||
500 | } | ||
501 | |||
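| /* | ||
| * fnic_send_frame: prepend the Ethernet (or VLAN-tagged) and FCoE | ||
| * headers, pick the source/destination MACs (a FLOGI uses the vNIC | ||
| * MAC as source; other frames use data_src_addr and either the | ||
| * FC-OUI mapped destination or the stored gateway address), then | ||
| * DMA-map the frame and post it on raw work queue 0. | ||
| */ | ||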
502 | int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) | ||
503 | { | ||
504 | struct vnic_wq *wq = &fnic->wq[0]; | ||
505 | struct sk_buff *skb; | ||
506 | dma_addr_t pa; | ||
507 | struct ethhdr *eth_hdr; | ||
508 | struct vlan_ethhdr *vlan_hdr; | ||
509 | struct fcoe_hdr *fcoe_hdr; | ||
510 | struct fc_frame_header *fh; | ||
511 | u32 tot_len, eth_hdr_len; | ||
512 | int ret = 0; | ||
513 | unsigned long flags; | ||
514 | |||
515 | fh = fc_frame_header_get(fp); | ||
516 | skb = fp_skb(fp); | ||
517 | |||
518 | if (!fnic->vlan_hw_insert) { | ||
519 | eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); | ||
520 | vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len); | ||
521 | eth_hdr = (struct ethhdr *)vlan_hdr; | ||
522 | vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); | ||
523 | vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE); | ||
524 | vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); | ||
525 | fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1); | ||
526 | } else { | ||
527 | eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr); | ||
528 | eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len); | ||
529 | eth_hdr->h_proto = htons(ETH_P_FCOE); | ||
530 | fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); | ||
531 | } | ||
532 | |||
533 | if (is_flogi_frame(fh)) { | ||
534 | fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); | ||
535 | memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN); | ||
536 | } else { | ||
537 | if (fnic->fcoui_mode) | ||
538 | fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); | ||
539 | else | ||
540 | memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN); | ||
541 | memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); | ||
542 | } | ||
543 | |||
544 | tot_len = skb->len; | ||
545 | BUG_ON(tot_len % 4); | ||
546 | |||
547 | memset(fcoe_hdr, 0, sizeof(*fcoe_hdr)); | ||
548 | fcoe_hdr->fcoe_sof = fr_sof(fp); | ||
549 | if (FC_FCOE_VER) | ||
550 | FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER); | ||
551 | |||
552 | pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE); | ||
553 | |||
554 | spin_lock_irqsave(&fnic->wq_lock[0], flags); | ||
555 | |||
556 | if (!vnic_wq_desc_avail(wq)) { | ||
557 | pci_unmap_single(fnic->pdev, pa, | ||
558 | tot_len, PCI_DMA_TODEVICE); | ||
559 | ret = -1; | ||
560 | goto fnic_send_frame_end; | ||
561 | } | ||
562 | |||
563 | fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp), | ||
564 | fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1); | ||
565 | fnic_send_frame_end: | ||
566 | spin_unlock_irqrestore(&fnic->wq_lock[0], flags); | ||
567 | |||
568 | if (ret) | ||
569 | dev_kfree_skb_any(fp_skb(fp)); | ||
570 | |||
571 | return ret; | ||
572 | } | ||
573 | |||
574 | /* | ||
575 | * fnic_send | ||
576 | * Routine to send a raw frame | ||
577 | */ | ||
578 | int fnic_send(struct fc_lport *lp, struct fc_frame *fp) | ||
579 | { | ||
580 | struct fnic *fnic = lport_priv(lp); | ||
581 | struct fc_frame_header *fh; | ||
582 | int ret = 0; | ||
583 | enum fnic_state old_state; | ||
584 | unsigned long flags; | ||
585 | struct fc_frame *old_flogi = NULL; | ||
586 | struct fc_frame *old_flogi_resp = NULL; | ||
587 | |||
588 | if (fnic->in_remove) { | ||
589 | dev_kfree_skb(fp_skb(fp)); | ||
590 | ret = -1; | ||
591 | goto fnic_send_end; | ||
592 | } | ||
593 | |||
594 | fh = fc_frame_header_get(fp); | ||
595 | /* If not a FLOGI frame, send it out; this is the common case */ | ||
596 | if (!is_flogi_frame(fh)) | ||
597 | return fnic_send_frame(fnic, fp); | ||
598 | |||
599 | /* Flogi frame, now enter the state machine */ | ||
600 | |||
601 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
602 | again: | ||
603 | /* Get any old cached frames, free them after dropping lock */ | ||
604 | old_flogi = fnic->flogi; | ||
605 | fnic->flogi = NULL; | ||
606 | old_flogi_resp = fnic->flogi_resp; | ||
607 | fnic->flogi_resp = NULL; | ||
608 | |||
609 | fnic->flogi_oxid = FC_XID_UNKNOWN; | ||
610 | |||
611 | old_state = fnic->state; | ||
612 | switch (old_state) { | ||
613 | case FNIC_IN_FC_MODE: | ||
614 | case FNIC_IN_ETH_TRANS_FC_MODE: | ||
615 | default: | ||
616 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; | ||
617 | vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); | ||
618 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
619 | |||
620 | if (old_flogi) { | ||
621 | dev_kfree_skb(fp_skb(old_flogi)); | ||
622 | old_flogi = NULL; | ||
623 | } | ||
624 | if (old_flogi_resp) { | ||
625 | dev_kfree_skb(fp_skb(old_flogi_resp)); | ||
626 | old_flogi_resp = NULL; | ||
627 | } | ||
628 | |||
629 | ret = fnic_fw_reset_handler(fnic); | ||
630 | |||
631 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
632 | if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) | ||
633 | goto again; | ||
634 | if (ret) { | ||
635 | fnic->state = old_state; | ||
636 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
637 | dev_kfree_skb(fp_skb(fp)); | ||
638 | goto fnic_send_end; | ||
639 | } | ||
640 | old_flogi = fnic->flogi; | ||
641 | fnic->flogi = fp; | ||
642 | fnic->flogi_oxid = ntohs(fh->fh_ox_id); | ||
643 | old_flogi_resp = fnic->flogi_resp; | ||
644 | fnic->flogi_resp = NULL; | ||
645 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
646 | break; | ||
647 | |||
648 | case FNIC_IN_FC_TRANS_ETH_MODE: | ||
649 | /* | ||
650 | * A reset is pending with the firmware. Store the flogi | ||
651 | * and its oxid. The transition out of this state happens | ||
652 | * only when firmware completes the reset, either with | ||
653 | * success or failure. On success, transition to | ||
654 | * FNIC_IN_ETH_MODE; on failure, transition to | ||
655 | * FNIC_IN_FC_MODE. | ||
656 | */ | ||
657 | fnic->flogi = fp; | ||
658 | fnic->flogi_oxid = ntohs(fh->fh_ox_id); | ||
659 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
660 | break; | ||
661 | |||
662 | case FNIC_IN_ETH_MODE: | ||
663 | /* | ||
664 | * The fw/hw is already in eth mode. Store the oxid, | ||
665 | * and send the flogi frame out. The transition out of this | ||
666 | * state happens only when we receive a flogi response from the | ||
667 | * network, and the oxid matches the cached oxid when the | ||
668 | * flogi frame was sent out. If they match, then we issue | ||
669 | * a flogi_reg request and transition to state | ||
670 | * FNIC_IN_ETH_TRANS_FC_MODE | ||
671 | */ | ||
672 | fnic->flogi_oxid = ntohs(fh->fh_ox_id); | ||
673 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
674 | ret = fnic_send_frame(fnic, fp); | ||
675 | break; | ||
676 | } | ||
677 | |||
678 | fnic_send_end: | ||
679 | if (old_flogi) | ||
680 | dev_kfree_skb(fp_skb(old_flogi)); | ||
681 | if (old_flogi_resp) | ||
682 | dev_kfree_skb(fp_skb(old_flogi_resp)); | ||
683 | return ret; | ||
684 | } | ||
685 | |||
686 | static void fnic_wq_complete_frame_send(struct vnic_wq *wq, | ||
687 | struct cq_desc *cq_desc, | ||
688 | struct vnic_wq_buf *buf, void *opaque) | ||
689 | { | ||
690 | struct sk_buff *skb = buf->os_buf; | ||
691 | struct fc_frame *fp = (struct fc_frame *)skb; | ||
692 | struct fnic *fnic = vnic_dev_priv(wq->vdev); | ||
693 | |||
694 | pci_unmap_single(fnic->pdev, buf->dma_addr, | ||
695 | buf->len, PCI_DMA_TODEVICE); | ||
696 | dev_kfree_skb_irq(fp_skb(fp)); | ||
697 | buf->os_buf = NULL; | ||
698 | } | ||
699 | |||
700 | static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev, | ||
701 | struct cq_desc *cq_desc, u8 type, | ||
702 | u16 q_number, u16 completed_index, | ||
703 | void *opaque) | ||
704 | { | ||
705 | struct fnic *fnic = vnic_dev_priv(vdev); | ||
706 | unsigned long flags; | ||
707 | |||
708 | spin_lock_irqsave(&fnic->wq_lock[q_number], flags); | ||
709 | vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index, | ||
710 | fnic_wq_complete_frame_send, NULL); | ||
711 | spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags); | ||
712 | |||
713 | return 0; | ||
714 | } | ||
715 | |||
716 | int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do) | ||
717 | { | ||
718 | unsigned int wq_work_done = 0; | ||
719 | unsigned int i; | ||
720 | |||
721 | for (i = 0; i < fnic->raw_wq_count; i++) { | ||
722 | wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i], | ||
723 | work_to_do, | ||
724 | fnic_wq_cmpl_handler_cont, | ||
725 | NULL); | ||
726 | } | ||
727 | |||
728 | return wq_work_done; | ||
729 | } | ||
730 | |||
731 | |||
732 | void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) | ||
733 | { | ||
734 | struct fc_frame *fp = buf->os_buf; | ||
735 | struct fnic *fnic = vnic_dev_priv(wq->vdev); | ||
736 | |||
737 | pci_unmap_single(fnic->pdev, buf->dma_addr, | ||
738 | buf->len, PCI_DMA_TODEVICE); | ||
739 | |||
740 | dev_kfree_skb(fp_skb(fp)); | ||
741 | buf->os_buf = NULL; | ||
742 | } | ||
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h new file mode 100644 index 000000000000..f0b896988cd5 --- /dev/null +++ b/drivers/scsi/fnic/fnic_io.h | |||
@@ -0,0 +1,67 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _FNIC_IO_H_ | ||
19 | #define _FNIC_IO_H_ | ||
20 | |||
21 | #include <scsi/fc/fc_fcp.h> | ||
22 | |||
23 | #define FNIC_DFLT_SG_DESC_CNT 32 | ||
24 | #define FNIC_MAX_SG_DESC_CNT 1024 /* Maximum descriptors per sgl */ | ||
25 | #define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */ | ||
26 | |||
27 | struct host_sg_desc { | ||
28 | __le64 addr; | ||
29 | __le32 len; | ||
30 | u32 _resvd; | ||
31 | }; | ||
32 | |||
33 | struct fnic_dflt_sgl_list { | ||
34 | struct host_sg_desc sg_desc[FNIC_DFLT_SG_DESC_CNT]; | ||
35 | }; | ||
36 | |||
37 | struct fnic_sgl_list { | ||
38 | struct host_sg_desc sg_desc[FNIC_MAX_SG_DESC_CNT]; | ||
39 | }; | ||
40 | |||
41 | enum fnic_sgl_list_type { | ||
42 | FNIC_SGL_CACHE_DFLT = 0, /* cache with default size sgl */ | ||
43 | FNIC_SGL_CACHE_MAX, /* cache with max size sgl */ | ||
44 | FNIC_SGL_NUM_CACHES /* number of sgl caches */ | ||
45 | }; | ||
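| /* | ||
| * The two caches back fnic->io_sgl_pool[]: an I/O needing at most | ||
| * FNIC_DFLT_SG_DESC_CNT scatter-gather entries gets its descriptor | ||
| * list from the default-size cache, larger I/Os (up to | ||
| * FNIC_MAX_SG_DESC_CNT) from the max-size cache. | ||
| */ | ||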
46 | |||
47 | enum fnic_ioreq_state { | ||
48 | FNIC_IOREQ_CMD_PENDING = 0, | ||
49 | FNIC_IOREQ_ABTS_PENDING, | ||
50 | FNIC_IOREQ_ABTS_COMPLETE, | ||
51 | FNIC_IOREQ_CMD_COMPLETE, | ||
52 | }; | ||
53 | |||
54 | struct fnic_io_req { | ||
55 | struct host_sg_desc *sgl_list; /* sgl list */ | ||
56 | void *sgl_list_alloc; /* sgl list address used for free */ | ||
57 | dma_addr_t sense_buf_pa; /* dma address for sense buffer*/ | ||
58 | dma_addr_t sgl_list_pa; /* dma address for sgl list */ | ||
59 | u16 sgl_cnt; | ||
60 | u8 sgl_type; /* device DMA descriptor list type */ | ||
61 | u8 io_completed:1; /* set to 1 when fw completes IO */ | ||
62 | u32 port_id; /* remote port DID */ | ||
63 | struct completion *abts_done; /* completion for abts */ | ||
64 | struct completion *dr_done; /* completion for device reset */ | ||
65 | }; | ||
66 | |||
67 | #endif /* _FNIC_IO_H_ */ | ||
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c new file mode 100644 index 000000000000..2b3064828aea --- /dev/null +++ b/drivers/scsi/fnic/fnic_isr.c | |||
@@ -0,0 +1,332 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/pci.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <scsi/libfc.h> | ||
23 | #include <scsi/fc_frame.h> | ||
24 | #include "vnic_dev.h" | ||
25 | #include "vnic_intr.h" | ||
26 | #include "vnic_stats.h" | ||
27 | #include "fnic_io.h" | ||
28 | #include "fnic.h" | ||
29 | |||
30 | static irqreturn_t fnic_isr_legacy(int irq, void *data) | ||
31 | { | ||
32 | struct fnic *fnic = data; | ||
33 | u32 pba; | ||
34 | unsigned long work_done = 0; | ||
35 | |||
36 | pba = vnic_intr_legacy_pba(fnic->legacy_pba); | ||
37 | if (!pba) | ||
38 | return IRQ_NONE; | ||
39 | |||
40 | if (pba & (1 << FNIC_INTX_NOTIFY)) { | ||
41 | vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]); | ||
42 | fnic_handle_link_event(fnic); | ||
43 | } | ||
44 | |||
45 | if (pba & (1 << FNIC_INTX_ERR)) { | ||
46 | vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]); | ||
47 | fnic_log_q_error(fnic); | ||
48 | } | ||
49 | |||
50 | if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) { | ||
51 | work_done += fnic_wq_copy_cmpl_handler(fnic, 8); | ||
52 | work_done += fnic_wq_cmpl_handler(fnic, 4); | ||
53 | work_done += fnic_rq_cmpl_handler(fnic, 4); | ||
54 | |||
55 | vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ], | ||
56 | work_done, | ||
57 | 1 /* unmask intr */, | ||
58 | 1 /* reset intr timer */); | ||
59 | } | ||
60 | |||
61 | return IRQ_HANDLED; | ||
62 | } | ||
63 | |||
64 | static irqreturn_t fnic_isr_msi(int irq, void *data) | ||
65 | { | ||
66 | struct fnic *fnic = data; | ||
67 | unsigned long work_done = 0; | ||
68 | |||
69 | work_done += fnic_wq_copy_cmpl_handler(fnic, 8); | ||
70 | work_done += fnic_wq_cmpl_handler(fnic, 4); | ||
71 | work_done += fnic_rq_cmpl_handler(fnic, 4); | ||
72 | |||
73 | vnic_intr_return_credits(&fnic->intr[0], | ||
74 | work_done, | ||
75 | 1 /* unmask intr */, | ||
76 | 1 /* reset intr timer */); | ||
77 | |||
78 | return IRQ_HANDLED; | ||
79 | } | ||
80 | |||
81 | static irqreturn_t fnic_isr_msix_rq(int irq, void *data) | ||
82 | { | ||
83 | struct fnic *fnic = data; | ||
84 | unsigned long rq_work_done = 0; | ||
85 | |||
86 | rq_work_done = fnic_rq_cmpl_handler(fnic, 4); | ||
87 | vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ], | ||
88 | rq_work_done, | ||
89 | 1 /* unmask intr */, | ||
90 | 1 /* reset intr timer */); | ||
91 | |||
92 | return IRQ_HANDLED; | ||
93 | } | ||
94 | |||
95 | static irqreturn_t fnic_isr_msix_wq(int irq, void *data) | ||
96 | { | ||
97 | struct fnic *fnic = data; | ||
98 | unsigned long wq_work_done = 0; | ||
99 | |||
100 | wq_work_done = fnic_wq_cmpl_handler(fnic, 4); | ||
101 | vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ], | ||
102 | wq_work_done, | ||
103 | 1 /* unmask intr */, | ||
104 | 1 /* reset intr timer */); | ||
105 | return IRQ_HANDLED; | ||
106 | } | ||
107 | |||
108 | static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data) | ||
109 | { | ||
110 | struct fnic *fnic = data; | ||
111 | unsigned long wq_copy_work_done = 0; | ||
112 | |||
113 | wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, 8); | ||
114 | vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY], | ||
115 | wq_copy_work_done, | ||
116 | 1 /* unmask intr */, | ||
117 | 1 /* reset intr timer */); | ||
118 | return IRQ_HANDLED; | ||
119 | } | ||
120 | |||
121 | static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data) | ||
122 | { | ||
123 | struct fnic *fnic = data; | ||
124 | |||
125 | vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]); | ||
126 | fnic_log_q_error(fnic); | ||
127 | fnic_handle_link_event(fnic); | ||
128 | |||
129 | return IRQ_HANDLED; | ||
130 | } | ||
131 | |||
132 | void fnic_free_intr(struct fnic *fnic) | ||
133 | { | ||
134 | int i; | ||
135 | |||
136 | switch (vnic_dev_get_intr_mode(fnic->vdev)) { | ||
137 | case VNIC_DEV_INTR_MODE_INTX: | ||
138 | case VNIC_DEV_INTR_MODE_MSI: | ||
139 | free_irq(fnic->pdev->irq, fnic); | ||
140 | break; | ||
141 | |||
142 | case VNIC_DEV_INTR_MODE_MSIX: | ||
143 | for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) | ||
144 | if (fnic->msix[i].requested) | ||
145 | free_irq(fnic->msix_entry[i].vector, | ||
146 | fnic->msix[i].devid); | ||
147 | break; | ||
148 | |||
149 | default: | ||
150 | break; | ||
151 | } | ||
152 | } | ||
153 | |||
154 | int fnic_request_intr(struct fnic *fnic) | ||
155 | { | ||
156 | int err = 0; | ||
157 | int i; | ||
158 | |||
159 | switch (vnic_dev_get_intr_mode(fnic->vdev)) { | ||
160 | |||
161 | case VNIC_DEV_INTR_MODE_INTX: | ||
162 | err = request_irq(fnic->pdev->irq, &fnic_isr_legacy, | ||
163 | IRQF_SHARED, DRV_NAME, fnic); | ||
164 | break; | ||
165 | |||
166 | case VNIC_DEV_INTR_MODE_MSI: | ||
167 | err = request_irq(fnic->pdev->irq, &fnic_isr_msi, | ||
168 | 0, fnic->name, fnic); | ||
169 | break; | ||
170 | |||
171 | case VNIC_DEV_INTR_MODE_MSIX: | ||
172 | |||
173 | sprintf(fnic->msix[FNIC_MSIX_RQ].devname, | ||
174 | "%.11s-fcs-rq", fnic->name); | ||
175 | fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq; | ||
176 | fnic->msix[FNIC_MSIX_RQ].devid = fnic; | ||
177 | |||
178 | sprintf(fnic->msix[FNIC_MSIX_WQ].devname, | ||
179 | "%.11s-fcs-wq", fnic->name); | ||
180 | fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq; | ||
181 | fnic->msix[FNIC_MSIX_WQ].devid = fnic; | ||
182 | |||
183 | sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname, | ||
184 | "%.11s-scsi-wq", fnic->name); | ||
185 | fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy; | ||
186 | fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic; | ||
187 | |||
188 | sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname, | ||
189 | "%.11s-err-notify", fnic->name); | ||
190 | fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr = | ||
191 | fnic_isr_msix_err_notify; | ||
192 | fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic; | ||
193 | |||
194 | for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) { | ||
195 | err = request_irq(fnic->msix_entry[i].vector, | ||
196 | fnic->msix[i].isr, 0, | ||
197 | fnic->msix[i].devname, | ||
198 | fnic->msix[i].devid); | ||
199 | if (err) { | ||
200 | shost_printk(KERN_ERR, fnic->lport->host, | ||
201 | "MSIX: request_irq" | ||
202 | " failed %d\n", err); | ||
203 | fnic_free_intr(fnic); | ||
204 | break; | ||
205 | } | ||
206 | fnic->msix[i].requested = 1; | ||
207 | } | ||
208 | break; | ||
209 | |||
210 | default: | ||
211 | break; | ||
212 | } | ||
213 | |||
214 | return err; | ||
215 | } | ||
216 | |||
217 | int fnic_set_intr_mode(struct fnic *fnic) | ||
218 | { | ||
219 | unsigned int n = ARRAY_SIZE(fnic->rq); | ||
220 | unsigned int m = ARRAY_SIZE(fnic->wq); | ||
221 | unsigned int o = ARRAY_SIZE(fnic->wq_copy); | ||
222 | unsigned int i; | ||
223 | |||
224 | /* | ||
225 | * Set interrupt mode (INTx, MSI, MSI-X) depending on | ||
226 | * system capabilities. | ||
227 | * | ||
228 | * Try MSI-X first | ||
229 | * | ||
230 | * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs | ||
231 | * (last INTR is used for WQ/RQ errors and notification area) | ||
232 | */ | ||
233 | |||
234 | BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1); | ||
235 | for (i = 0; i < n + m + o + 1; i++) | ||
236 | fnic->msix_entry[i].entry = i; | ||
237 | |||
238 | if (fnic->rq_count >= n && | ||
239 | fnic->raw_wq_count >= m && | ||
240 | fnic->wq_copy_count >= o && | ||
241 | fnic->cq_count >= n + m + o) { | ||
242 | if (!pci_enable_msix(fnic->pdev, fnic->msix_entry, | ||
243 | n + m + o + 1)) { | ||
244 | fnic->rq_count = n; | ||
245 | fnic->raw_wq_count = m; | ||
246 | fnic->wq_copy_count = o; | ||
247 | fnic->wq_count = m + o; | ||
248 | fnic->cq_count = n + m + o; | ||
249 | fnic->intr_count = n + m + o + 1; | ||
250 | fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY; | ||
251 | |||
252 | FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, | ||
253 | "Using MSI-X Interrupts\n"); | ||
254 | vnic_dev_set_intr_mode(fnic->vdev, | ||
255 | VNIC_DEV_INTR_MODE_MSIX); | ||
256 | return 0; | ||
257 | } | ||
258 | } | ||
259 | |||
260 | /* | ||
261 | * Next try MSI | ||
262 | * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR | ||
263 | */ | ||
264 | if (fnic->rq_count >= 1 && | ||
265 | fnic->raw_wq_count >= 1 && | ||
266 | fnic->wq_copy_count >= 1 && | ||
267 | fnic->cq_count >= 3 && | ||
268 | fnic->intr_count >= 1 && | ||
269 | !pci_enable_msi(fnic->pdev)) { | ||
270 | |||
271 | fnic->rq_count = 1; | ||
272 | fnic->raw_wq_count = 1; | ||
273 | fnic->wq_copy_count = 1; | ||
274 | fnic->wq_count = 2; | ||
275 | fnic->cq_count = 3; | ||
276 | fnic->intr_count = 1; | ||
277 | fnic->err_intr_offset = 0; | ||
278 | |||
279 | FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, | ||
280 | "Using MSI Interrupts\n"); | ||
281 | vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI); | ||
282 | |||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | /* | ||
287 | * Next try INTx | ||
288 | * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs | ||
289 | * 1 INTR is used for all 3 queues, 1 INTR for queue errors | ||
290 | * 1 INTR for notification area | ||
291 | */ | ||
292 | |||
293 | if (fnic->rq_count >= 1 && | ||
294 | fnic->raw_wq_count >= 1 && | ||
295 | fnic->wq_copy_count >= 1 && | ||
296 | fnic->cq_count >= 3 && | ||
297 | fnic->intr_count >= 3) { | ||
298 | |||
299 | fnic->rq_count = 1; | ||
300 | fnic->raw_wq_count = 1; | ||
301 | fnic->wq_copy_count = 1; | ||
302 | fnic->cq_count = 3; | ||
303 | fnic->intr_count = 3; | ||
304 | |||
305 | FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, | ||
306 | "Using Legacy Interrupts\n"); | ||
307 | vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX); | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); | ||
313 | |||
314 | return -EINVAL; | ||
315 | } | ||
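| /* | ||
| * With FNIC_RQ_MAX, FNIC_WQ_MAX and FNIC_WQ_COPY_MAX all 1, MSI-X | ||
| * needs n + m + o + 1 = 4 vectors (RQ, WQ, copy WQ, plus one shared | ||
| * by queue errors and the notification area), matching | ||
| * FNIC_MSIX_INTR_MAX. | ||
| */ | ||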
316 | |||
317 | void fnic_clear_intr_mode(struct fnic *fnic) | ||
318 | { | ||
319 | switch (vnic_dev_get_intr_mode(fnic->vdev)) { | ||
320 | case VNIC_DEV_INTR_MODE_MSIX: | ||
321 | pci_disable_msix(fnic->pdev); | ||
322 | break; | ||
323 | case VNIC_DEV_INTR_MODE_MSI: | ||
324 | pci_disable_msi(fnic->pdev); | ||
325 | break; | ||
326 | default: | ||
327 | break; | ||
328 | } | ||
329 | |||
330 | vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX); | ||
331 | } | ||
332 | |||
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c new file mode 100644 index 000000000000..32ef6b87d895 --- /dev/null +++ b/drivers/scsi/fnic/fnic_main.c | |||
@@ -0,0 +1,942 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/mempool.h> | ||
20 | #include <linux/string.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/pci.h> | ||
24 | #include <linux/skbuff.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/workqueue.h> | ||
28 | #include <scsi/scsi_host.h> | ||
29 | #include <scsi/scsi_transport.h> | ||
30 | #include <scsi/scsi_transport_fc.h> | ||
31 | #include <scsi/scsi_tcq.h> | ||
32 | #include <scsi/libfc.h> | ||
33 | #include <scsi/fc_frame.h> | ||
34 | |||
35 | #include "vnic_dev.h" | ||
36 | #include "vnic_intr.h" | ||
37 | #include "vnic_stats.h" | ||
38 | #include "fnic_io.h" | ||
39 | #include "fnic.h" | ||
40 | |||
41 | #define PCI_DEVICE_ID_CISCO_FNIC 0x0045 | ||
42 | |||
43 | /* Timer to poll notification area for events. Used for MSI interrupts */ | ||
44 | #define FNIC_NOTIFY_TIMER_PERIOD (2 * HZ) | ||
45 | |||
46 | static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES]; | ||
47 | static struct kmem_cache *fnic_io_req_cache; | ||
48 | LIST_HEAD(fnic_list); | ||
49 | DEFINE_SPINLOCK(fnic_list_lock); | ||
50 | |||
51 | /* Devices supported by the fnic module */ | ||
52 | static struct pci_device_id fnic_id_table[] = { | ||
53 | { PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) }, | ||
54 | { 0, } | ||
55 | }; | ||
56 | |||
57 | MODULE_DESCRIPTION(DRV_DESCRIPTION); | ||
58 | MODULE_AUTHOR("Abhijeet Joglekar <abjoglek@cisco.com>, " | ||
59 | "Joseph R. Eykholt <jeykholt@cisco.com>"); | ||
60 | MODULE_LICENSE("GPL v2"); | ||
61 | MODULE_VERSION(DRV_VERSION); | ||
62 | MODULE_DEVICE_TABLE(pci, fnic_id_table); | ||
63 | |||
64 | unsigned int fnic_log_level; | ||
65 | module_param(fnic_log_level, int, S_IRUGO|S_IWUSR); | ||
66 | MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels"); | ||
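| /* Bit values (see fnic.h): 0x01 main, 0x02 FCS, 0x04 SCSI, 0x08 ISR */ | ||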
67 | |||
68 | |||
69 | static struct libfc_function_template fnic_transport_template = { | ||
70 | .frame_send = fnic_send, | ||
71 | .fcp_abort_io = fnic_empty_scsi_cleanup, | ||
72 | .fcp_cleanup = fnic_empty_scsi_cleanup, | ||
73 | .exch_mgr_reset = fnic_exch_mgr_reset | ||
74 | }; | ||
75 | |||
76 | static int fnic_slave_alloc(struct scsi_device *sdev) | ||
77 | { | ||
78 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); | ||
79 | struct fc_lport *lp = shost_priv(sdev->host); | ||
80 | struct fnic *fnic = lport_priv(lp); | ||
81 | |||
82 | sdev->tagged_supported = 1; | ||
83 | |||
84 | if (!rport || fc_remote_port_chkready(rport)) | ||
85 | return -ENXIO; | ||
86 | |||
87 | scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH); | ||
88 | rport->dev_loss_tmo = fnic->config.port_down_timeout / 1000; | ||
89 | |||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static struct scsi_host_template fnic_host_template = { | ||
94 | .module = THIS_MODULE, | ||
95 | .name = DRV_NAME, | ||
96 | .queuecommand = fnic_queuecommand, | ||
97 | .eh_abort_handler = fnic_abort_cmd, | ||
98 | .eh_device_reset_handler = fnic_device_reset, | ||
99 | .eh_host_reset_handler = fnic_host_reset, | ||
100 | .slave_alloc = fnic_slave_alloc, | ||
101 | .change_queue_depth = fc_change_queue_depth, | ||
102 | .change_queue_type = fc_change_queue_type, | ||
103 | .this_id = -1, | ||
104 | .cmd_per_lun = 3, | ||
105 | .can_queue = FNIC_MAX_IO_REQ, | ||
106 | .use_clustering = ENABLE_CLUSTERING, | ||
107 | .sg_tablesize = FNIC_MAX_SG_DESC_CNT, | ||
108 | .max_sectors = 0xffff, | ||
109 | .shost_attrs = fnic_attrs, | ||
110 | }; | ||
111 | |||
112 | static void fnic_get_host_speed(struct Scsi_Host *shost); | ||
113 | static struct scsi_transport_template *fnic_fc_transport; | ||
114 | static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *); | ||
115 | |||
116 | static struct fc_function_template fnic_fc_functions = { | ||
117 | |||
118 | .show_host_node_name = 1, | ||
119 | .show_host_port_name = 1, | ||
120 | .show_host_supported_classes = 1, | ||
121 | .show_host_supported_fc4s = 1, | ||
122 | .show_host_active_fc4s = 1, | ||
123 | .show_host_maxframe_size = 1, | ||
124 | .show_host_port_id = 1, | ||
125 | .show_host_supported_speeds = 1, | ||
126 | .get_host_speed = fnic_get_host_speed, | ||
127 | .show_host_speed = 1, | ||
128 | .show_host_port_type = 1, | ||
129 | .get_host_port_state = fc_get_host_port_state, | ||
130 | .show_host_port_state = 1, | ||
131 | .show_host_symbolic_name = 1, | ||
132 | .show_rport_maxframe_size = 1, | ||
133 | .show_rport_supported_classes = 1, | ||
134 | .show_host_fabric_name = 1, | ||
135 | .show_starget_node_name = 1, | ||
136 | .show_starget_port_name = 1, | ||
137 | .show_starget_port_id = 1, | ||
138 | .show_rport_dev_loss_tmo = 1, | ||
139 | .issue_fc_host_lip = fnic_reset, | ||
140 | .get_fc_host_stats = fnic_get_stats, | ||
141 | .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), | ||
142 | .terminate_rport_io = fnic_terminate_rport_io, | ||
143 | }; | ||
144 | |||
145 | static void fnic_get_host_speed(struct Scsi_Host *shost) | ||
146 | { | ||
147 | struct fc_lport *lp = shost_priv(shost); | ||
148 | struct fnic *fnic = lport_priv(lp); | ||
149 | u32 port_speed = vnic_dev_port_speed(fnic->vdev); | ||
150 | |||
151 | /* Add in other values as they get defined in fw */ | ||
152 | switch (port_speed) { | ||
153 | case 10000: | ||
154 | fc_host_speed(shost) = FC_PORTSPEED_10GBIT; | ||
155 | break; | ||
156 | default: | ||
157 | fc_host_speed(shost) = FC_PORTSPEED_10GBIT; | ||
158 | break; | ||
159 | } | ||
160 | } | ||
161 | |||
162 | static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) | ||
163 | { | ||
164 | int ret; | ||
165 | struct fc_lport *lp = shost_priv(host); | ||
166 | struct fnic *fnic = lport_priv(lp); | ||
167 | struct fc_host_statistics *stats = &lp->host_stats; | ||
168 | struct vnic_stats *vs; | ||
169 | unsigned long flags; | ||
170 | |||
171 | if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT)) | ||
172 | return stats; | ||
173 | fnic->stats_time = jiffies; | ||
174 | |||
175 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
176 | ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats); | ||
177 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
178 | |||
179 | if (ret) { | ||
180 | FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, | ||
181 | "fnic: Get vnic stats failed" | ||
182 | " 0x%x", ret); | ||
183 | return stats; | ||
184 | } | ||
185 | vs = fnic->stats; | ||
186 | stats->tx_frames = vs->tx.tx_unicast_frames_ok; | ||
187 | stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4; | ||
188 | stats->rx_frames = vs->rx.rx_unicast_frames_ok; | ||
189 | stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4; | ||
190 | stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors; | ||
191 | stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop; | ||
192 | stats->invalid_crc_count = vs->rx.rx_crc_errors; | ||
193 | stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ; | ||
194 | stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000); | ||
195 | stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000); | ||
196 | |||
197 | return stats; | ||
198 | } | ||
199 | |||
200 | void fnic_log_q_error(struct fnic *fnic) | ||
201 | { | ||
202 | unsigned int i; | ||
203 | u32 error_status; | ||
204 | |||
205 | for (i = 0; i < fnic->raw_wq_count; i++) { | ||
206 | error_status = ioread32(&fnic->wq[i].ctrl->error_status); | ||
207 | if (error_status) | ||
208 | shost_printk(KERN_ERR, fnic->lport->host, | ||
209 | "WQ[%d] error_status" | ||
210 | " %d\n", i, error_status); | ||
211 | } | ||
212 | |||
213 | for (i = 0; i < fnic->rq_count; i++) { | ||
214 | error_status = ioread32(&fnic->rq[i].ctrl->error_status); | ||
215 | if (error_status) | ||
216 | shost_printk(KERN_ERR, fnic->lport->host, | ||
217 | "RQ[%d] error_status" | ||
218 | " %d\n", i, error_status); | ||
219 | } | ||
220 | |||
221 | for (i = 0; i < fnic->wq_copy_count; i++) { | ||
222 | error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status); | ||
223 | if (error_status) | ||
224 | shost_printk(KERN_ERR, fnic->lport->host, | ||
225 | "CWQ[%d] error_status" | ||
226 | " %d\n", i, error_status); | ||
227 | } | ||
228 | } | ||
229 | |||
230 | void fnic_handle_link_event(struct fnic *fnic) | ||
231 | { | ||
232 | unsigned long flags; | ||
233 | |||
234 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
235 | if (fnic->stop_rx_link_events) { | ||
236 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
237 | return; | ||
238 | } | ||
239 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
240 | |||
241 | queue_work(fnic_event_queue, &fnic->link_work); | ||
242 | |||
243 | } | ||
244 | |||
245 | static int fnic_notify_set(struct fnic *fnic) | ||
246 | { | ||
247 | int err; | ||
248 | |||
249 | switch (vnic_dev_get_intr_mode(fnic->vdev)) { | ||
250 | case VNIC_DEV_INTR_MODE_INTX: | ||
251 | err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY); | ||
252 | break; | ||
253 | case VNIC_DEV_INTR_MODE_MSI: | ||
254 | err = vnic_dev_notify_set(fnic->vdev, -1); | ||
255 | break; | ||
256 | case VNIC_DEV_INTR_MODE_MSIX: | ||
257 | err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY); | ||
258 | break; | ||
259 | default: | ||
260 | shost_printk(KERN_ERR, fnic->lport->host, | ||
261 | "Interrupt mode should be set up" | ||
262 | " before devcmd notify set %d\n", | ||
263 | vnic_dev_get_intr_mode(fnic->vdev)); | ||
264 | err = -1; | ||
265 | break; | ||
266 | } | ||
267 | |||
268 | return err; | ||
269 | } | ||
270 | |||
271 | static void fnic_notify_timer(unsigned long data) | ||
272 | { | ||
273 | struct fnic *fnic = (struct fnic *)data; | ||
274 | |||
275 | fnic_handle_link_event(fnic); | ||
276 | mod_timer(&fnic->notify_timer, | ||
277 | round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD)); | ||
278 | } | ||
279 | |||
280 | static void fnic_notify_timer_start(struct fnic *fnic) | ||
281 | { | ||
282 | switch (vnic_dev_get_intr_mode(fnic->vdev)) { | ||
283 | case VNIC_DEV_INTR_MODE_MSI: | ||
284 | /* | ||
285 | * Schedule first timeout immediately. The driver is | ||
286 | * initialized and ready to look for link up notification | ||
287 | */ | ||
288 | mod_timer(&fnic->notify_timer, jiffies); | ||
289 | break; | ||
290 | default: | ||
291 | /* Using intr for notification with INTx/MSI-X */ | ||
292 | break; | ||
293 | } | ||
294 | } | ||
295 | |||
296 | static int fnic_dev_wait(struct vnic_dev *vdev, | ||
297 | int (*start)(struct vnic_dev *, int), | ||
298 | int (*finished)(struct vnic_dev *, int *), | ||
299 | int arg) | ||
300 | { | ||
301 | unsigned long time; | ||
302 | int done; | ||
303 | int err; | ||
304 | |||
305 | err = start(vdev, arg); | ||
306 | if (err) | ||
307 | return err; | ||
308 | |||
309 | /* Wait for func to complete...2 seconds max */ | ||
310 | time = jiffies + (HZ * 2); | ||
311 | do { | ||
312 | err = finished(vdev, &done); | ||
313 | if (err) | ||
314 | return err; | ||
315 | if (done) | ||
316 | return 0; | ||
317 | schedule_timeout_uninterruptible(HZ / 10); | ||
318 | } while (time_after(time, jiffies)); | ||
319 | |||
320 | return -ETIMEDOUT; | ||
321 | } | ||
322 | |||
323 | static int fnic_cleanup(struct fnic *fnic) | ||
324 | { | ||
325 | unsigned int i; | ||
326 | int err; | ||
327 | unsigned long flags; | ||
328 | struct fc_frame *flogi = NULL; | ||
329 | struct fc_frame *flogi_resp = NULL; | ||
330 | |||
331 | vnic_dev_disable(fnic->vdev); | ||
332 | for (i = 0; i < fnic->intr_count; i++) | ||
333 | vnic_intr_mask(&fnic->intr[i]); | ||
334 | |||
335 | for (i = 0; i < fnic->rq_count; i++) { | ||
336 | err = vnic_rq_disable(&fnic->rq[i]); | ||
337 | if (err) | ||
338 | return err; | ||
339 | } | ||
340 | for (i = 0; i < fnic->raw_wq_count; i++) { | ||
341 | err = vnic_wq_disable(&fnic->wq[i]); | ||
342 | if (err) | ||
343 | return err; | ||
344 | } | ||
345 | for (i = 0; i < fnic->wq_copy_count; i++) { | ||
346 | err = vnic_wq_copy_disable(&fnic->wq_copy[i]); | ||
347 | if (err) | ||
348 | return err; | ||
349 | } | ||
350 | |||
351 | /* Clean up completed IOs and FCS frames */ | ||
352 | fnic_wq_copy_cmpl_handler(fnic, -1); | ||
353 | fnic_wq_cmpl_handler(fnic, -1); | ||
354 | fnic_rq_cmpl_handler(fnic, -1); | ||
355 | |||
356 | /* Clean up the IOs and FCS frames that have not completed */ | ||
357 | for (i = 0; i < fnic->raw_wq_count; i++) | ||
358 | vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf); | ||
359 | for (i = 0; i < fnic->rq_count; i++) | ||
360 | vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); | ||
361 | for (i = 0; i < fnic->wq_copy_count; i++) | ||
362 | vnic_wq_copy_clean(&fnic->wq_copy[i], | ||
363 | fnic_wq_copy_cleanup_handler); | ||
364 | |||
365 | for (i = 0; i < fnic->cq_count; i++) | ||
366 | vnic_cq_clean(&fnic->cq[i]); | ||
367 | for (i = 0; i < fnic->intr_count; i++) | ||
368 | vnic_intr_clean(&fnic->intr[i]); | ||
369 | |||
370 | /* | ||
371 | * Remove cached flogi and flogi resp frames if any | ||
372 | * These frames are not in any queue, and therefore queue | ||
373 | * cleanup does not clean them. So clean them explicitly | ||
374 | */ | ||
375 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
376 | flogi = fnic->flogi; | ||
377 | fnic->flogi = NULL; | ||
378 | flogi_resp = fnic->flogi_resp; | ||
379 | fnic->flogi_resp = NULL; | ||
380 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
381 | |||
382 | if (flogi) | ||
383 | dev_kfree_skb(fp_skb(flogi)); | ||
384 | |||
385 | if (flogi_resp) | ||
386 | dev_kfree_skb(fp_skb(flogi_resp)); | ||
387 | |||
388 | mempool_destroy(fnic->io_req_pool); | ||
389 | for (i = 0; i < FNIC_SGL_NUM_CACHES; i++) | ||
390 | mempool_destroy(fnic->io_sgl_pool[i]); | ||
391 | |||
392 | return 0; | ||
393 | } | ||
394 | |||
395 | static void fnic_iounmap(struct fnic *fnic) | ||
396 | { | ||
397 | if (fnic->bar0.vaddr) | ||
398 | iounmap(fnic->bar0.vaddr); | ||
399 | } | ||
400 | |||
401 | /* | ||
402 | * Allocate element for mempools requiring GFP_DMA flag. | ||
403 | * Otherwise, checks in kmem_flagcheck() hit BUG_ON(). | ||
404 | */ | ||
405 | static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data) | ||
406 | { | ||
407 | struct kmem_cache *mem = pool_data; | ||
408 | |||
409 | return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA); | ||
410 | } | ||
411 | |||
412 | static int __devinit fnic_probe(struct pci_dev *pdev, | ||
413 | const struct pci_device_id *ent) | ||
414 | { | ||
415 | struct Scsi_Host *host; | ||
416 | struct fc_lport *lp; | ||
417 | struct fnic *fnic; | ||
418 | mempool_t *pool; | ||
419 | int err; | ||
420 | int i; | ||
421 | unsigned long flags; | ||
422 | |||
423 | /* | ||
424 | * Allocate SCSI Host and set up association between host, | ||
425 | * local port, and fnic | ||
426 | */ | ||
427 | host = scsi_host_alloc(&fnic_host_template, | ||
428 | sizeof(struct fc_lport) + sizeof(struct fnic)); | ||
429 | if (!host) { | ||
430 | printk(KERN_ERR PFX "Unable to alloc SCSI host\n"); | ||
431 | err = -ENOMEM; | ||
432 | goto err_out; | ||
433 | } | ||
434 | lp = shost_priv(host); | ||
435 | lp->host = host; | ||
436 | fnic = lport_priv(lp); | ||
437 | fnic->lport = lp; | ||
438 | |||
439 | snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, | ||
440 | host->host_no); | ||
441 | |||
442 | host->transportt = fnic_fc_transport; | ||
443 | |||
444 | err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ); | ||
445 | if (err) { | ||
446 | shost_printk(KERN_ERR, fnic->lport->host, | ||
447 | "Unable to alloc shared tag map\n"); | ||
448 | goto err_out_free_hba; | ||
449 | } | ||
450 | |||
451 | /* Setup PCI resources */ | ||
452 | pci_set_drvdata(pdev, fnic); | ||
453 | |||
454 | fnic->pdev = pdev; | ||
455 | |||
456 | err = pci_enable_device(pdev); | ||
457 | if (err) { | ||
458 | shost_printk(KERN_ERR, fnic->lport->host, | ||
459 | "Cannot enable PCI device, aborting.\n"); | ||
460 | goto err_out_free_hba; | ||
461 | } | ||
462 | |||
463 | err = pci_request_regions(pdev, DRV_NAME); | ||
464 | if (err) { | ||
465 | shost_printk(KERN_ERR, fnic->lport->host, | ||
466 | "Cannot enable PCI resources, aborting\n"); | ||
467 | goto err_out_disable_device; | ||
468 | } | ||
469 | |||
470 | pci_set_master(pdev); | ||
471 | |||
472 | /* Query PCI controller on system for DMA addressing | ||
473 | * limitation for the device. Try 40-bit first, and | ||
474 | * fail to 32-bit. | ||
475 | */ | ||
476 | err = pci_set_dma_mask(pdev, DMA_40BIT_MASK); | ||
477 | if (err) { | ||
478 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
479 | if (err) { | ||
480 | shost_printk(KERN_ERR, fnic->lport->host, | ||
481 | "No usable DMA configuration " | ||
482 | "aborting\n"); | ||
483 | goto err_out_release_regions; | ||
484 | } | ||
485 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | ||
486 | if (err) { | ||
487 | shost_printk(KERN_ERR, fnic->lport->host, | ||
488 | "Unable to obtain 32-bit DMA " | ||
489 | "for consistent allocations, aborting.\n"); | ||
490 | goto err_out_release_regions; | ||
491 | } | ||
492 | } else { | ||
493 | err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK); | ||
494 | if (err) { | ||
495 | shost_printk(KERN_ERR, fnic->lport->host, | ||
496 | "Unable to obtain 40-bit DMA " | ||
497 | "for consistent allocations, aborting.\n"); | ||
498 | goto err_out_release_regions; | ||
499 | } | ||
500 | } | ||
501 | |||
502 | /* Map vNIC resources from BAR0 */ | ||
503 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | ||
504 | shost_printk(KERN_ERR, fnic->lport->host, | ||
505 | "BAR0 not memory-map'able, aborting.\n"); | ||
506 | err = -ENODEV; | ||
507 | goto err_out_release_regions; | ||
508 | } | ||
509 | |||
510 | fnic->bar0.vaddr = pci_iomap(pdev, 0, 0); | ||
511 | fnic->bar0.bus_addr = pci_resource_start(pdev, 0); | ||
512 | fnic->bar0.len = pci_resource_len(pdev, 0); | ||
513 | |||
514 | if (!fnic->bar0.vaddr) { | ||
515 | shost_printk(KERN_ERR, fnic->lport->host, | ||
516 | "Cannot memory-map BAR0 res hdr, " | ||
517 | "aborting.\n"); | ||
518 | err = -ENODEV; | ||
519 | goto err_out_release_regions; | ||
520 | } | ||
521 | |||
522 | fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0); | ||
523 | if (!fnic->vdev) { | ||
524 | shost_printk(KERN_ERR, fnic->lport->host, | ||
525 | "vNIC registration failed, " | ||
526 | "aborting.\n"); | ||
527 | err = -ENODEV; | ||
528 | goto err_out_iounmap; | ||
529 | } | ||
530 | |||
531 | err = fnic_dev_wait(fnic->vdev, vnic_dev_open, | ||
532 | vnic_dev_open_done, 0); | ||
533 | if (err) { | ||
534 | shost_printk(KERN_ERR, fnic->lport->host, | ||
535 | "vNIC dev open failed, aborting.\n"); | ||
536 | goto err_out_vnic_unregister; | ||
537 | } | ||
538 | |||
539 | err = vnic_dev_init(fnic->vdev, 0); | ||
540 | if (err) { | ||
541 | shost_printk(KERN_ERR, fnic->lport->host, | ||
542 | "vNIC dev init failed, aborting.\n"); | ||
543 | goto err_out_dev_close; | ||
544 | } | ||
545 | |||
546 | err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr); | ||
547 | if (err) { | ||
548 | shost_printk(KERN_ERR, fnic->lport->host, | ||
549 | "vNIC get MAC addr failed \n"); | ||
550 | goto err_out_dev_close; | ||
551 | } | ||
552 | |||
553 | /* Get vNIC configuration */ | ||
554 | err = fnic_get_vnic_config(fnic); | ||
555 | if (err) { | ||
556 | shost_printk(KERN_ERR, fnic->lport->host, | ||
557 | "Get vNIC configuration failed, " | ||
558 | "aborting.\n"); | ||
559 | goto err_out_dev_close; | ||
560 | } | ||
561 | host->max_lun = fnic->config.luns_per_tgt; | ||
562 | host->max_id = FNIC_MAX_FCP_TARGET; | ||
563 | |||
564 | fnic_get_res_counts(fnic); | ||
565 | |||
566 | err = fnic_set_intr_mode(fnic); | ||
567 | if (err) { | ||
568 | shost_printk(KERN_ERR, fnic->lport->host, | ||
569 | "Failed to set intr mode, " | ||
570 | "aborting.\n"); | ||
571 | goto err_out_dev_close; | ||
572 | } | ||
573 | |||
574 | err = fnic_request_intr(fnic); | ||
575 | if (err) { | ||
576 | shost_printk(KERN_ERR, fnic->lport->host, | ||
577 | "Unable to request irq.\n"); | ||
578 | goto err_out_clear_intr; | ||
579 | } | ||
580 | |||
581 | err = fnic_alloc_vnic_resources(fnic); | ||
582 | if (err) { | ||
583 | shost_printk(KERN_ERR, fnic->lport->host, | ||
584 | "Failed to alloc vNIC resources, " | ||
585 | "aborting.\n"); | ||
586 | goto err_out_free_intr; | ||
587 | } | ||
588 | |||
589 | |||
590 | /* initialize all fnic locks */ | ||
591 | spin_lock_init(&fnic->fnic_lock); | ||
592 | |||
593 | for (i = 0; i < FNIC_WQ_MAX; i++) | ||
594 | spin_lock_init(&fnic->wq_lock[i]); | ||
595 | |||
596 | for (i = 0; i < FNIC_WQ_COPY_MAX; i++) { | ||
597 | spin_lock_init(&fnic->wq_copy_lock[i]); | ||
598 | fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK; | ||
599 | fnic->fw_ack_recd[i] = 0; | ||
600 | fnic->fw_ack_index[i] = -1; | ||
601 | } | ||
602 | |||
603 | for (i = 0; i < FNIC_IO_LOCKS; i++) | ||
604 | spin_lock_init(&fnic->io_req_lock[i]); | ||
605 | |||
606 | fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); | ||
607 | if (!fnic->io_req_pool) | ||
608 | goto err_out_free_resources; | ||
609 | |||
610 | pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab, | ||
611 | fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); | ||
612 | if (!pool) | ||
613 | goto err_out_free_ioreq_pool; | ||
614 | fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool; | ||
615 | |||
616 | pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab, | ||
617 | fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); | ||
618 | if (!pool) | ||
619 | goto err_out_free_dflt_pool; | ||
620 | fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool; | ||
621 | |||
622 | /* setup vlan config, hw inserts vlan header */ | ||
623 | fnic->vlan_hw_insert = 1; | ||
624 | fnic->vlan_id = 0; | ||
625 | |||
626 | fnic->flogi_oxid = FC_XID_UNKNOWN; | ||
627 | fnic->flogi = NULL; | ||
628 | fnic->flogi_resp = NULL; | ||
629 | fnic->state = FNIC_IN_FC_MODE; | ||
630 | |||
631 | /* Enable hardware stripping of vlan header on ingress */ | ||
632 | fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1); | ||
633 | |||
634 | /* Setup notification buffer area */ | ||
635 | err = fnic_notify_set(fnic); | ||
636 | if (err) { | ||
637 | shost_printk(KERN_ERR, fnic->lport->host, | ||
638 | "Failed to alloc notify buffer, aborting.\n"); | ||
639 | goto err_out_free_max_pool; | ||
640 | } | ||
641 | |||
642 | /* Setup notify timer when using MSI interrupts */ | ||
643 | if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) | ||
644 | setup_timer(&fnic->notify_timer, | ||
645 | fnic_notify_timer, (unsigned long)fnic); | ||
646 | |||
647 | /* allocate RQ buffers and post them to RQ*/ | ||
648 | for (i = 0; i < fnic->rq_count; i++) { | ||
649 | err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); | ||
650 | if (err) { | ||
651 | shost_printk(KERN_ERR, fnic->lport->host, | ||
652 | "fnic_alloc_rq_frame can't alloc " | ||
653 | "frame\n"); | ||
654 | goto err_out_free_rq_buf; | ||
655 | } | ||
656 | } | ||
657 | |||
658 | /* | ||
659 | * Initialization done with PCI system, hardware, firmware. | ||
660 | * Add host to SCSI | ||
661 | */ | ||
662 | err = scsi_add_host(lp->host, &pdev->dev); | ||
663 | if (err) { | ||
664 | shost_printk(KERN_ERR, fnic->lport->host, | ||
665 | "fnic: scsi_add_host failed...exiting\n"); | ||
666 | goto err_out_free_rq_buf; | ||
667 | } | ||
668 | |||
669 | /* Start local port initialization */ | ||
670 | |||
671 | lp->link_up = 0; | ||
672 | lp->tt = fnic_transport_template; | ||
673 | |||
674 | lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3, | ||
675 | FCPIO_HOST_EXCH_RANGE_START, | ||
676 | FCPIO_HOST_EXCH_RANGE_END); | ||
677 | if (!lp->emp) { | ||
678 | err = -ENOMEM; | ||
679 | goto err_out_remove_scsi_host; | ||
680 | } | ||
681 | |||
682 | lp->max_retry_count = fnic->config.flogi_retries; | ||
683 | lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | | ||
684 | FCP_SPPF_CONF_COMPL); | ||
685 | if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) | ||
686 | lp->service_params |= FCP_SPPF_RETRY; | ||
687 | |||
688 | lp->boot_time = jiffies; | ||
689 | lp->e_d_tov = fnic->config.ed_tov; | ||
690 | lp->r_a_tov = fnic->config.ra_tov; | ||
691 | lp->link_supported_speeds = FC_PORTSPEED_10GBIT; | ||
692 | fc_set_wwnn(lp, fnic->config.node_wwn); | ||
693 | fc_set_wwpn(lp, fnic->config.port_wwn); | ||
694 | |||
695 | fc_exch_init(lp); | ||
696 | fc_lport_init(lp); | ||
697 | fc_elsct_init(lp); | ||
698 | fc_rport_init(lp); | ||
699 | fc_disc_init(lp); | ||
700 | |||
701 | fc_lport_config(lp); | ||
702 | |||
703 | if (fc_set_mfs(lp, fnic->config.maxdatafieldsize + | ||
704 | sizeof(struct fc_frame_header))) { | ||
705 | err = -EINVAL; | ||
706 | goto err_out_free_exch_mgr; | ||
707 | } | ||
708 | fc_host_maxframe_size(lp->host) = lp->mfs; | ||
709 | |||
710 | sprintf(fc_host_symbolic_name(lp->host), | ||
711 | DRV_NAME " v" DRV_VERSION " over %s", fnic->name); | ||
712 | |||
713 | spin_lock_irqsave(&fnic_list_lock, flags); | ||
714 | list_add_tail(&fnic->list, &fnic_list); | ||
715 | spin_unlock_irqrestore(&fnic_list_lock, flags); | ||
716 | |||
717 | INIT_WORK(&fnic->link_work, fnic_handle_link); | ||
718 | INIT_WORK(&fnic->frame_work, fnic_handle_frame); | ||
719 | skb_queue_head_init(&fnic->frame_queue); | ||
720 | |||
721 | /* Enable all queues */ | ||
722 | for (i = 0; i < fnic->raw_wq_count; i++) | ||
723 | vnic_wq_enable(&fnic->wq[i]); | ||
724 | for (i = 0; i < fnic->rq_count; i++) | ||
725 | vnic_rq_enable(&fnic->rq[i]); | ||
726 | for (i = 0; i < fnic->wq_copy_count; i++) | ||
727 | vnic_wq_copy_enable(&fnic->wq_copy[i]); | ||
728 | |||
729 | fc_fabric_login(lp); | ||
730 | |||
731 | vnic_dev_enable(fnic->vdev); | ||
732 | for (i = 0; i < fnic->intr_count; i++) | ||
733 | vnic_intr_unmask(&fnic->intr[i]); | ||
734 | |||
735 | fnic_notify_timer_start(fnic); | ||
736 | |||
737 | return 0; | ||
738 | |||
739 | err_out_free_exch_mgr: | ||
740 | fc_exch_mgr_free(lp->emp); | ||
741 | err_out_remove_scsi_host: | ||
742 | fc_remove_host(fnic->lport->host); | ||
743 | scsi_remove_host(fnic->lport->host); | ||
744 | err_out_free_rq_buf: | ||
745 | for (i = 0; i < fnic->rq_count; i++) | ||
746 | vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); | ||
747 | vnic_dev_notify_unset(fnic->vdev); | ||
748 | err_out_free_max_pool: | ||
749 | mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]); | ||
750 | err_out_free_dflt_pool: | ||
751 | mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]); | ||
752 | err_out_free_ioreq_pool: | ||
753 | mempool_destroy(fnic->io_req_pool); | ||
754 | err_out_free_resources: | ||
755 | fnic_free_vnic_resources(fnic); | ||
756 | err_out_free_intr: | ||
757 | fnic_free_intr(fnic); | ||
758 | err_out_clear_intr: | ||
759 | fnic_clear_intr_mode(fnic); | ||
760 | err_out_dev_close: | ||
761 | vnic_dev_close(fnic->vdev); | ||
762 | err_out_vnic_unregister: | ||
763 | vnic_dev_unregister(fnic->vdev); | ||
764 | err_out_iounmap: | ||
765 | fnic_iounmap(fnic); | ||
766 | err_out_release_regions: | ||
767 | pci_release_regions(pdev); | ||
768 | err_out_disable_device: | ||
769 | pci_disable_device(pdev); | ||
770 | err_out_free_hba: | ||
771 | scsi_host_put(lp->host); | ||
772 | err_out: | ||
773 | return err; | ||
774 | } | ||
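The DMA setup in fnic_probe() above tries a 40-bit streaming mask first and only then falls back to 32-bit, setting the matching consistent mask in each branch. Kernels newer than this patch provide dma_set_mask_and_coherent() (linux/dma-mapping.h), which collapses the two calls per branch into one; a minimal sketch of the same fallback, assuming only the pdev and error labels already present in fnic_probe():

	/* Sketch only: the 40-bit-then-32-bit fallback written with the
	 * combined helper from later kernels; not part of this patch.
	 */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "No usable DMA configuration, aborting\n");
		goto err_out_release_regions;
	}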
775 | |||
776 | static void __devexit fnic_remove(struct pci_dev *pdev) | ||
777 | { | ||
778 | struct fnic *fnic = pci_get_drvdata(pdev); | ||
779 | unsigned long flags; | ||
780 | |||
781 | /* | ||
782 | * Mark state so that the workqueue thread stops forwarding | ||
783 | * received frames and link events to the local port. ISR and | ||
784 | * other threads that can queue work items will also stop | ||
785 | * creating work items on the fnic workqueue | ||
786 | */ | ||
787 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
788 | fnic->stop_rx_link_events = 1; | ||
789 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
790 | |||
791 | if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) | ||
792 | del_timer_sync(&fnic->notify_timer); | ||
793 | |||
794 | /* | ||
795 | * Flush the fnic event queue. After this call, there should | ||
796 | * be no event queued for this fnic device in the workqueue | ||
797 | */ | ||
798 | flush_workqueue(fnic_event_queue); | ||
799 | skb_queue_purge(&fnic->frame_queue); | ||
800 | |||
801 | /* | ||
802 | * Log off the fabric. This stops all remote ports and the | ||
803 | * dns port, and flushes all rport, disc, and lport work | ||
804 | * before returning | ||
805 | */ | ||
806 | fc_fabric_logoff(fnic->lport); | ||
807 | |||
808 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
809 | fnic->in_remove = 1; | ||
810 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
811 | |||
812 | fc_lport_destroy(fnic->lport); | ||
813 | |||
814 | /* | ||
815 | * This stops the fnic device, masks all interrupts. Completed | ||
816 | * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are | ||
817 | * cleaned up | ||
818 | */ | ||
819 | fnic_cleanup(fnic); | ||
820 | |||
821 | BUG_ON(!skb_queue_empty(&fnic->frame_queue)); | ||
822 | |||
823 | spin_lock_irqsave(&fnic_list_lock, flags); | ||
824 | list_del(&fnic->list); | ||
825 | spin_unlock_irqrestore(&fnic_list_lock, flags); | ||
826 | |||
827 | fc_remove_host(fnic->lport->host); | ||
828 | scsi_remove_host(fnic->lport->host); | ||
829 | fc_exch_mgr_free(fnic->lport->emp); | ||
830 | vnic_dev_notify_unset(fnic->vdev); | ||
831 | fnic_free_vnic_resources(fnic); | ||
832 | fnic_free_intr(fnic); | ||
833 | fnic_clear_intr_mode(fnic); | ||
834 | vnic_dev_close(fnic->vdev); | ||
835 | vnic_dev_unregister(fnic->vdev); | ||
836 | fnic_iounmap(fnic); | ||
837 | pci_release_regions(pdev); | ||
838 | pci_disable_device(pdev); | ||
839 | pci_set_drvdata(pdev, NULL); | ||
840 | scsi_host_put(fnic->lport->host); | ||
841 | } | ||
842 | |||
843 | static struct pci_driver fnic_driver = { | ||
844 | .name = DRV_NAME, | ||
845 | .id_table = fnic_id_table, | ||
846 | .probe = fnic_probe, | ||
847 | .remove = __devexit_p(fnic_remove), | ||
848 | }; | ||
849 | |||
850 | static int __init fnic_init_module(void) | ||
851 | { | ||
852 | size_t len; | ||
853 | int err = 0; | ||
854 | |||
855 | printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); | ||
856 | |||
857 | /* Create a cache for allocation of default size sgls */ | ||
858 | len = sizeof(struct fnic_dflt_sgl_list); | ||
859 | fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create | ||
860 | ("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, | ||
861 | SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, | ||
862 | NULL); | ||
863 | if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) { | ||
864 | printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n"); | ||
865 | err = -ENOMEM; | ||
866 | goto err_create_fnic_sgl_slab_dflt; | ||
867 | } | ||
868 | |||
869 | /* Create a cache for allocation of max size sgls*/ | ||
870 | len = sizeof(struct fnic_sgl_list); | ||
871 | fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create | ||
872 | ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, | ||
873 | SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, | ||
874 | NULL); | ||
875 | if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) { | ||
876 | printk(KERN_ERR PFX "failed to create fnic max sgl slab\n"); | ||
877 | err = -ENOMEM; | ||
878 | goto err_create_fnic_sgl_slab_max; | ||
879 | } | ||
880 | |||
881 | /* Create a cache of io_req structs for use via mempool */ | ||
882 | fnic_io_req_cache = kmem_cache_create("fnic_io_req", | ||
883 | sizeof(struct fnic_io_req), | ||
884 | 0, SLAB_HWCACHE_ALIGN, NULL); | ||
885 | if (!fnic_io_req_cache) { | ||
886 | printk(KERN_ERR PFX "failed to create fnic io_req slab\n"); | ||
887 | err = -ENOMEM; | ||
888 | goto err_create_fnic_ioreq_slab; | ||
889 | } | ||
890 | |||
891 | fnic_event_queue = create_singlethread_workqueue("fnic_event_wq"); | ||
892 | if (!fnic_event_queue) { | ||
893 | printk(KERN_ERR PFX "fnic work queue create failed\n"); | ||
894 | err = -ENOMEM; | ||
895 | goto err_create_fnic_workq; | ||
896 | } | ||
897 | |||
898 | spin_lock_init(&fnic_list_lock); | ||
899 | INIT_LIST_HEAD(&fnic_list); | ||
900 | |||
901 | fnic_fc_transport = fc_attach_transport(&fnic_fc_functions); | ||
902 | if (!fnic_fc_transport) { | ||
903 | printk(KERN_ERR PFX "fc_attach_transport error\n"); | ||
904 | err = -ENOMEM; | ||
905 | goto err_fc_transport; | ||
906 | } | ||
907 | |||
908 | /* register the driver with PCI system */ | ||
909 | err = pci_register_driver(&fnic_driver); | ||
910 | if (err < 0) { | ||
911 | printk(KERN_ERR PFX "pci register error\n"); | ||
912 | goto err_pci_register; | ||
913 | } | ||
914 | return err; | ||
915 | |||
916 | err_pci_register: | ||
917 | fc_release_transport(fnic_fc_transport); | ||
918 | err_fc_transport: | ||
919 | destroy_workqueue(fnic_event_queue); | ||
920 | err_create_fnic_workq: | ||
921 | kmem_cache_destroy(fnic_io_req_cache); | ||
922 | err_create_fnic_ioreq_slab: | ||
923 | kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); | ||
924 | err_create_fnic_sgl_slab_max: | ||
925 | kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); | ||
926 | err_create_fnic_sgl_slab_dflt: | ||
927 | return err; | ||
928 | } | ||
929 | |||
930 | static void __exit fnic_cleanup_module(void) | ||
931 | { | ||
932 | pci_unregister_driver(&fnic_driver); | ||
933 | destroy_workqueue(fnic_event_queue); | ||
934 | kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); | ||
935 | kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); | ||
936 | kmem_cache_destroy(fnic_io_req_cache); | ||
937 | fc_release_transport(fnic_fc_transport); | ||
938 | } | ||
939 | |||
940 | module_init(fnic_init_module); | ||
941 | module_exit(fnic_cleanup_module); | ||
942 | |||
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c new file mode 100644 index 000000000000..7ba61ec715d2 --- /dev/null +++ b/drivers/scsi/fnic/fnic_res.c | |||
@@ -0,0 +1,444 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/pci.h> | ||
21 | #include "wq_enet_desc.h" | ||
22 | #include "rq_enet_desc.h" | ||
23 | #include "cq_enet_desc.h" | ||
24 | #include "vnic_resource.h" | ||
25 | #include "vnic_dev.h" | ||
26 | #include "vnic_wq.h" | ||
27 | #include "vnic_rq.h" | ||
28 | #include "vnic_cq.h" | ||
29 | #include "vnic_intr.h" | ||
30 | #include "vnic_stats.h" | ||
31 | #include "vnic_nic.h" | ||
32 | #include "fnic.h" | ||
33 | |||
34 | int fnic_get_vnic_config(struct fnic *fnic) | ||
35 | { | ||
36 | struct vnic_fc_config *c = &fnic->config; | ||
37 | int err; | ||
38 | |||
39 | #define GET_CONFIG(m) \ | ||
40 | do { \ | ||
41 | err = vnic_dev_spec(fnic->vdev, \ | ||
42 | offsetof(struct vnic_fc_config, m), \ | ||
43 | sizeof(c->m), &c->m); \ | ||
44 | if (err) { \ | ||
45 | shost_printk(KERN_ERR, fnic->lport->host, \ | ||
46 | "Error getting %s, %d\n", #m, \ | ||
47 | err); \ | ||
48 | return err; \ | ||
49 | } \ | ||
50 | } while (0) | ||
51 | |||
52 | GET_CONFIG(node_wwn); | ||
53 | GET_CONFIG(port_wwn); | ||
54 | GET_CONFIG(wq_enet_desc_count); | ||
55 | GET_CONFIG(wq_copy_desc_count); | ||
56 | GET_CONFIG(rq_desc_count); | ||
57 | GET_CONFIG(maxdatafieldsize); | ||
58 | GET_CONFIG(ed_tov); | ||
59 | GET_CONFIG(ra_tov); | ||
60 | GET_CONFIG(intr_timer); | ||
61 | GET_CONFIG(intr_timer_type); | ||
62 | GET_CONFIG(flags); | ||
63 | GET_CONFIG(flogi_retries); | ||
64 | GET_CONFIG(flogi_timeout); | ||
65 | GET_CONFIG(plogi_retries); | ||
66 | GET_CONFIG(plogi_timeout); | ||
67 | GET_CONFIG(io_throttle_count); | ||
68 | GET_CONFIG(link_down_timeout); | ||
69 | GET_CONFIG(port_down_timeout); | ||
70 | GET_CONFIG(port_down_io_retries); | ||
71 | GET_CONFIG(luns_per_tgt); | ||
72 | |||
73 | c->wq_enet_desc_count = | ||
74 | min_t(u32, VNIC_FNIC_WQ_DESCS_MAX, | ||
75 | max_t(u32, VNIC_FNIC_WQ_DESCS_MIN, | ||
76 | c->wq_enet_desc_count)); | ||
77 | c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16); | ||
78 | |||
79 | c->wq_copy_desc_count = | ||
80 | min_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MAX, | ||
81 | max_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MIN, | ||
82 | c->wq_copy_desc_count)); | ||
83 | c->wq_copy_desc_count = ALIGN(c->wq_copy_desc_count, 16); | ||
84 | |||
85 | c->rq_desc_count = | ||
86 | min_t(u32, VNIC_FNIC_RQ_DESCS_MAX, | ||
87 | max_t(u32, VNIC_FNIC_RQ_DESCS_MIN, | ||
88 | c->rq_desc_count)); | ||
89 | c->rq_desc_count = ALIGN(c->rq_desc_count, 16); | ||
90 | |||
91 | c->maxdatafieldsize = | ||
92 | min_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MAX, | ||
93 | max_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MIN, | ||
94 | c->maxdatafieldsize)); | ||
95 | c->ed_tov = | ||
96 | min_t(u32, VNIC_FNIC_EDTOV_MAX, | ||
97 | max_t(u32, VNIC_FNIC_EDTOV_MIN, | ||
98 | c->ed_tov)); | ||
99 | |||
100 | c->ra_tov = | ||
101 | min_t(u32, VNIC_FNIC_RATOV_MAX, | ||
102 | max_t(u32, VNIC_FNIC_RATOV_MIN, | ||
103 | c->ra_tov)); | ||
104 | |||
105 | c->flogi_retries = | ||
106 | min_t(u32, VNIC_FNIC_FLOGI_RETRIES_MAX, c->flogi_retries); | ||
107 | |||
108 | c->flogi_timeout = | ||
109 | min_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MAX, | ||
110 | max_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MIN, | ||
111 | c->flogi_timeout)); | ||
112 | |||
113 | c->plogi_retries = | ||
114 | min_t(u32, VNIC_FNIC_PLOGI_RETRIES_MAX, c->plogi_retries); | ||
115 | |||
116 | c->plogi_timeout = | ||
117 | min_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MAX, | ||
118 | max_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MIN, | ||
119 | c->plogi_timeout)); | ||
120 | |||
121 | c->io_throttle_count = | ||
122 | min_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MAX, | ||
123 | max_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MIN, | ||
124 | c->io_throttle_count)); | ||
125 | |||
126 | c->link_down_timeout = | ||
127 | min_t(u32, VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX, | ||
128 | c->link_down_timeout); | ||
129 | |||
130 | c->port_down_timeout = | ||
131 | min_t(u32, VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX, | ||
132 | c->port_down_timeout); | ||
133 | |||
134 | c->port_down_io_retries = | ||
135 | min_t(u32, VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX, | ||
136 | c->port_down_io_retries); | ||
137 | |||
138 | c->luns_per_tgt = | ||
139 | min_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MAX, | ||
140 | max_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MIN, | ||
141 | c->luns_per_tgt)); | ||
142 | |||
143 | c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer); | ||
144 | c->intr_timer_type = c->intr_timer_type; | ||
145 | |||
146 | shost_printk(KERN_INFO, fnic->lport->host, | ||
147 | "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x " | ||
148 | "wq/wq_copy/rq %d/%d/%d\n", | ||
149 | fnic->mac_addr[0], fnic->mac_addr[1], fnic->mac_addr[2], | ||
150 | fnic->mac_addr[3], fnic->mac_addr[4], fnic->mac_addr[5], | ||
151 | c->wq_enet_desc_count, c->wq_copy_desc_count, | ||
152 | c->rq_desc_count); | ||
153 | shost_printk(KERN_INFO, fnic->lport->host, | ||
154 | "vNIC node wwn %llx port wwn %llx\n", | ||
155 | c->node_wwn, c->port_wwn); | ||
156 | shost_printk(KERN_INFO, fnic->lport->host, | ||
157 | "vNIC ed_tov %d ra_tov %d\n", | ||
158 | c->ed_tov, c->ra_tov); | ||
159 | shost_printk(KERN_INFO, fnic->lport->host, | ||
160 | "vNIC mtu %d intr timer %d\n", | ||
161 | c->maxdatafieldsize, c->intr_timer); | ||
162 | shost_printk(KERN_INFO, fnic->lport->host, | ||
163 | "vNIC flags 0x%x luns per tgt %d\n", | ||
164 | c->flags, c->luns_per_tgt); | ||
165 | shost_printk(KERN_INFO, fnic->lport->host, | ||
166 | "vNIC flogi_retries %d flogi timeout %d\n", | ||
167 | c->flogi_retries, c->flogi_timeout); | ||
168 | shost_printk(KERN_INFO, fnic->lport->host, | ||
169 | "vNIC plogi retries %d plogi timeout %d\n", | ||
170 | c->plogi_retries, c->plogi_timeout); | ||
171 | shost_printk(KERN_INFO, fnic->lport->host, | ||
172 | "vNIC io throttle count %d link dn timeout %d\n", | ||
173 | c->io_throttle_count, c->link_down_timeout); | ||
174 | shost_printk(KERN_INFO, fnic->lport->host, | ||
175 | "vNIC port dn io retries %d port dn timeout %d\n", | ||
176 | c->port_down_io_retries, c->port_down_timeout); | ||
177 | |||
178 | return 0; | ||
179 | } | ||
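The GET_CONFIG() macro above reads one member of struct vnic_fc_config from the device at its offsetof() position via vnic_dev_spec(), failing the whole call on the first error. Expanded by hand for a single field (ed_tov, picked only for illustration), each invocation is roughly:

	err = vnic_dev_spec(fnic->vdev,
			    offsetof(struct vnic_fc_config, ed_tov),
			    sizeof(c->ed_tov), &c->ed_tov);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Error getting %s, %d\n", "ed_tov", err);
		return err;
	}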
180 | |||
181 | int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu, | ||
182 | u8 rss_hash_type, | ||
183 | u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, | ||
184 | u8 tso_ipid_split_en, u8 ig_vlan_strip_en) | ||
185 | { | ||
186 | u64 a0, a1; | ||
187 | u32 nic_cfg; | ||
188 | int wait = 1000; | ||
189 | |||
190 | vnic_set_nic_cfg(&nic_cfg, rss_default_cpu, | ||
191 | rss_hash_type, rss_hash_bits, rss_base_cpu, | ||
192 | rss_enable, tso_ipid_split_en, ig_vlan_strip_en); | ||
193 | |||
194 | a0 = nic_cfg; | ||
195 | a1 = 0; | ||
196 | |||
197 | return vnic_dev_cmd(fnic->vdev, CMD_NIC_CFG, &a0, &a1, wait); | ||
198 | } | ||
199 | |||
200 | void fnic_get_res_counts(struct fnic *fnic) | ||
201 | { | ||
202 | fnic->wq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_WQ); | ||
203 | fnic->raw_wq_count = fnic->wq_count - 1; | ||
204 | fnic->wq_copy_count = fnic->wq_count - fnic->raw_wq_count; | ||
205 | fnic->rq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_RQ); | ||
206 | fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ); | ||
207 | fnic->intr_count = vnic_dev_get_res_count(fnic->vdev, | ||
208 | RES_TYPE_INTR_CTRL); | ||
209 | } | ||
210 | |||
211 | void fnic_free_vnic_resources(struct fnic *fnic) | ||
212 | { | ||
213 | unsigned int i; | ||
214 | |||
215 | for (i = 0; i < fnic->raw_wq_count; i++) | ||
216 | vnic_wq_free(&fnic->wq[i]); | ||
217 | |||
218 | for (i = 0; i < fnic->wq_copy_count; i++) | ||
219 | vnic_wq_copy_free(&fnic->wq_copy[i]); | ||
220 | |||
221 | for (i = 0; i < fnic->rq_count; i++) | ||
222 | vnic_rq_free(&fnic->rq[i]); | ||
223 | |||
224 | for (i = 0; i < fnic->cq_count; i++) | ||
225 | vnic_cq_free(&fnic->cq[i]); | ||
226 | |||
227 | for (i = 0; i < fnic->intr_count; i++) | ||
228 | vnic_intr_free(&fnic->intr[i]); | ||
229 | } | ||
230 | |||
231 | int fnic_alloc_vnic_resources(struct fnic *fnic) | ||
232 | { | ||
233 | enum vnic_dev_intr_mode intr_mode; | ||
234 | unsigned int mask_on_assertion; | ||
235 | unsigned int interrupt_offset; | ||
236 | unsigned int error_interrupt_enable; | ||
237 | unsigned int error_interrupt_offset; | ||
238 | unsigned int i, cq_index; | ||
239 | unsigned int wq_copy_cq_desc_count; | ||
240 | int err; | ||
241 | |||
242 | intr_mode = vnic_dev_get_intr_mode(fnic->vdev); | ||
243 | |||
244 | shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n", | ||
245 | intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" : | ||
246 | intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" : | ||
247 | intr_mode == VNIC_DEV_INTR_MODE_MSIX ? | ||
248 | "MSI-X" : "unknown"); | ||
249 | |||
250 | shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: " | ||
251 | "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n", | ||
252 | fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count, | ||
253 | fnic->rq_count, fnic->cq_count, fnic->intr_count); | ||
254 | |||
255 | /* Allocate Raw WQ used for FCS frames */ | ||
256 | for (i = 0; i < fnic->raw_wq_count; i++) { | ||
257 | err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i, | ||
258 | fnic->config.wq_enet_desc_count, | ||
259 | sizeof(struct wq_enet_desc)); | ||
260 | if (err) | ||
261 | goto err_out_cleanup; | ||
262 | } | ||
263 | |||
264 | /* Allocate Copy WQs used for SCSI IOs */ | ||
265 | for (i = 0; i < fnic->wq_copy_count; i++) { | ||
266 | err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i], | ||
267 | (fnic->raw_wq_count + i), | ||
268 | fnic->config.wq_copy_desc_count, | ||
269 | sizeof(struct fcpio_host_req)); | ||
270 | if (err) | ||
271 | goto err_out_cleanup; | ||
272 | } | ||
273 | |||
274 | /* RQ for receiving FCS frames */ | ||
275 | for (i = 0; i < fnic->rq_count; i++) { | ||
276 | err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i, | ||
277 | fnic->config.rq_desc_count, | ||
278 | sizeof(struct rq_enet_desc)); | ||
279 | if (err) | ||
280 | goto err_out_cleanup; | ||
281 | } | ||
282 | |||
283 | /* CQ for each RQ */ | ||
284 | for (i = 0; i < fnic->rq_count; i++) { | ||
285 | cq_index = i; | ||
286 | err = vnic_cq_alloc(fnic->vdev, | ||
287 | &fnic->cq[cq_index], cq_index, | ||
288 | fnic->config.rq_desc_count, | ||
289 | sizeof(struct cq_enet_rq_desc)); | ||
290 | if (err) | ||
291 | goto err_out_cleanup; | ||
292 | } | ||
293 | |||
294 | /* CQ for each WQ */ | ||
295 | for (i = 0; i < fnic->raw_wq_count; i++) { | ||
296 | cq_index = fnic->rq_count + i; | ||
297 | err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index, | ||
298 | fnic->config.wq_enet_desc_count, | ||
299 | sizeof(struct cq_enet_wq_desc)); | ||
300 | if (err) | ||
301 | goto err_out_cleanup; | ||
302 | } | ||
303 | |||
304 | /* CQ for each COPY WQ */ | ||
305 | wq_copy_cq_desc_count = (fnic->config.wq_copy_desc_count * 3); | ||
306 | for (i = 0; i < fnic->wq_copy_count; i++) { | ||
307 | cq_index = fnic->raw_wq_count + fnic->rq_count + i; | ||
308 | err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], | ||
309 | cq_index, | ||
310 | wq_copy_cq_desc_count, | ||
311 | sizeof(struct fcpio_fw_req)); | ||
312 | if (err) | ||
313 | goto err_out_cleanup; | ||
314 | } | ||
315 | |||
316 | for (i = 0; i < fnic->intr_count; i++) { | ||
317 | err = vnic_intr_alloc(fnic->vdev, &fnic->intr[i], i); | ||
318 | if (err) | ||
319 | goto err_out_cleanup; | ||
320 | } | ||
321 | |||
322 | fnic->legacy_pba = vnic_dev_get_res(fnic->vdev, | ||
323 | RES_TYPE_INTR_PBA_LEGACY, 0); | ||
324 | |||
325 | if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) { | ||
326 | shost_printk(KERN_ERR, fnic->lport->host, | ||
327 | "Failed to hook legacy pba resource\n"); | ||
328 | err = -ENODEV; | ||
329 | goto err_out_cleanup; | ||
330 | } | ||
331 | |||
332 | /* | ||
333 | * Init RQ/WQ resources. | ||
334 | * | ||
335 | * RQ[0 to n-1] point to CQ[0 to n-1] | ||
336 | * WQ[0 to m-1] point to CQ[n to n+m-1] | ||
337 | * WQ_COPY[0 to k-1] points to CQ[n+m to n+m+k-1] | ||
338 | * | ||
339 | * Note for copy wq we always initialize with cq_index = 0 | ||
340 | * | ||
341 | * Error interrupt is not enabled for MSI. | ||
342 | */ | ||
343 | |||
344 | switch (intr_mode) { | ||
345 | case VNIC_DEV_INTR_MODE_INTX: | ||
346 | case VNIC_DEV_INTR_MODE_MSIX: | ||
347 | error_interrupt_enable = 1; | ||
348 | error_interrupt_offset = fnic->err_intr_offset; | ||
349 | break; | ||
350 | default: | ||
351 | error_interrupt_enable = 0; | ||
352 | error_interrupt_offset = 0; | ||
353 | break; | ||
354 | } | ||
355 | |||
356 | for (i = 0; i < fnic->rq_count; i++) { | ||
357 | cq_index = i; | ||
358 | vnic_rq_init(&fnic->rq[i], | ||
359 | cq_index, | ||
360 | error_interrupt_enable, | ||
361 | error_interrupt_offset); | ||
362 | } | ||
363 | |||
364 | for (i = 0; i < fnic->raw_wq_count; i++) { | ||
365 | cq_index = i + fnic->rq_count; | ||
366 | vnic_wq_init(&fnic->wq[i], | ||
367 | cq_index, | ||
368 | error_interrupt_enable, | ||
369 | error_interrupt_offset); | ||
370 | } | ||
371 | |||
372 | for (i = 0; i < fnic->wq_copy_count; i++) { | ||
373 | vnic_wq_copy_init(&fnic->wq_copy[i], | ||
374 | 0 /* cq_index 0 - always */, | ||
375 | error_interrupt_enable, | ||
376 | error_interrupt_offset); | ||
377 | } | ||
378 | |||
379 | for (i = 0; i < fnic->cq_count; i++) { | ||
380 | |||
381 | switch (intr_mode) { | ||
382 | case VNIC_DEV_INTR_MODE_MSIX: | ||
383 | interrupt_offset = i; | ||
384 | break; | ||
385 | default: | ||
386 | interrupt_offset = 0; | ||
387 | break; | ||
388 | } | ||
389 | |||
390 | vnic_cq_init(&fnic->cq[i], | ||
391 | 0 /* flow_control_enable */, | ||
392 | 1 /* color_enable */, | ||
393 | 0 /* cq_head */, | ||
394 | 0 /* cq_tail */, | ||
395 | 1 /* cq_tail_color */, | ||
396 | 1 /* interrupt_enable */, | ||
397 | 1 /* cq_entry_enable */, | ||
398 | 0 /* cq_message_enable */, | ||
399 | interrupt_offset, | ||
400 | 0 /* cq_message_addr */); | ||
401 | } | ||
402 | |||
403 | /* | ||
404 | * Init INTR resources | ||
405 | * | ||
406 | * mask_on_assertion is not used for INTx due to the level- | ||
407 | * triggered nature of INTx | ||
408 | */ | ||
409 | |||
410 | switch (intr_mode) { | ||
411 | case VNIC_DEV_INTR_MODE_MSI: | ||
412 | case VNIC_DEV_INTR_MODE_MSIX: | ||
413 | mask_on_assertion = 1; | ||
414 | break; | ||
415 | default: | ||
416 | mask_on_assertion = 0; | ||
417 | break; | ||
418 | } | ||
419 | |||
420 | for (i = 0; i < fnic->intr_count; i++) { | ||
421 | vnic_intr_init(&fnic->intr[i], | ||
422 | fnic->config.intr_timer, | ||
423 | fnic->config.intr_timer_type, | ||
424 | mask_on_assertion); | ||
425 | } | ||
426 | |||
427 | /* init the stats memory by making the first call here */ | ||
428 | err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats); | ||
429 | if (err) { | ||
430 | shost_printk(KERN_ERR, fnic->lport->host, | ||
431 | "vnic_dev_stats_dump failed - x%x\n", err); | ||
432 | goto err_out_cleanup; | ||
433 | } | ||
434 | |||
435 | /* Clear LIF stats */ | ||
436 | vnic_dev_stats_clear(fnic->vdev); | ||
437 | |||
438 | return 0; | ||
439 | |||
440 | err_out_cleanup: | ||
441 | fnic_free_vnic_resources(fnic); | ||
442 | |||
443 | return err; | ||
444 | } | ||
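To make the CQ index layout described in the comment inside fnic_alloc_vnic_resources() concrete: taking rq_count = 1, raw_wq_count = 1 and wq_copy_count = 1 purely as example values, the allocation loops above assign

	cq[0]  completions for rq[0]         (cq_index = i)
	cq[1]  completions for wq[0]         (cq_index = rq_count + i)
	cq[2]  completions for wq_copy[0]    (cq_index = rq_count + raw_wq_count + i)

so the device must expose at least rq_count + raw_wq_count + wq_copy_count completion queues, and the init loops later reuse the same index arithmetic (except the copy WQs, which are always initialized with cq_index 0 as noted).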
diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h new file mode 100644 index 000000000000..b6f310262534 --- /dev/null +++ b/drivers/scsi/fnic/fnic_res.h | |||
@@ -0,0 +1,197 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _FNIC_RES_H_ | ||
19 | #define _FNIC_RES_H_ | ||
20 | |||
21 | #include "wq_enet_desc.h" | ||
22 | #include "rq_enet_desc.h" | ||
23 | #include "vnic_wq.h" | ||
24 | #include "vnic_rq.h" | ||
25 | #include "fnic_io.h" | ||
26 | #include "fcpio.h" | ||
27 | #include "vnic_wq_copy.h" | ||
28 | #include "vnic_cq_copy.h" | ||
29 | |||
30 | static inline void fnic_queue_wq_desc(struct vnic_wq *wq, | ||
31 | void *os_buf, dma_addr_t dma_addr, | ||
32 | unsigned int len, unsigned int fc_eof, | ||
33 | int vlan_tag_insert, | ||
34 | unsigned int vlan_tag, | ||
35 | int cq_entry, int sop, int eop) | ||
36 | { | ||
37 | struct wq_enet_desc *desc = vnic_wq_next_desc(wq); | ||
38 | |||
39 | wq_enet_desc_enc(desc, | ||
40 | (u64)dma_addr | VNIC_PADDR_TARGET, | ||
41 | (u16)len, | ||
42 | 0, /* mss_or_csum_offset */ | ||
43 | (u16)fc_eof, | ||
44 | 0, /* offload_mode */ | ||
45 | (u8)eop, (u8)cq_entry, | ||
46 | 1, /* fcoe_encap */ | ||
47 | (u8)vlan_tag_insert, | ||
48 | (u16)vlan_tag, | ||
49 | 0 /* loopback */); | ||
50 | |||
51 | vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); | ||
52 | } | ||
53 | |||
54 | static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, | ||
55 | u32 req_id, | ||
56 | u32 lunmap_id, u8 spl_flags, | ||
57 | u32 sgl_cnt, u32 sense_len, | ||
58 | u64 sgl_addr, u64 sns_addr, | ||
59 | u8 crn, u8 pri_ta, | ||
60 | u8 flags, u8 *scsi_cdb, | ||
61 | u32 data_len, u8 *lun, | ||
62 | u32 d_id, u16 mss, | ||
63 | u32 ratov, u32 edtov) | ||
64 | { | ||
65 | struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); | ||
66 | |||
67 | desc->hdr.type = FCPIO_ICMND_16; /* enum fcpio_type */ | ||
68 | desc->hdr.status = 0; /* header status entry */ | ||
69 | desc->hdr._resvd = 0; /* reserved */ | ||
70 | desc->hdr.tag.u.req_id = req_id; /* id for this request */ | ||
71 | |||
72 | desc->u.icmnd_16.lunmap_id = lunmap_id; /* index into lunmap table */ | ||
73 | desc->u.icmnd_16.special_req_flags = spl_flags; /* exch req flags */ | ||
74 | desc->u.icmnd_16._resvd0[0] = 0; /* reserved */ | ||
75 | desc->u.icmnd_16._resvd0[1] = 0; /* reserved */ | ||
76 | desc->u.icmnd_16._resvd0[2] = 0; /* reserved */ | ||
77 | desc->u.icmnd_16.sgl_cnt = sgl_cnt; /* scatter-gather list count */ | ||
78 | desc->u.icmnd_16.sense_len = sense_len; /* sense buffer length */ | ||
79 | desc->u.icmnd_16.sgl_addr = sgl_addr; /* scatter-gather list addr */ | ||
80 | desc->u.icmnd_16.sense_addr = sns_addr; /* sense buffer address */ | ||
81 | desc->u.icmnd_16.crn = crn; /* SCSI Command Reference No.*/ | ||
82 | desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */ | ||
83 | desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */ | ||
84 | desc->u.icmnd_16.flags = flags; /* command flags */ | ||
85 | memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, CDB_16); /* SCSI CDB */ | ||
86 | desc->u.icmnd_16.data_len = data_len; /* length of data expected */ | ||
87 | memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */ | ||
88 | desc->u.icmnd_16._resvd2 = 0; /* reserved */ | ||
89 | hton24(desc->u.icmnd_16.d_id, d_id); /* FC vNIC only: Target D_ID */ | ||
90 | desc->u.icmnd_16.mss = mss; /* FC vNIC only: max burst */ | ||
91 | desc->u.icmnd_16.r_a_tov = ratov; /*FC vNIC only: Res. Alloc Timeout */ | ||
92 | desc->u.icmnd_16.e_d_tov = edtov; /*FC vNIC only: Err Detect Timeout */ | ||
93 | |||
94 | vnic_wq_copy_post(wq); | ||
95 | } | ||
96 | |||
97 | static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq, | ||
98 | u32 req_id, u32 lunmap_id, | ||
99 | u32 tm_req, u32 tm_id, u8 *lun, | ||
100 | u32 d_id, u32 r_a_tov, | ||
101 | u32 e_d_tov) | ||
102 | { | ||
103 | struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); | ||
104 | |||
105 | desc->hdr.type = FCPIO_ITMF; /* enum fcpio_type */ | ||
106 | desc->hdr.status = 0; /* header status entry */ | ||
107 | desc->hdr._resvd = 0; /* reserved */ | ||
108 | desc->hdr.tag.u.req_id = req_id; /* id for this request */ | ||
109 | |||
110 | desc->u.itmf.lunmap_id = lunmap_id; /* index into lunmap table */ | ||
111 | desc->u.itmf.tm_req = tm_req; /* SCSI Task Management request */ | ||
112 | desc->u.itmf.t_tag = tm_id; /* tag of fcpio to be aborted */ | ||
113 | desc->u.itmf._resvd = 0; | ||
114 | memcpy(desc->u.itmf.lun, lun, LUN_ADDRESS); /* LUN address */ | ||
115 | desc->u.itmf._resvd1 = 0; | ||
116 | hton24(desc->u.itmf.d_id, d_id); /* FC vNIC only: Target D_ID */ | ||
117 | desc->u.itmf.r_a_tov = r_a_tov; /* FC vNIC only: R_A_TOV in msec */ | ||
118 | desc->u.itmf.e_d_tov = e_d_tov; /* FC vNIC only: E_D_TOV in msec */ | ||
119 | |||
120 | vnic_wq_copy_post(wq); | ||
121 | } | ||
122 | |||
123 | static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq, | ||
124 | u32 req_id, u8 format, | ||
125 | u32 s_id, u8 *gw_mac) | ||
126 | { | ||
127 | struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); | ||
128 | |||
129 | desc->hdr.type = FCPIO_FLOGI_REG; /* enum fcpio_type */ | ||
130 | desc->hdr.status = 0; /* header status entry */ | ||
131 | desc->hdr._resvd = 0; /* reserved */ | ||
132 | desc->hdr.tag.u.req_id = req_id; /* id for this request */ | ||
133 | |||
134 | desc->u.flogi_reg.format = format; | ||
135 | hton24(desc->u.flogi_reg.s_id, s_id); | ||
136 | memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN); | ||
137 | |||
138 | vnic_wq_copy_post(wq); | ||
139 | } | ||
140 | |||
141 | static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq, | ||
142 | u32 req_id) | ||
143 | { | ||
144 | struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); | ||
145 | |||
146 | desc->hdr.type = FCPIO_RESET; /* enum fcpio_type */ | ||
147 | desc->hdr.status = 0; /* header status entry */ | ||
148 | desc->hdr._resvd = 0; /* reserved */ | ||
149 | desc->hdr.tag.u.req_id = req_id; /* id for this request */ | ||
150 | |||
151 | vnic_wq_copy_post(wq); | ||
152 | } | ||
153 | |||
154 | static inline void fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq, | ||
155 | u32 req_id, u64 lunmap_addr, | ||
156 | u32 lunmap_len) | ||
157 | { | ||
158 | struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); | ||
159 | |||
160 | desc->hdr.type = FCPIO_LUNMAP_REQ; /* enum fcpio_type */ | ||
161 | desc->hdr.status = 0; /* header status entry */ | ||
162 | desc->hdr._resvd = 0; /* reserved */ | ||
163 | desc->hdr.tag.u.req_id = req_id; /* id for this request */ | ||
164 | |||
165 | desc->u.lunmap_req.addr = lunmap_addr; /* address of the buffer */ | ||
166 | desc->u.lunmap_req.len = lunmap_len; /* len of the buffer */ | ||
167 | |||
168 | vnic_wq_copy_post(wq); | ||
169 | } | ||
170 | |||
171 | static inline void fnic_queue_rq_desc(struct vnic_rq *rq, | ||
172 | void *os_buf, dma_addr_t dma_addr, | ||
173 | u16 len) | ||
174 | { | ||
175 | struct rq_enet_desc *desc = vnic_rq_next_desc(rq); | ||
176 | |||
177 | rq_enet_desc_enc(desc, | ||
178 | (u64)dma_addr | VNIC_PADDR_TARGET, | ||
179 | RQ_ENET_TYPE_ONLY_SOP, | ||
180 | (u16)len); | ||
181 | |||
182 | vnic_rq_post(rq, os_buf, 0, dma_addr, len); | ||
183 | } | ||
184 | |||
185 | |||
186 | struct fnic; | ||
187 | |||
188 | int fnic_get_vnic_config(struct fnic *); | ||
189 | int fnic_alloc_vnic_resources(struct fnic *); | ||
190 | void fnic_free_vnic_resources(struct fnic *); | ||
191 | void fnic_get_res_counts(struct fnic *); | ||
192 | int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu, | ||
193 | u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, | ||
194 | u8 rss_enable, u8 tso_ipid_split_en, | ||
195 | u8 ig_vlan_strip_en); | ||
196 | |||
197 | #endif /* _FNIC_RES_H_ */ | ||
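The inline helpers in this header only encode a descriptor and post it; the caller is responsible for allocating the buffer and setting up its DMA mapping. As a purely hypothetical sketch of posting one receive buffer with fnic_queue_rq_desc() (buf and len are placeholders; the real driver builds an fc_frame skb in fnic_alloc_rq_frame(), which is not part of this file):

	dma_addr_t pa;

	/* Map the caller-provided buffer for device writes (sketch only) */
	pa = pci_map_single(fnic->pdev, buf, len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(fnic->pdev, pa))
		return -ENOMEM;

	/* Hand the mapped buffer to the vNIC receive queue */
	fnic_queue_rq_desc(&fnic->rq[0], buf, pa, len);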
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c new file mode 100644 index 000000000000..eabf36502856 --- /dev/null +++ b/drivers/scsi/fnic/fnic_scsi.c | |||
@@ -0,0 +1,1850 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #include <linux/mempool.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/workqueue.h> | ||
22 | #include <linux/pci.h> | ||
23 | #include <linux/scatterlist.h> | ||
24 | #include <linux/skbuff.h> | ||
25 | #include <linux/spinlock.h> | ||
26 | #include <linux/if_ether.h> | ||
27 | #include <linux/if_vlan.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <scsi/scsi.h> | ||
30 | #include <scsi/scsi_host.h> | ||
31 | #include <scsi/scsi_device.h> | ||
32 | #include <scsi/scsi_cmnd.h> | ||
33 | #include <scsi/scsi_tcq.h> | ||
34 | #include <scsi/fc/fc_els.h> | ||
35 | #include <scsi/fc/fc_fcoe.h> | ||
36 | #include <scsi/libfc.h> | ||
37 | #include <scsi/fc_frame.h> | ||
38 | #include "fnic_io.h" | ||
39 | #include "fnic.h" | ||
40 | |||
41 | const char *fnic_state_str[] = { | ||
42 | [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE", | ||
43 | [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE", | ||
44 | [FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE", | ||
45 | [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE", | ||
46 | }; | ||
47 | |||
48 | static const char *fnic_ioreq_state_str[] = { | ||
49 | [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING", | ||
50 | [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING", | ||
51 | [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE", | ||
52 | [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE", | ||
53 | }; | ||
54 | |||
55 | static const char *fcpio_status_str[] = { | ||
56 | [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/ | ||
57 | [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER", | ||
58 | [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE", | ||
59 | [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM", | ||
60 | [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED", | ||
61 | [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND", | ||
62 | [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/ | ||
63 | [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT", | ||
64 | [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID", | ||
65 | [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID", | ||
66 | [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH", | ||
67 | [FCPIO_FW_ERR] = "FCPIO_FW_ERR", | ||
68 | [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED", | ||
69 | [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED", | ||
70 | [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN", | ||
71 | [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED", | ||
72 | [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL", | ||
73 | [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED", | ||
74 | [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND", | ||
75 | }; | ||
76 | |||
77 | const char *fnic_state_to_str(unsigned int state) | ||
78 | { | ||
79 | if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state]) | ||
80 | return "unknown"; | ||
81 | |||
82 | return fnic_state_str[state]; | ||
83 | } | ||
84 | |||
85 | static const char *fnic_ioreq_state_to_str(unsigned int state) | ||
86 | { | ||
87 | if (state >= ARRAY_SIZE(fnic_ioreq_state_str) || | ||
88 | !fnic_ioreq_state_str[state]) | ||
89 | return "unknown"; | ||
90 | |||
91 | return fnic_ioreq_state_str[state]; | ||
92 | } | ||
93 | |||
94 | static const char *fnic_fcpio_status_to_str(unsigned int status) | ||
95 | { | ||
96 | if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status]) | ||
97 | return "unknown"; | ||
98 | |||
99 | return fcpio_status_str[status]; | ||
100 | } | ||
101 | |||
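The three *_to_str() helpers above share one pattern: a sparse lookup table built with designated initializers, guarded by a bounds check and a NULL check so that holes in the enum fall back to "unknown". A minimal standalone sketch of the same pattern; the enum values and table below are illustrative, not the driver's:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum demo_status { DEMO_OK = 0x0, DEMO_ABORTED = 0x41 };

static const char *demo_status_str[] = {
	[DEMO_OK]      = "DEMO_OK",
	[DEMO_ABORTED] = "DEMO_ABORTED",	/* entries 0x1..0x40 stay NULL */
};

static const char *demo_status_to_str(unsigned int status)
{
	/* out-of-range indices and NULL holes both map to "unknown" */
	if (status >= ARRAY_SIZE(demo_status_str) || !demo_status_str[status])
		return "unknown";
	return demo_status_str[status];
}

int main(void)
{
	/* prints: DEMO_OK unknown DEMO_ABORTED */
	printf("%s %s %s\n", demo_status_to_str(0x0),
	       demo_status_to_str(0x3), demo_status_to_str(0x41));
	return 0;
}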
102 | static void fnic_cleanup_io(struct fnic *fnic, int exclude_id); | ||
103 | |||
104 | static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic, | ||
105 | struct scsi_cmnd *sc) | ||
106 | { | ||
107 | u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1); | ||
108 | |||
109 | return &fnic->io_req_lock[hash]; | ||
110 | } | ||
111 | |||
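fnic_io_lock_hash() stripes IO state protection across FNIC_IO_LOCKS spinlocks and picks one by masking the SCSI request tag; the mask only works because FNIC_IO_LOCKS is a power of two. A tiny runnable sketch of the stripe selection, with NR_LOCKS standing in for FNIC_IO_LOCKS:

#include <assert.h>
#include <stdio.h>

#define NR_LOCKS 64U	/* stand-in for FNIC_IO_LOCKS; must be a power of two */

/* tag & (N - 1) equals tag % N only when N is a power of two. */
static unsigned int lock_stripe(unsigned int tag)
{
	assert((NR_LOCKS & (NR_LOCKS - 1)) == 0);
	return tag & (NR_LOCKS - 1);
}

int main(void)
{
	/* tags 5 and 69 land on the same stripe (both print 5) */
	printf("%u %u\n", lock_stripe(5), lock_stripe(69));
	return 0;
}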
112 | /* | ||
113 | * Unmap the data buffer and sense buffer for an io_req, | ||
114 | * also unmap and free the device-private scatter/gather list. | ||
115 | */ | ||
116 | static void fnic_release_ioreq_buf(struct fnic *fnic, | ||
117 | struct fnic_io_req *io_req, | ||
118 | struct scsi_cmnd *sc) | ||
119 | { | ||
120 | if (io_req->sgl_list_pa) | ||
121 | pci_unmap_single(fnic->pdev, io_req->sgl_list_pa, | ||
122 | sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt, | ||
123 | PCI_DMA_TODEVICE); | ||
124 | scsi_dma_unmap(sc); | ||
125 | |||
126 | if (io_req->sgl_cnt) | ||
127 | mempool_free(io_req->sgl_list_alloc, | ||
128 | fnic->io_sgl_pool[io_req->sgl_type]); | ||
129 | if (io_req->sense_buf_pa) | ||
130 | pci_unmap_single(fnic->pdev, io_req->sense_buf_pa, | ||
131 | SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE); | ||
132 | } | ||
133 | |||
134 | /* Free up Copy Wq descriptors. Called with copy_wq lock held */ | ||
135 | static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq) | ||
136 | { | ||
137 | /* if no Ack received from firmware, then nothing to clean */ | ||
138 | if (!fnic->fw_ack_recd[0]) | ||
139 | return 1; | ||
140 | |||
141 | /* | ||
142 | * Update desc_available count based on number of freed descriptors | ||
143 | * Account for wraparound | ||
144 | */ | ||
145 | if (wq->to_clean_index <= fnic->fw_ack_index[0]) | ||
146 | wq->ring.desc_avail += (fnic->fw_ack_index[0] | ||
147 | - wq->to_clean_index + 1); | ||
148 | else | ||
149 | wq->ring.desc_avail += (wq->ring.desc_count | ||
150 | - wq->to_clean_index | ||
151 | + fnic->fw_ack_index[0] + 1); | ||
152 | |||
153 | /* | ||
154 | * just bump clean index to ack_index+1 accounting for wraparound | ||
155 | * this will essentially free up all descriptors between | ||
156 | * to_clean_index and fw_ack_index, both inclusive | ||
157 | */ | ||
158 | wq->to_clean_index = | ||
159 | (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count; | ||
160 | |||
161 | /* we have processed the acks received so far */ | ||
162 | fnic->fw_ack_recd[0] = 0; | ||
163 | return 0; | ||
164 | } | ||
165 | |||
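free_wq_copy_descs() reclaims every descriptor from to_clean_index up to the last index the firmware ACKed, inclusive, and has to account for the ACK wrapping past the end of the ring. A hedged, userspace-sized sketch of the same arithmetic; the struct and function names here are illustrative:

#include <stdio.h>

struct demo_ring {
	unsigned int desc_count;	/* total descriptors in the ring */
	unsigned int desc_avail;	/* descriptors free for new work */
	unsigned int to_clean_index;	/* oldest not-yet-reclaimed slot */
};

/* Reclaim all slots from to_clean_index through ack_index, inclusive. */
static void reclaim_to_ack(struct demo_ring *r, unsigned int ack_index)
{
	if (r->to_clean_index <= ack_index)
		r->desc_avail += ack_index - r->to_clean_index + 1;
	else	/* the ACK wrapped past the end of the ring */
		r->desc_avail += r->desc_count - r->to_clean_index
				 + ack_index + 1;

	r->to_clean_index = (ack_index + 1) % r->desc_count;
}

int main(void)
{
	struct demo_ring r = { .desc_count = 8, .desc_avail = 0,
			       .to_clean_index = 6 };

	reclaim_to_ack(&r, 1);	/* frees slots 6, 7, 0, 1 */
	printf("avail=%u to_clean=%u\n", r.desc_avail, r.to_clean_index);
	return 0;
}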
166 | |||
167 | /* | ||
168 | * fnic_fw_reset_handler | ||
169 | * Routine to send reset msg to fw | ||
170 | */ | ||
171 | int fnic_fw_reset_handler(struct fnic *fnic) | ||
172 | { | ||
173 | struct vnic_wq_copy *wq = &fnic->wq_copy[0]; | ||
174 | int ret = 0; | ||
175 | unsigned long flags; | ||
176 | |||
177 | spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); | ||
178 | |||
179 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) | ||
180 | free_wq_copy_descs(fnic, wq); | ||
181 | |||
182 | if (!vnic_wq_copy_desc_avail(wq)) | ||
183 | ret = -EAGAIN; | ||
184 | else | ||
185 | fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG); | ||
186 | |||
187 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); | ||
188 | |||
189 | if (!ret) | ||
190 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
191 | "Issued fw reset\n"); | ||
192 | else | ||
193 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
194 | "Failed to issue fw reset\n"); | ||
195 | return ret; | ||
196 | } | ||
197 | |||
198 | |||
199 | /* | ||
200 | * fnic_flogi_reg_handler | ||
201 | * Routine to send flogi register msg to fw | ||
202 | */ | ||
203 | int fnic_flogi_reg_handler(struct fnic *fnic) | ||
204 | { | ||
205 | struct vnic_wq_copy *wq = &fnic->wq_copy[0]; | ||
206 | u8 gw_mac[ETH_ALEN]; | ||
207 | int ret = 0; | ||
208 | unsigned long flags; | ||
209 | |||
210 | spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); | ||
211 | |||
212 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) | ||
213 | free_wq_copy_descs(fnic, wq); | ||
214 | |||
215 | if (!vnic_wq_copy_desc_avail(wq)) { | ||
216 | ret = -EAGAIN; | ||
217 | goto flogi_reg_ioreq_end; | ||
218 | } | ||
219 | |||
220 | if (fnic->fcoui_mode) | ||
221 | memset(gw_mac, 0xff, ETH_ALEN); | ||
222 | else | ||
223 | memcpy(gw_mac, fnic->dest_addr, ETH_ALEN); | ||
224 | |||
225 | fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, | ||
226 | FCPIO_FLOGI_REG_GW_DEST, | ||
227 | fnic->s_id, | ||
228 | gw_mac); | ||
229 | |||
230 | flogi_reg_ioreq_end: | ||
231 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); | ||
232 | |||
233 | if (!ret) | ||
234 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
235 | "flog reg issued\n"); | ||
236 | |||
237 | return ret; | ||
238 | } | ||
239 | |||
240 | /* | ||
241 | * fnic_queue_wq_copy_desc | ||
242 | * Routine to enqueue a wq copy desc | ||
243 | */ | ||
244 | static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, | ||
245 | struct vnic_wq_copy *wq, | ||
246 | struct fnic_io_req *io_req, | ||
247 | struct scsi_cmnd *sc, | ||
248 | u32 sg_count) | ||
249 | { | ||
250 | struct scatterlist *sg; | ||
251 | struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); | ||
252 | struct fc_rport_libfc_priv *rp = rport->dd_data; | ||
253 | struct host_sg_desc *desc; | ||
254 | u8 pri_tag = 0; | ||
255 | unsigned int i; | ||
256 | unsigned long intr_flags; | ||
257 | int flags; | ||
258 | u8 exch_flags; | ||
259 | struct scsi_lun fc_lun; | ||
260 | char msg[2]; | ||
261 | |||
262 | if (sg_count) { | ||
263 | BUG_ON(sg_count < 0); | ||
264 | BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT); | ||
265 | |||
266 | /* For each SGE, create a device desc entry */ | ||
267 | desc = io_req->sgl_list; | ||
268 | for_each_sg(scsi_sglist(sc), sg, sg_count, i) { | ||
269 | desc->addr = cpu_to_le64(sg_dma_address(sg)); | ||
270 | desc->len = cpu_to_le32(sg_dma_len(sg)); | ||
271 | desc->_resvd = 0; | ||
272 | desc++; | ||
273 | } | ||
274 | |||
275 | io_req->sgl_list_pa = pci_map_single | ||
276 | (fnic->pdev, | ||
277 | io_req->sgl_list, | ||
278 | sizeof(io_req->sgl_list[0]) * sg_count, | ||
279 | PCI_DMA_TODEVICE); | ||
280 | } | ||
281 | |||
282 | io_req->sense_buf_pa = pci_map_single(fnic->pdev, | ||
283 | sc->sense_buffer, | ||
284 | SCSI_SENSE_BUFFERSIZE, | ||
285 | PCI_DMA_FROMDEVICE); | ||
286 | |||
287 | int_to_scsilun(sc->device->lun, &fc_lun); | ||
288 | |||
289 | pri_tag = FCPIO_ICMND_PTA_SIMPLE; | ||
290 | msg[0] = MSG_SIMPLE_TAG; | ||
291 | scsi_populate_tag_msg(sc, msg); | ||
292 | if (msg[0] == MSG_ORDERED_TAG) | ||
293 | pri_tag = FCPIO_ICMND_PTA_ORDERED; | ||
294 | |||
295 | /* Enqueue the descriptor in the Copy WQ */ | ||
296 | spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); | ||
297 | |||
298 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) | ||
299 | free_wq_copy_descs(fnic, wq); | ||
300 | |||
301 | if (unlikely(!vnic_wq_copy_desc_avail(wq))) { | ||
302 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); | ||
303 | return SCSI_MLQUEUE_HOST_BUSY; | ||
304 | } | ||
305 | |||
306 | flags = 0; | ||
307 | if (sc->sc_data_direction == DMA_FROM_DEVICE) | ||
308 | flags = FCPIO_ICMND_RDDATA; | ||
309 | else if (sc->sc_data_direction == DMA_TO_DEVICE) | ||
310 | flags = FCPIO_ICMND_WRDATA; | ||
311 | |||
312 | exch_flags = 0; | ||
313 | if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) && | ||
314 | (rp->flags & FC_RP_FLAGS_RETRY)) | ||
315 | exch_flags |= FCPIO_ICMND_SRFLAG_RETRY; | ||
316 | |||
317 | fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag, | ||
318 | 0, exch_flags, io_req->sgl_cnt, | ||
319 | SCSI_SENSE_BUFFERSIZE, | ||
320 | io_req->sgl_list_pa, | ||
321 | io_req->sense_buf_pa, | ||
322 | 0, /* scsi cmd ref, always 0 */ | ||
323 | pri_tag, /* scsi pri and tag */ | ||
324 | flags, /* command flags */ | ||
325 | sc->cmnd, scsi_bufflen(sc), | ||
326 | fc_lun.scsi_lun, io_req->port_id, | ||
327 | rport->maxframe_size, rp->r_a_tov, | ||
328 | rp->e_d_tov); | ||
329 | |||
330 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); | ||
331 | return 0; | ||
332 | } | ||
333 | |||
334 | /* | ||
335 | * fnic_queuecommand | ||
336 | * Routine to send a scsi cdb | ||
337 | * Called with host_lock held and interrupts disabled. | ||
338 | */ | ||
339 | int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | ||
340 | { | ||
341 | struct fc_lport *lp; | ||
342 | struct fc_rport *rport; | ||
343 | struct fnic_io_req *io_req; | ||
344 | struct fnic *fnic; | ||
345 | struct vnic_wq_copy *wq; | ||
346 | int ret; | ||
347 | int sg_count; | ||
348 | unsigned long flags; | ||
349 | unsigned long ptr; | ||
350 | |||
351 | rport = starget_to_rport(scsi_target(sc->device)); | ||
352 | ret = fc_remote_port_chkready(rport); | ||
353 | if (ret) { | ||
354 | sc->result = ret; | ||
355 | done(sc); | ||
356 | return 0; | ||
357 | } | ||
358 | |||
359 | lp = shost_priv(sc->device->host); | ||
360 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) | ||
361 | return SCSI_MLQUEUE_HOST_BUSY; | ||
362 | |||
363 | /* | ||
364 | * Release host lock, use driver resource specific locks from here. | ||
365 | * Don't re-enable interrupts in case they were disabled prior to the | ||
366 | * caller disabling them. | ||
367 | */ | ||
368 | spin_unlock(lp->host->host_lock); | ||
369 | |||
370 | /* Get a new io_req for this SCSI IO */ | ||
371 | fnic = lport_priv(lp); | ||
372 | |||
373 | io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); | ||
374 | if (!io_req) { | ||
375 | ret = SCSI_MLQUEUE_HOST_BUSY; | ||
376 | goto out; | ||
377 | } | ||
378 | memset(io_req, 0, sizeof(*io_req)); | ||
379 | |||
380 | /* Map the data buffer */ | ||
381 | sg_count = scsi_dma_map(sc); | ||
382 | if (sg_count < 0) { | ||
383 | ret = SCSI_MLQUEUE_HOST_BUSY; | ||
384 | mempool_free(io_req, fnic->io_req_pool); | ||
385 | goto out; | ||
386 | } | ||
387 | /* Determine the type of scatter/gather list we need */ | ||
388 | io_req->sgl_cnt = sg_count; | ||
389 | io_req->sgl_type = FNIC_SGL_CACHE_DFLT; | ||
390 | if (sg_count > FNIC_DFLT_SG_DESC_CNT) | ||
391 | io_req->sgl_type = FNIC_SGL_CACHE_MAX; | ||
392 | |||
393 | if (sg_count) { | ||
394 | io_req->sgl_list = | ||
395 | mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type], | ||
396 | GFP_ATOMIC | GFP_DMA); | ||
397 | if (!io_req->sgl_list) { | ||
398 | ret = SCSI_MLQUEUE_HOST_BUSY; | ||
399 | scsi_dma_unmap(sc); | ||
400 | mempool_free(io_req, fnic->io_req_pool); | ||
401 | goto out; | ||
402 | } | ||
403 | |||
404 | /* Cache sgl list allocated address before alignment */ | ||
405 | io_req->sgl_list_alloc = io_req->sgl_list; | ||
406 | ptr = (unsigned long) io_req->sgl_list; | ||
407 | if (ptr % FNIC_SG_DESC_ALIGN) { | ||
408 | io_req->sgl_list = (struct host_sg_desc *) | ||
409 | (((unsigned long) ptr | ||
410 | + FNIC_SG_DESC_ALIGN - 1) | ||
411 | & ~(FNIC_SG_DESC_ALIGN - 1)); | ||
412 | } | ||
413 | } | ||
414 | |||
415 | /* initialize rest of io_req */ | ||
416 | io_req->port_id = rport->port_id; | ||
417 | CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; | ||
418 | CMD_SP(sc) = (char *)io_req; | ||
419 | sc->scsi_done = done; | ||
420 | |||
421 | /* create copy wq desc and enqueue it */ | ||
422 | wq = &fnic->wq_copy[0]; | ||
423 | ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count); | ||
424 | if (ret) { | ||
425 | /* | ||
426 | * In case another thread cancelled the request, | ||
427 | * refetch the pointer under the lock. | ||
428 | */ | ||
429 | spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc); | ||
430 | |||
431 | spin_lock_irqsave(io_lock, flags); | ||
432 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
433 | CMD_SP(sc) = NULL; | ||
434 | CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; | ||
435 | spin_unlock_irqrestore(io_lock, flags); | ||
436 | if (io_req) { | ||
437 | fnic_release_ioreq_buf(fnic, io_req, sc); | ||
438 | mempool_free(io_req, fnic->io_req_pool); | ||
439 | } | ||
440 | } | ||
441 | out: | ||
442 | /* acquire host lock before returning to SCSI */ | ||
443 | spin_lock(lp->host->host_lock); | ||
444 | return ret; | ||
445 | } | ||
446 | |||
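fnic_queuecommand() keeps the raw mempool pointer in sgl_list_alloc and rounds the working sgl_list pointer up to FNIC_SG_DESC_ALIGN, which is the standard power-of-two align-up trick. A small sketch of that rounding with an illustrative alignment value (the driver's FNIC_SG_DESC_ALIGN may differ):

#include <stdint.h>
#include <stdio.h>

#define SG_DESC_ALIGN 16UL	/* illustrative; must be a power of two */

/* Round addr up to the next SG_DESC_ALIGN boundary. */
static uintptr_t align_up(uintptr_t addr)
{
	return (addr + SG_DESC_ALIGN - 1) & ~(SG_DESC_ALIGN - 1);
}

int main(void)
{
	/* prints: 32 48 48 */
	printf("%lu %lu %lu\n",
	       (unsigned long)align_up(32),	/* already aligned */
	       (unsigned long)align_up(33),	/* rounds up to 48 */
	       (unsigned long)align_up(47));	/* rounds up to 48 */
	return 0;
}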
447 | /* | ||
448 | * fnic_fcpio_fw_reset_cmpl_handler | ||
449 | * Routine to handle fw reset completion | ||
450 | */ | ||
451 | static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, | ||
452 | struct fcpio_fw_req *desc) | ||
453 | { | ||
454 | u8 type; | ||
455 | u8 hdr_status; | ||
456 | struct fcpio_tag tag; | ||
457 | int ret = 0; | ||
458 | struct fc_frame *flogi; | ||
459 | unsigned long flags; | ||
460 | |||
461 | fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); | ||
462 | |||
463 | /* Clean up all outstanding io requests */ | ||
464 | fnic_cleanup_io(fnic, SCSI_NO_TAG); | ||
465 | |||
466 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
467 | |||
468 | flogi = fnic->flogi; | ||
469 | fnic->flogi = NULL; | ||
470 | |||
471 | /* fnic should be in FC_TRANS_ETH_MODE */ | ||
472 | if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { | ||
473 | /* Check status of reset completion */ | ||
474 | if (!hdr_status) { | ||
475 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
476 | "reset cmpl success\n"); | ||
477 | /* Ready to send flogi out */ | ||
478 | fnic->state = FNIC_IN_ETH_MODE; | ||
479 | } else { | ||
480 | FNIC_SCSI_DBG(KERN_DEBUG, | ||
481 | fnic->lport->host, | ||
482 | "fnic fw_reset : failed %s\n", | ||
483 | fnic_fcpio_status_to_str(hdr_status)); | ||
484 | |||
485 | /* | ||
486 | * Unable to change to eth mode, cannot send out flogi | ||
487 | * Change state to fc mode, so that subsequent Flogi | ||
488 | * requests from libFC will cause more attempts to | ||
489 | * reset the firmware. Free the cached flogi | ||
490 | */ | ||
491 | fnic->state = FNIC_IN_FC_MODE; | ||
492 | ret = -1; | ||
493 | } | ||
494 | } else { | ||
495 | FNIC_SCSI_DBG(KERN_DEBUG, | ||
496 | fnic->lport->host, | ||
497 | "Unexpected state %s while processing" | ||
498 | " reset cmpl\n", fnic_state_to_str(fnic->state)); | ||
499 | ret = -1; | ||
500 | } | ||
501 | |||
502 | /* Thread removing device blocks till firmware reset is complete */ | ||
503 | if (fnic->remove_wait) | ||
504 | complete(fnic->remove_wait); | ||
505 | |||
506 | /* | ||
507 | * If fnic is being removed, or fw reset failed | ||
508 | * free the flogi frame. Else, send it out | ||
509 | */ | ||
510 | if (fnic->remove_wait || ret) { | ||
511 | fnic->flogi_oxid = FC_XID_UNKNOWN; | ||
512 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
513 | if (flogi) | ||
514 | dev_kfree_skb_irq(fp_skb(flogi)); | ||
515 | goto reset_cmpl_handler_end; | ||
516 | } | ||
517 | |||
518 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
519 | |||
520 | if (flogi) | ||
521 | ret = fnic_send_frame(fnic, flogi); | ||
522 | |||
523 | reset_cmpl_handler_end: | ||
524 | return ret; | ||
525 | } | ||
526 | |||
527 | /* | ||
528 | * fnic_fcpio_flogi_reg_cmpl_handler | ||
529 | * Routine to handle flogi register completion | ||
530 | */ | ||
531 | static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, | ||
532 | struct fcpio_fw_req *desc) | ||
533 | { | ||
534 | u8 type; | ||
535 | u8 hdr_status; | ||
536 | struct fcpio_tag tag; | ||
537 | int ret = 0; | ||
538 | struct fc_frame *flogi_resp = NULL; | ||
539 | unsigned long flags; | ||
540 | struct sk_buff *skb; | ||
541 | |||
542 | fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); | ||
543 | |||
544 | /* Update fnic state based on status of flogi reg completion */ | ||
545 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
546 | |||
547 | flogi_resp = fnic->flogi_resp; | ||
548 | fnic->flogi_resp = NULL; | ||
549 | |||
550 | if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) { | ||
551 | |||
552 | /* Check flogi registration completion status */ | ||
553 | if (!hdr_status) { | ||
554 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
555 | "flog reg succeeded\n"); | ||
556 | fnic->state = FNIC_IN_FC_MODE; | ||
557 | } else { | ||
558 | FNIC_SCSI_DBG(KERN_DEBUG, | ||
559 | fnic->lport->host, | ||
560 | "fnic flogi reg :failed %s\n", | ||
561 | fnic_fcpio_status_to_str(hdr_status)); | ||
562 | fnic->state = FNIC_IN_ETH_MODE; | ||
563 | ret = -1; | ||
564 | } | ||
565 | } else { | ||
566 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
567 | "Unexpected fnic state %s while" | ||
568 | " processing flogi reg completion\n", | ||
569 | fnic_state_to_str(fnic->state)); | ||
570 | ret = -1; | ||
571 | } | ||
572 | |||
573 | /* Successful flogi reg cmpl, pass frame to LibFC */ | ||
574 | if (!ret && flogi_resp) { | ||
575 | if (fnic->stop_rx_link_events) { | ||
576 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
577 | goto reg_cmpl_handler_end; | ||
578 | } | ||
579 | skb = (struct sk_buff *)flogi_resp; | ||
580 | /* Use fr_flags to indicate whether flogi resp or not */ | ||
581 | fr_flags(flogi_resp) = 1; | ||
582 | fr_dev(flogi_resp) = fnic->lport; | ||
583 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
584 | |||
585 | skb_queue_tail(&fnic->frame_queue, skb); | ||
586 | queue_work(fnic_event_queue, &fnic->frame_work); | ||
587 | |||
588 | } else { | ||
589 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
590 | if (flogi_resp) | ||
591 | dev_kfree_skb_irq(fp_skb(flogi_resp)); | ||
592 | } | ||
593 | |||
594 | reg_cmpl_handler_end: | ||
595 | return ret; | ||
596 | } | ||
597 | |||
598 | static inline int is_ack_index_in_range(struct vnic_wq_copy *wq, | ||
599 | u16 request_out) | ||
600 | { | ||
601 | if (wq->to_clean_index <= wq->to_use_index) { | ||
602 | /* out of range, stale request_out index */ | ||
603 | if (request_out < wq->to_clean_index || | ||
604 | request_out >= wq->to_use_index) | ||
605 | return 0; | ||
606 | } else { | ||
607 | /* out of range, stale request_out index */ | ||
608 | if (request_out < wq->to_clean_index && | ||
609 | request_out >= wq->to_use_index) | ||
610 | return 0; | ||
611 | } | ||
612 | /* request_out index is in range */ | ||
613 | return 1; | ||
614 | } | ||
615 | |||
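is_ack_index_in_range() rejects stale ACKs by checking that the reported index lies inside the half-open window [to_clean_index, to_use_index), which may wrap around the ring. A compact sketch of the same check with one unwrapped and one wrapped case (names illustrative):

#include <stdio.h>

/* Return 1 if idx lies in [to_clean, to_use), allowing the window to wrap. */
static int index_in_window(unsigned int to_clean, unsigned int to_use,
			   unsigned int idx)
{
	if (to_clean <= to_use)			/* window does not wrap */
		return idx >= to_clean && idx < to_use;

	/* wrapped window: valid indices are >= to_clean or < to_use */
	return idx >= to_clean || idx < to_use;
}

int main(void)
{
	/* prints: 1 1 0 */
	printf("%d %d %d\n",
	       index_in_window(2, 6, 4),	/* inside unwrapped window */
	       index_in_window(6, 2, 7),	/* inside wrapped window */
	       index_in_window(6, 2, 4));	/* stale, outside window */
	return 0;
}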
616 | |||
617 | /* | ||
618 | * Mark that an ack was received and store the ack index. If multiple | ||
619 | * acks are received before the Tx thread cleans up, the latest value is | ||
620 | * used, which is the correct behavior. This state should live in the copy | ||
621 | * WQ instead of in the fnic. | ||
622 | */ | ||
623 | static inline void fnic_fcpio_ack_handler(struct fnic *fnic, | ||
624 | unsigned int cq_index, | ||
625 | struct fcpio_fw_req *desc) | ||
626 | { | ||
627 | struct vnic_wq_copy *wq; | ||
628 | u16 request_out = desc->u.ack.request_out; | ||
629 | unsigned long flags; | ||
630 | |||
631 | /* mark the ack state */ | ||
632 | wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count]; | ||
633 | spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); | ||
634 | |||
635 | if (is_ack_index_in_range(wq, request_out)) { | ||
636 | fnic->fw_ack_index[0] = request_out; | ||
637 | fnic->fw_ack_recd[0] = 1; | ||
638 | } | ||
639 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); | ||
640 | } | ||
641 | |||
642 | /* | ||
643 | * fnic_fcpio_icmnd_cmpl_handler | ||
644 | * Routine to handle icmnd completions | ||
645 | */ | ||
646 | static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, | ||
647 | struct fcpio_fw_req *desc) | ||
648 | { | ||
649 | u8 type; | ||
650 | u8 hdr_status; | ||
651 | struct fcpio_tag tag; | ||
652 | u32 id; | ||
653 | u64 xfer_len = 0; | ||
654 | struct fcpio_icmnd_cmpl *icmnd_cmpl; | ||
655 | struct fnic_io_req *io_req; | ||
656 | struct scsi_cmnd *sc; | ||
657 | unsigned long flags; | ||
658 | spinlock_t *io_lock; | ||
659 | |||
660 | /* Decode the cmpl description to get the io_req id */ | ||
661 | fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); | ||
662 | fcpio_tag_id_dec(&tag, &id); | ||
663 | |||
664 | if (id >= FNIC_MAX_IO_REQ) | ||
665 | return; | ||
666 | |||
667 | sc = scsi_host_find_tag(fnic->lport->host, id); | ||
668 | WARN_ON_ONCE(!sc); | ||
669 | if (!sc) | ||
670 | return; | ||
671 | |||
672 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
673 | spin_lock_irqsave(io_lock, flags); | ||
674 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
675 | WARN_ON_ONCE(!io_req); | ||
676 | if (!io_req) { | ||
677 | spin_unlock_irqrestore(io_lock, flags); | ||
678 | return; | ||
679 | } | ||
680 | |||
681 | /* firmware completed the io */ | ||
682 | io_req->io_completed = 1; | ||
683 | |||
684 | /* | ||
685 | * if SCSI-ML has already issued abort on this command, | ||
686 | * ignore completion of the IO. The abts path will clean it up | ||
687 | */ | ||
688 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { | ||
689 | spin_unlock_irqrestore(io_lock, flags); | ||
690 | return; | ||
691 | } | ||
692 | |||
693 | /* Mark the IO as complete */ | ||
694 | CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; | ||
695 | |||
696 | icmnd_cmpl = &desc->u.icmnd_cmpl; | ||
697 | |||
698 | switch (hdr_status) { | ||
699 | case FCPIO_SUCCESS: | ||
700 | sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status; | ||
701 | xfer_len = scsi_bufflen(sc); | ||
702 | scsi_set_resid(sc, icmnd_cmpl->residual); | ||
703 | |||
704 | if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) | ||
705 | xfer_len -= icmnd_cmpl->residual; | ||
706 | |||
707 | /* | ||
708 | * If queue_full, then try to reduce queue depth for all | ||
709 | * LUNs on the target. TODO: this should be accompanied | ||
710 | * by a periodic queue_depth rampup based on successful | ||
711 | * IO completion. | ||
712 | */ | ||
713 | if (icmnd_cmpl->scsi_status == QUEUE_FULL) { | ||
714 | struct scsi_device *t_sdev; | ||
715 | int qd = 0; | ||
716 | |||
717 | shost_for_each_device(t_sdev, sc->device->host) { | ||
718 | if (t_sdev->id != sc->device->id) | ||
719 | continue; | ||
720 | |||
721 | if (t_sdev->queue_depth > 1) { | ||
722 | qd = scsi_track_queue_full | ||
723 | (t_sdev, | ||
724 | t_sdev->queue_depth - 1); | ||
725 | if (qd == -1) | ||
726 | qd = t_sdev->host->cmd_per_lun; | ||
727 | shost_printk(KERN_INFO, | ||
728 | fnic->lport->host, | ||
729 | "scsi[%d:%d:%d:%d" | ||
730 | "] queue full detected," | ||
731 | "new depth = %d\n", | ||
732 | t_sdev->host->host_no, | ||
733 | t_sdev->channel, | ||
734 | t_sdev->id, t_sdev->lun, | ||
735 | t_sdev->queue_depth); | ||
736 | } | ||
737 | } | ||
738 | } | ||
739 | break; | ||
740 | |||
741 | case FCPIO_TIMEOUT: /* request was timed out */ | ||
742 | sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status; | ||
743 | break; | ||
744 | |||
745 | case FCPIO_ABORTED: /* request was aborted */ | ||
746 | sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; | ||
747 | break; | ||
748 | |||
749 | case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */ | ||
750 | scsi_set_resid(sc, icmnd_cmpl->residual); | ||
751 | sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; | ||
752 | break; | ||
753 | |||
754 | case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */ | ||
755 | sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status; | ||
756 | break; | ||
757 | case FCPIO_INVALID_HEADER: /* header contains invalid data */ | ||
758 | case FCPIO_INVALID_PARAM: /* some parameter in request invalid */ | ||
759 | case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */ | ||
760 | case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */ | ||
761 | case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */ | ||
762 | case FCPIO_MSS_INVALID: /* request was aborted due to mss error */ | ||
763 | case FCPIO_FW_ERR: /* request was terminated due to fw error */ | ||
764 | default: | ||
765 | shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", | ||
766 | fnic_fcpio_status_to_str(hdr_status)); | ||
767 | sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; | ||
768 | break; | ||
769 | } | ||
770 | |||
771 | /* Break link with the SCSI command */ | ||
772 | CMD_SP(sc) = NULL; | ||
773 | |||
774 | spin_unlock_irqrestore(io_lock, flags); | ||
775 | |||
776 | fnic_release_ioreq_buf(fnic, io_req, sc); | ||
777 | |||
778 | mempool_free(io_req, fnic->io_req_pool); | ||
779 | |||
780 | if (sc->sc_data_direction == DMA_FROM_DEVICE) { | ||
781 | fnic->lport->host_stats.fcp_input_requests++; | ||
782 | fnic->fcp_input_bytes += xfer_len; | ||
783 | } else if (sc->sc_data_direction == DMA_TO_DEVICE) { | ||
784 | fnic->lport->host_stats.fcp_output_requests++; | ||
785 | fnic->fcp_output_bytes += xfer_len; | ||
786 | } else | ||
787 | fnic->lport->host_stats.fcp_control_requests++; | ||
788 | |||
789 | /* Call SCSI completion function to complete the IO */ | ||
790 | if (sc->scsi_done) | ||
791 | sc->scsi_done(sc); | ||
792 | |||
793 | } | ||
794 | |||
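On a QUEUE_FULL status the handler above walks every LUN on the same target and steps its queue depth down by one, substituting cmd_per_lun when scsi_track_queue_full() returns -1; the comment notes the matching periodic ramp-up is still a TODO. A hedged sketch of such a step-down/ramp-up policy, not the driver's code:

/* Illustrative queue-depth policy: step down on QUEUE_FULL, ramp up slowly. */
struct demo_lun_qd {
	unsigned int depth;		/* current queue depth */
	unsigned int cmd_per_lun;	/* default / ceiling depth */
};

static void demo_on_queue_full(struct demo_lun_qd *q)
{
	if (q->depth > 1)		/* never drop below one command */
		q->depth--;
}

/* A periodic ramp-up (the TODO mentioned above) could look like this. */
static void demo_on_ramp_up_tick(struct demo_lun_qd *q)
{
	if (q->depth < q->cmd_per_lun)
		q->depth++;
}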
795 | /* fnic_fcpio_itmf_cmpl_handler | ||
796 | * Routine to handle itmf completions | ||
797 | */ | ||
798 | static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, | ||
799 | struct fcpio_fw_req *desc) | ||
800 | { | ||
801 | u8 type; | ||
802 | u8 hdr_status; | ||
803 | struct fcpio_tag tag; | ||
804 | u32 id; | ||
805 | struct scsi_cmnd *sc; | ||
806 | struct fnic_io_req *io_req; | ||
807 | unsigned long flags; | ||
808 | spinlock_t *io_lock; | ||
809 | |||
810 | fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); | ||
811 | fcpio_tag_id_dec(&tag, &id); | ||
812 | |||
813 | if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) | ||
814 | return; | ||
815 | |||
816 | sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); | ||
817 | WARN_ON_ONCE(!sc); | ||
818 | if (!sc) | ||
819 | return; | ||
820 | |||
821 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
822 | spin_lock_irqsave(io_lock, flags); | ||
823 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
824 | WARN_ON_ONCE(!io_req); | ||
825 | if (!io_req) { | ||
826 | spin_unlock_irqrestore(io_lock, flags); | ||
827 | return; | ||
828 | } | ||
829 | |||
830 | if (id & FNIC_TAG_ABORT) { | ||
831 | /* Completion of abort cmd */ | ||
832 | if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) { | ||
833 | /* This is a late completion. Ignore it */ | ||
834 | spin_unlock_irqrestore(io_lock, flags); | ||
835 | return; | ||
836 | } | ||
837 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; | ||
838 | CMD_ABTS_STATUS(sc) = hdr_status; | ||
839 | |||
840 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
841 | "abts cmpl recd. id %d status %s\n", | ||
842 | (int)(id & FNIC_TAG_MASK), | ||
843 | fnic_fcpio_status_to_str(hdr_status)); | ||
844 | |||
845 | /* | ||
846 | * If scsi_eh thread is blocked waiting for abts to complete, | ||
847 | * signal completion to it; the IO will be cleaned up in that thread. | ||
848 | * Otherwise clean it up in this context. | ||
849 | */ | ||
850 | if (io_req->abts_done) { | ||
851 | complete(io_req->abts_done); | ||
852 | spin_unlock_irqrestore(io_lock, flags); | ||
853 | } else { | ||
854 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
855 | "abts cmpl, completing IO\n"); | ||
856 | CMD_SP(sc) = NULL; | ||
857 | sc->result = (DID_ERROR << 16); | ||
858 | |||
859 | spin_unlock_irqrestore(io_lock, flags); | ||
860 | |||
861 | fnic_release_ioreq_buf(fnic, io_req, sc); | ||
862 | mempool_free(io_req, fnic->io_req_pool); | ||
863 | if (sc->scsi_done) | ||
864 | sc->scsi_done(sc); | ||
865 | } | ||
866 | |||
867 | } else if (id & FNIC_TAG_DEV_RST) { | ||
868 | /* Completion of device reset */ | ||
869 | CMD_LR_STATUS(sc) = hdr_status; | ||
870 | CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; | ||
871 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
872 | "dev reset cmpl recd. id %d status %s\n", | ||
873 | (int)(id & FNIC_TAG_MASK), | ||
874 | fnic_fcpio_status_to_str(hdr_status)); | ||
875 | if (io_req->dr_done) | ||
876 | complete(io_req->dr_done); | ||
877 | spin_unlock_irqrestore(io_lock, flags); | ||
878 | |||
879 | } else { | ||
880 | shost_printk(KERN_ERR, fnic->lport->host, | ||
881 | "Unexpected itmf io state %s tag %x\n", | ||
882 | fnic_ioreq_state_to_str(CMD_STATE(sc)), id); | ||
883 | spin_unlock_irqrestore(io_lock, flags); | ||
884 | } | ||
885 | |||
886 | } | ||
887 | |||
888 | /* | ||
889 | * fnic_fcpio_cmpl_handler | ||
890 | * Routine to service the cq for wq_copy | ||
891 | */ | ||
892 | static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, | ||
893 | unsigned int cq_index, | ||
894 | struct fcpio_fw_req *desc) | ||
895 | { | ||
896 | struct fnic *fnic = vnic_dev_priv(vdev); | ||
897 | int ret = 0; | ||
898 | |||
899 | switch (desc->hdr.type) { | ||
900 | case FCPIO_ACK: /* fw copied copy wq desc to its queue */ | ||
901 | fnic_fcpio_ack_handler(fnic, cq_index, desc); | ||
902 | break; | ||
903 | |||
904 | case FCPIO_ICMND_CMPL: /* fw completed a command */ | ||
905 | fnic_fcpio_icmnd_cmpl_handler(fnic, desc); | ||
906 | break; | ||
907 | |||
908 | case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ | ||
909 | fnic_fcpio_itmf_cmpl_handler(fnic, desc); | ||
910 | break; | ||
911 | |||
912 | case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ | ||
913 | ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc); | ||
914 | break; | ||
915 | |||
916 | case FCPIO_RESET_CMPL: /* fw completed reset */ | ||
917 | ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc); | ||
918 | break; | ||
919 | |||
920 | default: | ||
921 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
922 | "firmware completion type %d\n", | ||
923 | desc->hdr.type); | ||
924 | break; | ||
925 | } | ||
926 | |||
927 | return ret; | ||
928 | } | ||
929 | |||
930 | /* | ||
931 | * fnic_wq_copy_cmpl_handler | ||
932 | * Routine to process wq copy | ||
933 | */ | ||
934 | int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do) | ||
935 | { | ||
936 | unsigned int wq_work_done = 0; | ||
937 | unsigned int i, cq_index; | ||
938 | unsigned int cur_work_done; | ||
939 | |||
940 | for (i = 0; i < fnic->wq_copy_count; i++) { | ||
941 | cq_index = i + fnic->raw_wq_count + fnic->rq_count; | ||
942 | cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index], | ||
943 | fnic_fcpio_cmpl_handler, | ||
944 | copy_work_to_do); | ||
945 | wq_work_done += cur_work_done; | ||
946 | } | ||
947 | return wq_work_done; | ||
948 | } | ||
949 | |||
950 | static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) | ||
951 | { | ||
952 | unsigned int i; | ||
953 | struct fnic_io_req *io_req; | ||
954 | unsigned long flags = 0; | ||
955 | struct scsi_cmnd *sc; | ||
956 | spinlock_t *io_lock; | ||
957 | |||
958 | for (i = 0; i < FNIC_MAX_IO_REQ; i++) { | ||
959 | if (i == exclude_id) | ||
960 | continue; | ||
961 | |||
962 | sc = scsi_host_find_tag(fnic->lport->host, i); | ||
963 | if (!sc) | ||
964 | continue; | ||
965 | |||
966 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
967 | spin_lock_irqsave(io_lock, flags); | ||
968 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
969 | if (!io_req) { | ||
970 | spin_unlock_irqrestore(io_lock, flags); | ||
971 | goto cleanup_scsi_cmd; | ||
972 | } | ||
973 | |||
974 | CMD_SP(sc) = NULL; | ||
975 | |||
976 | spin_unlock_irqrestore(io_lock, flags); | ||
977 | |||
978 | /* | ||
979 | * If there is a scsi_cmnd associated with this io_req, then | ||
980 | * free the corresponding state | ||
981 | */ | ||
982 | fnic_release_ioreq_buf(fnic, io_req, sc); | ||
983 | mempool_free(io_req, fnic->io_req_pool); | ||
984 | |||
985 | cleanup_scsi_cmd: | ||
986 | sc->result = DID_TRANSPORT_DISRUPTED << 16; | ||
987 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:" | ||
988 | " DID_TRANSPORT_DISRUPTED\n"); | ||
989 | |||
990 | /* Complete the command to SCSI */ | ||
991 | if (sc->scsi_done) | ||
992 | sc->scsi_done(sc); | ||
993 | } | ||
994 | } | ||
995 | |||
996 | void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, | ||
997 | struct fcpio_host_req *desc) | ||
998 | { | ||
999 | u32 id; | ||
1000 | struct fnic *fnic = vnic_dev_priv(wq->vdev); | ||
1001 | struct fnic_io_req *io_req; | ||
1002 | struct scsi_cmnd *sc; | ||
1003 | unsigned long flags; | ||
1004 | spinlock_t *io_lock; | ||
1005 | |||
1006 | /* get the tag reference */ | ||
1007 | fcpio_tag_id_dec(&desc->hdr.tag, &id); | ||
1008 | id &= FNIC_TAG_MASK; | ||
1009 | |||
1010 | if (id >= FNIC_MAX_IO_REQ) | ||
1011 | return; | ||
1012 | |||
1013 | sc = scsi_host_find_tag(fnic->lport->host, id); | ||
1014 | if (!sc) | ||
1015 | return; | ||
1016 | |||
1017 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
1018 | spin_lock_irqsave(io_lock, flags); | ||
1019 | |||
1020 | /* Get the IO context which this desc refers to */ | ||
1021 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
1022 | |||
1023 | /* fnic interrupts are turned off by now */ | ||
1024 | |||
1025 | if (!io_req) { | ||
1026 | spin_unlock_irqrestore(io_lock, flags); | ||
1027 | goto wq_copy_cleanup_scsi_cmd; | ||
1028 | } | ||
1029 | |||
1030 | CMD_SP(sc) = NULL; | ||
1031 | |||
1032 | spin_unlock_irqrestore(io_lock, flags); | ||
1033 | |||
1034 | fnic_release_ioreq_buf(fnic, io_req, sc); | ||
1035 | mempool_free(io_req, fnic->io_req_pool); | ||
1036 | |||
1037 | wq_copy_cleanup_scsi_cmd: | ||
1038 | sc->result = DID_NO_CONNECT << 16; | ||
1039 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:" | ||
1040 | " DID_NO_CONNECT\n"); | ||
1041 | |||
1042 | if (sc->scsi_done) | ||
1043 | sc->scsi_done(sc); | ||
1044 | } | ||
1045 | |||
1046 | static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, | ||
1047 | u32 task_req, u8 *fc_lun, | ||
1048 | struct fnic_io_req *io_req) | ||
1049 | { | ||
1050 | struct vnic_wq_copy *wq = &fnic->wq_copy[0]; | ||
1051 | unsigned long flags; | ||
1052 | |||
1053 | spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); | ||
1054 | |||
1055 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) | ||
1056 | free_wq_copy_descs(fnic, wq); | ||
1057 | |||
1058 | if (!vnic_wq_copy_desc_avail(wq)) { | ||
1059 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); | ||
1060 | return 1; | ||
1061 | } | ||
1062 | fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT, | ||
1063 | 0, task_req, tag, fc_lun, io_req->port_id, | ||
1064 | fnic->config.ra_tov, fnic->config.ed_tov); | ||
1065 | |||
1066 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); | ||
1067 | return 0; | ||
1068 | } | ||
1069 | |||
1070 | void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) | ||
1071 | { | ||
1072 | int tag; | ||
1073 | struct fnic_io_req *io_req; | ||
1074 | spinlock_t *io_lock; | ||
1075 | unsigned long flags; | ||
1076 | struct scsi_cmnd *sc; | ||
1077 | struct scsi_lun fc_lun; | ||
1078 | enum fnic_ioreq_state old_ioreq_state; | ||
1079 | |||
1080 | FNIC_SCSI_DBG(KERN_DEBUG, | ||
1081 | fnic->lport->host, | ||
1082 | "fnic_rport_reset_exch called portid 0x%06x\n", | ||
1083 | port_id); | ||
1084 | |||
1085 | if (fnic->in_remove) | ||
1086 | return; | ||
1087 | |||
1088 | for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { | ||
1089 | sc = scsi_host_find_tag(fnic->lport->host, tag); | ||
1090 | if (!sc) | ||
1091 | continue; | ||
1092 | |||
1093 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
1094 | spin_lock_irqsave(io_lock, flags); | ||
1095 | |||
1096 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
1097 | |||
1098 | if (!io_req || io_req->port_id != port_id) { | ||
1099 | spin_unlock_irqrestore(io_lock, flags); | ||
1100 | continue; | ||
1101 | } | ||
1102 | |||
1103 | /* | ||
1104 | * Found IO that is still pending with firmware and | ||
1105 | * belongs to rport that went away | ||
1106 | */ | ||
1107 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { | ||
1108 | spin_unlock_irqrestore(io_lock, flags); | ||
1109 | continue; | ||
1110 | } | ||
1111 | old_ioreq_state = CMD_STATE(sc); | ||
1112 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; | ||
1113 | CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; | ||
1114 | |||
1115 | BUG_ON(io_req->abts_done); | ||
1116 | |||
1117 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1118 | "fnic_rport_reset_exch: Issuing abts\n"); | ||
1119 | |||
1120 | spin_unlock_irqrestore(io_lock, flags); | ||
1121 | |||
1122 | /* Now queue the abort command to firmware */ | ||
1123 | int_to_scsilun(sc->device->lun, &fc_lun); | ||
1124 | |||
1125 | if (fnic_queue_abort_io_req(fnic, tag, | ||
1126 | FCPIO_ITMF_ABT_TASK_TERM, | ||
1127 | fc_lun.scsi_lun, io_req)) { | ||
1128 | /* | ||
1129 | * Revert the cmd state back to old state, if | ||
1130 | * it hasn't changed in between. This cmd will get | ||
1131 | * aborted later by scsi_eh, or cleaned up during | ||
1132 | * lun reset | ||
1133 | */ | ||
1134 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
1135 | |||
1136 | spin_lock_irqsave(io_lock, flags); | ||
1137 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) | ||
1138 | CMD_STATE(sc) = old_ioreq_state; | ||
1139 | spin_unlock_irqrestore(io_lock, flags); | ||
1140 | } | ||
1141 | } | ||
1142 | |||
1143 | } | ||
1144 | |||
1145 | void fnic_terminate_rport_io(struct fc_rport *rport) | ||
1146 | { | ||
1147 | int tag; | ||
1148 | struct fnic_io_req *io_req; | ||
1149 | spinlock_t *io_lock; | ||
1150 | unsigned long flags; | ||
1151 | struct scsi_cmnd *sc; | ||
1152 | struct scsi_lun fc_lun; | ||
1153 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | ||
1154 | struct fc_lport *lport = rdata->local_port; | ||
1155 | struct fnic *fnic = lport_priv(lport); | ||
1156 | struct fc_rport *cmd_rport; | ||
1157 | enum fnic_ioreq_state old_ioreq_state; | ||
1158 | |||
1159 | FNIC_SCSI_DBG(KERN_DEBUG, | ||
1160 | fnic->lport->host, "fnic_terminate_rport_io called" | ||
1161 | " wwpn 0x%llx, wwnn0x%llx, portid 0x%06x\n", | ||
1162 | rport->port_name, rport->node_name, | ||
1163 | rport->port_id); | ||
1164 | |||
1165 | if (fnic->in_remove) | ||
1166 | return; | ||
1167 | |||
1168 | for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { | ||
1169 | sc = scsi_host_find_tag(fnic->lport->host, tag); | ||
1170 | if (!sc) | ||
1171 | continue; | ||
1172 | |||
1173 | cmd_rport = starget_to_rport(scsi_target(sc->device)); | ||
1174 | if (rport != cmd_rport) | ||
1175 | continue; | ||
1176 | |||
1177 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
1178 | spin_lock_irqsave(io_lock, flags); | ||
1179 | |||
1180 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
1181 | |||
1182 | if (!io_req || rport != cmd_rport) { | ||
1183 | spin_unlock_irqrestore(io_lock, flags); | ||
1184 | continue; | ||
1185 | } | ||
1186 | |||
1187 | /* | ||
1188 | * Found IO that is still pending with firmware and | ||
1189 | * belongs to rport that went away | ||
1190 | */ | ||
1191 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { | ||
1192 | spin_unlock_irqrestore(io_lock, flags); | ||
1193 | continue; | ||
1194 | } | ||
1195 | old_ioreq_state = CMD_STATE(sc); | ||
1196 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; | ||
1197 | CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; | ||
1198 | |||
1199 | BUG_ON(io_req->abts_done); | ||
1200 | |||
1201 | FNIC_SCSI_DBG(KERN_DEBUG, | ||
1202 | fnic->lport->host, | ||
1203 | "fnic_terminate_rport_io: Issuing abts\n"); | ||
1204 | |||
1205 | spin_unlock_irqrestore(io_lock, flags); | ||
1206 | |||
1207 | /* Now queue the abort command to firmware */ | ||
1208 | int_to_scsilun(sc->device->lun, &fc_lun); | ||
1209 | |||
1210 | if (fnic_queue_abort_io_req(fnic, tag, | ||
1211 | FCPIO_ITMF_ABT_TASK_TERM, | ||
1212 | fc_lun.scsi_lun, io_req)) { | ||
1213 | /* | ||
1214 | * Revert the cmd state back to old state, if | ||
1215 | * it hasn't changed in between. This cmd will get | ||
1216 | * aborted later by scsi_eh, or cleaned up during | ||
1217 | * lun reset | ||
1218 | */ | ||
1219 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
1220 | |||
1221 | spin_lock_irqsave(io_lock, flags); | ||
1222 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) | ||
1223 | CMD_STATE(sc) = old_ioreq_state; | ||
1224 | spin_unlock_irqrestore(io_lock, flags); | ||
1225 | } | ||
1226 | } | ||
1227 | |||
1228 | } | ||
1229 | |||
1230 | static void fnic_block_error_handler(struct scsi_cmnd *sc) | ||
1231 | { | ||
1232 | struct Scsi_Host *shost = sc->device->host; | ||
1233 | struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); | ||
1234 | unsigned long flags; | ||
1235 | |||
1236 | spin_lock_irqsave(shost->host_lock, flags); | ||
1237 | while (rport->port_state == FC_PORTSTATE_BLOCKED) { | ||
1238 | spin_unlock_irqrestore(shost->host_lock, flags); | ||
1239 | msleep(1000); | ||
1240 | spin_lock_irqsave(shost->host_lock, flags); | ||
1241 | } | ||
1242 | spin_unlock_irqrestore(shost->host_lock, flags); | ||
1243 | |||
1244 | } | ||
1245 | |||
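fnic_block_error_handler() parks the error-handler thread while the rport is FC_PORTSTATE_BLOCKED, dropping the host lock around each one-second sleep so completions and transport work can still run. A kernel-style sketch of that drop-the-lock-while-sleeping loop, bounded by a retry cap for illustration (struct demo_port and its fields are hypothetical):

#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/errno.h>

struct demo_port {		/* hypothetical, for illustration only */
	spinlock_t lock;	/* assumed already initialised */
	int blocked;
};

static int demo_wait_unblocked(struct demo_port *p, unsigned int max_secs)
{
	unsigned int waited = 0;
	unsigned long flags;
	int still_blocked;

	spin_lock_irqsave(&p->lock, flags);
	while (p->blocked && waited < max_secs) {
		spin_unlock_irqrestore(&p->lock, flags);
		msleep(1000);		/* never sleep while holding the lock */
		waited++;
		spin_lock_irqsave(&p->lock, flags);
	}
	still_blocked = p->blocked;
	spin_unlock_irqrestore(&p->lock, flags);

	return still_blocked ? -ETIMEDOUT : 0;
}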
1246 | /* | ||
1247 | * This function is exported to SCSI for sending abort cmnds. | ||
1248 | * A SCSI IO is represented by a io_req in the driver. | ||
1249 | * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO. | ||
1250 | */ | ||
1251 | int fnic_abort_cmd(struct scsi_cmnd *sc) | ||
1252 | { | ||
1253 | struct fc_lport *lp; | ||
1254 | struct fnic *fnic; | ||
1255 | struct fnic_io_req *io_req; | ||
1256 | struct fc_rport *rport; | ||
1257 | spinlock_t *io_lock; | ||
1258 | unsigned long flags; | ||
1259 | int ret = SUCCESS; | ||
1260 | u32 task_req; | ||
1261 | struct scsi_lun fc_lun; | ||
1262 | DECLARE_COMPLETION_ONSTACK(tm_done); | ||
1263 | |||
1264 | /* Wait for rport to unblock */ | ||
1265 | fnic_block_error_handler(sc); | ||
1266 | |||
1267 | /* Get local-port, check ready and link up */ | ||
1268 | lp = shost_priv(sc->device->host); | ||
1269 | |||
1270 | fnic = lport_priv(lp); | ||
1271 | FNIC_SCSI_DBG(KERN_DEBUG, | ||
1272 | fnic->lport->host, | ||
1273 | "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n", | ||
1274 | (starget_to_rport(scsi_target(sc->device)))->port_id, | ||
1275 | sc->device->lun, sc->request->tag); | ||
1276 | |||
1277 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) { | ||
1278 | ret = FAILED; | ||
1279 | goto fnic_abort_cmd_end; | ||
1280 | } | ||
1281 | |||
1282 | /* | ||
1283 | * Avoid a race between SCSI issuing the abort and the device | ||
1284 | * completing the command. | ||
1285 | * | ||
1286 | * If the command is already completed by the fw cmpl code, | ||
1287 | * we just return SUCCESS from here. This means that the abort | ||
1288 | * succeeded. In the SCSI ML, since the timeout for the command has | ||
1289 | * happened, the completion won't actually complete the command, | ||
1290 | * and it will be considered an aborted command. | ||
1291 | * | ||
1292 | * The CMD_SP will not be cleared except while holding io_req_lock. | ||
1293 | */ | ||
1294 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
1295 | spin_lock_irqsave(io_lock, flags); | ||
1296 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
1297 | if (!io_req) { | ||
1298 | spin_unlock_irqrestore(io_lock, flags); | ||
1299 | goto fnic_abort_cmd_end; | ||
1300 | } | ||
1301 | |||
1302 | io_req->abts_done = &tm_done; | ||
1303 | |||
1304 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { | ||
1305 | spin_unlock_irqrestore(io_lock, flags); | ||
1306 | goto wait_pending; | ||
1307 | } | ||
1308 | /* | ||
1309 | * Command is still pending, need to abort it | ||
1310 | * If the firmware completes the command after this point, | ||
1311 | * the completion won't reach the mid-layer, since the abort | ||
1312 | * has already started. | ||
1313 | */ | ||
1314 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; | ||
1315 | CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; | ||
1316 | |||
1317 | spin_unlock_irqrestore(io_lock, flags); | ||
1318 | |||
1319 | /* | ||
1320 | * Check readiness of the remote port. If the path to remote | ||
1321 | * port is up, then send abts to the remote port to terminate | ||
1322 | * the IO. Else, just locally terminate the IO in the firmware | ||
1323 | */ | ||
1324 | rport = starget_to_rport(scsi_target(sc->device)); | ||
1325 | if (fc_remote_port_chkready(rport) == 0) | ||
1326 | task_req = FCPIO_ITMF_ABT_TASK; | ||
1327 | else | ||
1328 | task_req = FCPIO_ITMF_ABT_TASK_TERM; | ||
1329 | |||
1330 | /* Now queue the abort command to firmware */ | ||
1331 | int_to_scsilun(sc->device->lun, &fc_lun); | ||
1332 | |||
1333 | if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req, | ||
1334 | fc_lun.scsi_lun, io_req)) { | ||
1335 | spin_lock_irqsave(io_lock, flags); | ||
1336 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
1337 | if (io_req) | ||
1338 | io_req->abts_done = NULL; | ||
1339 | spin_unlock_irqrestore(io_lock, flags); | ||
1340 | ret = FAILED; | ||
1341 | goto fnic_abort_cmd_end; | ||
1342 | } | ||
1343 | |||
1344 | /* | ||
1345 | * We queued an abort IO, wait for its completion. | ||
1346 | * Once the firmware completes the abort command, it will | ||
1347 | * wake up this thread. | ||
1348 | */ | ||
1349 | wait_pending: | ||
1350 | wait_for_completion_timeout(&tm_done, | ||
1351 | msecs_to_jiffies | ||
1352 | (2 * fnic->config.ra_tov + | ||
1353 | fnic->config.ed_tov)); | ||
1354 | |||
1355 | /* Check the abort status */ | ||
1356 | spin_lock_irqsave(io_lock, flags); | ||
1357 | |||
1358 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
1359 | if (!io_req) { | ||
1360 | spin_unlock_irqrestore(io_lock, flags); | ||
1361 | ret = FAILED; | ||
1362 | goto fnic_abort_cmd_end; | ||
1363 | } | ||
1364 | io_req->abts_done = NULL; | ||
1365 | |||
1366 | /* fw did not complete abort, timed out */ | ||
1367 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { | ||
1368 | spin_unlock_irqrestore(io_lock, flags); | ||
1369 | ret = FAILED; | ||
1370 | goto fnic_abort_cmd_end; | ||
1371 | } | ||
1372 | |||
1373 | /* | ||
1374 | * firmware completed the abort, check the status, | ||
1375 | * free the io_req irrespective of failure or success | ||
1376 | */ | ||
1377 | if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS) | ||
1378 | ret = FAILED; | ||
1379 | |||
1380 | CMD_SP(sc) = NULL; | ||
1381 | |||
1382 | spin_unlock_irqrestore(io_lock, flags); | ||
1383 | |||
1384 | fnic_release_ioreq_buf(fnic, io_req, sc); | ||
1385 | mempool_free(io_req, fnic->io_req_pool); | ||
1386 | |||
1387 | fnic_abort_cmd_end: | ||
1388 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1389 | "Returning from abort cmd %s\n", | ||
1390 | (ret == SUCCESS) ? | ||
1391 | "SUCCESS" : "FAILED"); | ||
1392 | return ret; | ||
1393 | } | ||
1394 | |||
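fnic_abort_cmd() parks the eh thread on an on-stack completion that the ITMF completion handler signals, bounded by 2 * R_A_TOV + E_D_TOV; on timeout the abort is failed and the io_req is left for later cleanup. A kernel-style sketch of that handshake (struct demo_tm and its fields are hypothetical; the real driver sets and clears abts_done under io_lock):

#include <linux/completion.h>
#include <linux/jiffies.h>

struct demo_tm {			/* hypothetical, for illustration only */
	struct completion *done;	/* non-NULL while a waiter is parked */
	int status;			/* filled in by the completion path */
};

/* Called from the completion path when firmware answers the task mgmt req. */
static void demo_tm_complete(struct demo_tm *tm, int status)
{
	tm->status = status;
	if (tm->done)
		complete(tm->done);
}

/* Called from the eh thread; returns 0 on success, -1 on timeout/failure. */
static int demo_tm_wait(struct demo_tm *tm, unsigned int timeout_ms)
{
	DECLARE_COMPLETION_ONSTACK(tm_done);

	tm->done = &tm_done;
	if (!wait_for_completion_timeout(&tm_done,
					 msecs_to_jiffies(timeout_ms))) {
		tm->done = NULL;	/* real code must do this under a lock */
		return -1;		/* firmware never answered */
	}
	tm->done = NULL;
	return tm->status ? -1 : 0;
}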
1395 | static inline int fnic_queue_dr_io_req(struct fnic *fnic, | ||
1396 | struct scsi_cmnd *sc, | ||
1397 | struct fnic_io_req *io_req) | ||
1398 | { | ||
1399 | struct vnic_wq_copy *wq = &fnic->wq_copy[0]; | ||
1400 | struct scsi_lun fc_lun; | ||
1401 | int ret = 0; | ||
1402 | unsigned long intr_flags; | ||
1403 | |||
1404 | spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); | ||
1405 | |||
1406 | if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) | ||
1407 | free_wq_copy_descs(fnic, wq); | ||
1408 | |||
1409 | if (!vnic_wq_copy_desc_avail(wq)) { | ||
1410 | ret = -EAGAIN; | ||
1411 | goto lr_io_req_end; | ||
1412 | } | ||
1413 | |||
1414 | /* fill in the lun info */ | ||
1415 | int_to_scsilun(sc->device->lun, &fc_lun); | ||
1416 | |||
1417 | fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST, | ||
1418 | 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG, | ||
1419 | fc_lun.scsi_lun, io_req->port_id, | ||
1420 | fnic->config.ra_tov, fnic->config.ed_tov); | ||
1421 | |||
1422 | lr_io_req_end: | ||
1423 | spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); | ||
1424 | |||
1425 | return ret; | ||
1426 | } | ||
1427 | |||
1428 | /* | ||
1429 | * Clean up any pending aborts on the lun | ||
1430 | * For each outstanding IO on this lun whose abort is not completed by fw, | ||
1431 | * issue a local abort. Wait for abort to complete. Return 0 if all commands | ||
1432 | * successfully aborted, 1 otherwise | ||
1433 | */ | ||
1434 | static int fnic_clean_pending_aborts(struct fnic *fnic, | ||
1435 | struct scsi_cmnd *lr_sc) | ||
1436 | { | ||
1437 | int tag; | ||
1438 | struct fnic_io_req *io_req; | ||
1439 | spinlock_t *io_lock; | ||
1440 | unsigned long flags; | ||
1441 | int ret = 0; | ||
1442 | struct scsi_cmnd *sc; | ||
1443 | struct fc_rport *rport; | ||
1444 | struct scsi_lun fc_lun; | ||
1445 | struct scsi_device *lun_dev = lr_sc->device; | ||
1446 | DECLARE_COMPLETION_ONSTACK(tm_done); | ||
1447 | |||
1448 | for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { | ||
1449 | sc = scsi_host_find_tag(fnic->lport->host, tag); | ||
1450 | /* | ||
1451 | * ignore this lun reset cmd or cmds that do not belong to | ||
1452 | * this lun | ||
1453 | */ | ||
1454 | if (!sc || sc == lr_sc || sc->device != lun_dev) | ||
1455 | continue; | ||
1456 | |||
1457 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
1458 | spin_lock_irqsave(io_lock, flags); | ||
1459 | |||
1460 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
1461 | |||
1462 | if (!io_req || sc->device != lun_dev) { | ||
1463 | spin_unlock_irqrestore(io_lock, flags); | ||
1464 | continue; | ||
1465 | } | ||
1466 | |||
1467 | /* | ||
1468 | * Found IO that is still pending with firmware and | ||
1469 | * belongs to the LUN that we are resetting | ||
1470 | */ | ||
1471 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1472 | "Found IO in %s on lun\n", | ||
1473 | fnic_ioreq_state_to_str(CMD_STATE(sc))); | ||
1474 | |||
1475 | BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING); | ||
1476 | |||
1477 | CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; | ||
1478 | io_req->abts_done = &tm_done; | ||
1479 | spin_unlock_irqrestore(io_lock, flags); | ||
1480 | |||
1481 | /* Now queue the abort command to firmware */ | ||
1482 | int_to_scsilun(sc->device->lun, &fc_lun); | ||
1483 | rport = starget_to_rport(scsi_target(sc->device)); | ||
1484 | |||
1485 | if (fnic_queue_abort_io_req(fnic, tag, | ||
1486 | FCPIO_ITMF_ABT_TASK_TERM, | ||
1487 | fc_lun.scsi_lun, io_req)) { | ||
1488 | spin_lock_irqsave(io_lock, flags); | ||
1489 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
1490 | if (io_req) | ||
1491 | io_req->abts_done = NULL; | ||
1492 | spin_unlock_irqrestore(io_lock, flags); | ||
1493 | ret = 1; | ||
1494 | goto clean_pending_aborts_end; | ||
1495 | } | ||
1496 | |||
1497 | wait_for_completion_timeout(&tm_done, | ||
1498 | msecs_to_jiffies | ||
1499 | (fnic->config.ed_tov)); | ||
1500 | |||
1501 | /* Recheck cmd state to check if it is now aborted */ | ||
1502 | spin_lock_irqsave(io_lock, flags); | ||
1503 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
1504 | if (!io_req) { | ||
1505 | spin_unlock_irqrestore(io_lock, flags); | ||
1506 | ret = 1; | ||
1507 | goto clean_pending_aborts_end; | ||
1508 | } | ||
1509 | |||
1510 | io_req->abts_done = NULL; | ||
1511 | |||
1512 | /* if abort is still pending with fw, fail */ | ||
1513 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { | ||
1514 | spin_unlock_irqrestore(io_lock, flags); | ||
1515 | ret = 1; | ||
1516 | goto clean_pending_aborts_end; | ||
1517 | } | ||
1518 | CMD_SP(sc) = NULL; | ||
1519 | spin_unlock_irqrestore(io_lock, flags); | ||
1520 | |||
1521 | fnic_release_ioreq_buf(fnic, io_req, sc); | ||
1522 | mempool_free(io_req, fnic->io_req_pool); | ||
1523 | } | ||
1524 | |||
1525 | clean_pending_aborts_end: | ||
1526 | return ret; | ||
1527 | } | ||
1528 | |||
1529 | /* | ||
1530 | * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN | ||
1531 | * fail to get aborted. It calls driver's eh_device_reset with a SCSI command | ||
1532 | * on the LUN. | ||
1533 | */ | ||
1534 | int fnic_device_reset(struct scsi_cmnd *sc) | ||
1535 | { | ||
1536 | struct fc_lport *lp; | ||
1537 | struct fnic *fnic; | ||
1538 | struct fnic_io_req *io_req; | ||
1539 | struct fc_rport *rport; | ||
1540 | int status; | ||
1541 | int ret = FAILED; | ||
1542 | spinlock_t *io_lock; | ||
1543 | unsigned long flags; | ||
1544 | DECLARE_COMPLETION_ONSTACK(tm_done); | ||
1545 | |||
1546 | /* Wait for rport to unblock */ | ||
1547 | fnic_block_error_handler(sc); | ||
1548 | |||
1549 | /* Get local-port, check ready and link up */ | ||
1550 | lp = shost_priv(sc->device->host); | ||
1551 | |||
1552 | fnic = lport_priv(lp); | ||
1553 | FNIC_SCSI_DBG(KERN_DEBUG, | ||
1554 | fnic->lport->host, | ||
1555 | "Device reset called FCID 0x%x, LUN 0x%x\n", | ||
1556 | (starget_to_rport(scsi_target(sc->device)))->port_id, | ||
1557 | sc->device->lun); | ||
1558 | |||
1559 | |||
1560 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) | ||
1561 | goto fnic_device_reset_end; | ||
1562 | |||
1563 | /* Check if remote port up */ | ||
1564 | rport = starget_to_rport(scsi_target(sc->device)); | ||
1565 | if (fc_remote_port_chkready(rport)) | ||
1566 | goto fnic_device_reset_end; | ||
1567 | |||
1568 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
1569 | spin_lock_irqsave(io_lock, flags); | ||
1570 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
1571 | |||
1572 | /* | ||
1573 | * If there is a io_req attached to this command, then use it, | ||
1574 | * else allocate a new one. | ||
1575 | */ | ||
1576 | if (!io_req) { | ||
1577 | io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); | ||
1578 | if (!io_req) { | ||
1579 | spin_unlock_irqrestore(io_lock, flags); | ||
1580 | goto fnic_device_reset_end; | ||
1581 | } | ||
1582 | memset(io_req, 0, sizeof(*io_req)); | ||
1583 | io_req->port_id = rport->port_id; | ||
1584 | CMD_SP(sc) = (char *)io_req; | ||
1585 | } | ||
1586 | io_req->dr_done = &tm_done; | ||
1587 | CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; | ||
1588 | CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE; | ||
1589 | spin_unlock_irqrestore(io_lock, flags); | ||
1590 | |||
1591 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n", | ||
1592 | sc->request->tag); | ||
1593 | |||
1594 | /* | ||
1595 | * issue the device reset, if enqueue failed, clean up the ioreq | ||
1596 | * and break assoc with scsi cmd | ||
1597 | */ | ||
1598 | if (fnic_queue_dr_io_req(fnic, sc, io_req)) { | ||
1599 | spin_lock_irqsave(io_lock, flags); | ||
1600 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
1601 | if (io_req) | ||
1602 | io_req->dr_done = NULL; | ||
1603 | goto fnic_device_reset_clean; | ||
1604 | } | ||
1605 | |||
1606 | /* | ||
1607 | * Wait on the local completion for LUN reset. The io_req may be | ||
1608 | * freed while we wait since we hold no lock. | ||
1609 | */ | ||
1610 | wait_for_completion_timeout(&tm_done, | ||
1611 | msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); | ||
1612 | |||
1613 | spin_lock_irqsave(io_lock, flags); | ||
1614 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
1615 | if (!io_req) { | ||
1616 | spin_unlock_irqrestore(io_lock, flags); | ||
1617 | goto fnic_device_reset_end; | ||
1618 | } | ||
1619 | io_req->dr_done = NULL; | ||
1620 | |||
1621 | status = CMD_LR_STATUS(sc); | ||
1622 | spin_unlock_irqrestore(io_lock, flags); | ||
1623 | |||
1624 | /* | ||
1625 | * If the LUN reset did not complete, bail out with FAILED. The io_req | ||
1626 | * gets cleaned up at higher levels of EH | ||
1627 | */ | ||
1628 | if (status == FCPIO_INVALID_CODE) { | ||
1629 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1630 | "Device reset timed out\n"); | ||
1631 | goto fnic_device_reset_end; | ||
1632 | } | ||
1633 | |||
1634 | /* Completed, but not successful, clean up the io_req, return fail */ | ||
1635 | if (status != FCPIO_SUCCESS) { | ||
1636 | spin_lock_irqsave(io_lock, flags); | ||
1637 | FNIC_SCSI_DBG(KERN_DEBUG, | ||
1638 | fnic->lport->host, | ||
1639 | "Device reset completed - failed\n"); | ||
1640 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
1641 | goto fnic_device_reset_clean; | ||
1642 | } | ||
1643 | |||
1644 | /* | ||
1645 | * Clean up any aborts on this lun that have still not | ||
1646 | * completed. If any of these fail, then LUN reset fails. | ||
1647 | * clean_pending_aborts cleans all cmds on this lun except | ||
1648 | * the lun reset cmd. If all cmds get cleaned, the lun reset | ||
1649 | * succeeds | ||
1650 | */ | ||
1651 | if (fnic_clean_pending_aborts(fnic, sc)) { | ||
1652 | spin_lock_irqsave(io_lock, flags); | ||
1653 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
1654 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1655 | "Device reset failed" | ||
1656 | " since could not abort all IOs\n"); | ||
1657 | goto fnic_device_reset_clean; | ||
1658 | } | ||
1659 | |||
1660 | /* Clean lun reset command */ | ||
1661 | spin_lock_irqsave(io_lock, flags); | ||
1662 | io_req = (struct fnic_io_req *)CMD_SP(sc); | ||
1663 | if (io_req) | ||
1664 | /* Completed, and successful */ | ||
1665 | ret = SUCCESS; | ||
1666 | |||
1667 | fnic_device_reset_clean: | ||
1668 | if (io_req) | ||
1669 | CMD_SP(sc) = NULL; | ||
1670 | |||
1671 | spin_unlock_irqrestore(io_lock, flags); | ||
1672 | |||
1673 | if (io_req) { | ||
1674 | fnic_release_ioreq_buf(fnic, io_req, sc); | ||
1675 | mempool_free(io_req, fnic->io_req_pool); | ||
1676 | } | ||
1677 | |||
1678 | fnic_device_reset_end: | ||
1679 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1680 | "Returning from device reset %s\n", | ||
1681 | (ret == SUCCESS) ? | ||
1682 | "SUCCESS" : "FAILED"); | ||
1683 | return ret; | ||
1684 | } | ||
1685 | |||
1686 | /* Clean up all IOs, clean up libFC local port */ | ||
1687 | int fnic_reset(struct Scsi_Host *shost) | ||
1688 | { | ||
1689 | struct fc_lport *lp; | ||
1690 | struct fnic *fnic; | ||
1691 | int ret = SUCCESS; | ||
1692 | |||
1693 | lp = shost_priv(shost); | ||
1694 | fnic = lport_priv(lp); | ||
1695 | |||
1696 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1697 | "fnic_reset called\n"); | ||
1698 | |||
1699 | /* | ||
1700 | * Reset the local port; this will clean up libFC exchanges, | ||
1701 | * reset remote port sessions, and, if the link is up, begin FLOGI | ||
1702 | */ | ||
1703 | if (lp->tt.lport_reset(lp)) | ||
1704 | ret = FAILED; | ||
1705 | |||
1706 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1707 | "Returning from fnic reset %s\n", | ||
1708 | (ret == SUCCESS) ? | ||
1709 | "SUCCESS" : "FAILED"); | ||
1710 | |||
1711 | return ret; | ||
1712 | } | ||
1713 | |||
1714 | /* | ||
1715 | * SCSI error handling calls the driver's eh_host_reset if all prior | ||
1716 | * error handling levels return FAILED. If the host reset completes | ||
1717 | * successfully, and if the link is up, then fabric login begins. | ||
1718 | * | ||
1719 | * Host reset is the highest level of error recovery. If this fails, the | ||
1720 | * host is offlined by SCSI. | ||
1721 | * | ||
1722 | */ | ||
1723 | int fnic_host_reset(struct scsi_cmnd *sc) | ||
1724 | { | ||
1725 | int ret; | ||
1726 | unsigned long wait_host_tmo; | ||
1727 | struct Scsi_Host *shost = sc->device->host; | ||
1728 | struct fc_lport *lp = shost_priv(shost); | ||
1729 | |||
1730 | /* | ||
1731 | * If fnic_reset is successful, wait for the fabric login to complete. | ||
1732 | * scsi-ml tries to send a TUR to every device if the host reset is | ||
1733 | * successful, so the fabric should be up before returning to scsi | ||
1734 | */ | ||
1735 | ret = fnic_reset(shost); | ||
1736 | if (ret == SUCCESS) { | ||
1737 | wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ; | ||
1738 | ret = FAILED; | ||
1739 | while (time_before(jiffies, wait_host_tmo)) { | ||
1740 | if ((lp->state == LPORT_ST_READY) && | ||
1741 | (lp->link_up)) { | ||
1742 | ret = SUCCESS; | ||
1743 | break; | ||
1744 | } | ||
1745 | ssleep(1); | ||
1746 | } | ||
1747 | } | ||
1748 | |||
1749 | return ret; | ||
1750 | } | ||
1751 | |||
1752 | /* | ||
1753 | * This function is called from libFC when the host is removed | ||
1754 | */ | ||
1755 | void fnic_scsi_abort_io(struct fc_lport *lp) | ||
1756 | { | ||
1757 | int err = 0; | ||
1758 | unsigned long flags; | ||
1759 | enum fnic_state old_state; | ||
1760 | struct fnic *fnic = lport_priv(lp); | ||
1761 | DECLARE_COMPLETION_ONSTACK(remove_wait); | ||
1762 | |||
1763 | /* Issue firmware reset for fnic, wait for reset to complete */ | ||
1764 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
1765 | fnic->remove_wait = &remove_wait; | ||
1766 | old_state = fnic->state; | ||
1767 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; | ||
1768 | vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); | ||
1769 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
1770 | |||
1771 | err = fnic_fw_reset_handler(fnic); | ||
1772 | if (err) { | ||
1773 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
1774 | if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) | ||
1775 | fnic->state = old_state; | ||
1776 | fnic->remove_wait = NULL; | ||
1777 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
1778 | return; | ||
1779 | } | ||
1780 | |||
1781 | /* Wait for firmware reset to complete */ | ||
1782 | wait_for_completion_timeout(&remove_wait, | ||
1783 | msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT)); | ||
1784 | |||
1785 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
1786 | fnic->remove_wait = NULL; | ||
1787 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | ||
1788 | "fnic_scsi_abort_io %s\n", | ||
1789 | (fnic->state == FNIC_IN_ETH_MODE) ? | ||
1790 | "SUCCESS" : "FAILED"); | ||
1791 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
1792 | |||
1793 | } | ||
1794 | |||
1795 | /* | ||
1796 | * This function is called from libFC to clean up driver IO state on link down | ||
1797 | */ | ||
1798 | void fnic_scsi_cleanup(struct fc_lport *lp) | ||
1799 | { | ||
1800 | unsigned long flags; | ||
1801 | enum fnic_state old_state; | ||
1802 | struct fnic *fnic = lport_priv(lp); | ||
1803 | |||
1804 | /* issue fw reset */ | ||
1805 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
1806 | old_state = fnic->state; | ||
1807 | fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; | ||
1808 | vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr); | ||
1809 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
1810 | |||
1811 | if (fnic_fw_reset_handler(fnic)) { | ||
1812 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
1813 | if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) | ||
1814 | fnic->state = old_state; | ||
1815 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
1816 | } | ||
1817 | |||
1818 | } | ||
1819 | |||
1820 | void fnic_empty_scsi_cleanup(struct fc_lport *lp) | ||
1821 | { | ||
1822 | } | ||
1823 | |||
1824 | void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) | ||
1825 | { | ||
1826 | struct fnic *fnic = lport_priv(lp); | ||
1827 | |||
1828 | /* Non-zero sid, nothing to do */ | ||
1829 | if (sid) | ||
1830 | goto call_fc_exch_mgr_reset; | ||
1831 | |||
1832 | if (did) { | ||
1833 | fnic_rport_exch_reset(fnic, did); | ||
1834 | goto call_fc_exch_mgr_reset; | ||
1835 | } | ||
1836 | |||
1837 | /* | ||
1838 | * sid = 0, did = 0 | ||
1839 | * link down or device being removed | ||
1840 | */ | ||
1841 | if (!fnic->in_remove) | ||
1842 | fnic_scsi_cleanup(lp); | ||
1843 | else | ||
1844 | fnic_scsi_abort_io(lp); | ||
1845 | |||
1846 | /* call libFC exch mgr reset to reset its exchanges */ | ||
1847 | call_fc_exch_mgr_reset: | ||
1848 | fc_exch_mgr_reset(lp, sid, did); | ||
1849 | |||
1850 | } | ||
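The settle-time loop in fnic_host_reset above polls the lport state once per second until FNIC_HOST_RESET_SETTLE_TIME expires. A minimal user-space sketch of the same bounded poll-until-ready pattern follows; fabric_ready() and the 10-second settle time are made-up stand-ins, not part of the driver:

	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	/* hypothetical stand-in for "lport is READY and link is up" */
	static bool fabric_ready(void)
	{
		static int polls;
		return ++polls >= 3;	/* pretend fabric login finishes on the 3rd poll */
	}

	int main(void)
	{
		const int settle_secs = 10;	/* plays the role of FNIC_HOST_RESET_SETTLE_TIME */
		time_t deadline = time(NULL) + settle_secs;
		int ret = 1;			/* FAILED until the fabric comes up */

		while (time(NULL) < deadline) {
			if (fabric_ready()) {
				ret = 0;	/* SUCCESS */
				break;
			}
			sleep(1);		/* one poll per second, like ssleep(1) */
		}
		printf("host reset %s\n", ret == 0 ? "SUCCESS" : "FAILED");
		return 0;
	}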
diff --git a/drivers/scsi/fnic/rq_enet_desc.h b/drivers/scsi/fnic/rq_enet_desc.h new file mode 100644 index 000000000000..92e80ae6b725 --- /dev/null +++ b/drivers/scsi/fnic/rq_enet_desc.h | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _RQ_ENET_DESC_H_ | ||
19 | #define _RQ_ENET_DESC_H_ | ||
20 | |||
21 | /* Ethernet receive queue descriptor: 16B */ | ||
22 | struct rq_enet_desc { | ||
23 | __le64 address; | ||
24 | __le16 length_type; | ||
25 | u8 reserved[6]; | ||
26 | }; | ||
27 | |||
28 | enum rq_enet_type_types { | ||
29 | RQ_ENET_TYPE_ONLY_SOP = 0, | ||
30 | RQ_ENET_TYPE_NOT_SOP = 1, | ||
31 | RQ_ENET_TYPE_RESV2 = 2, | ||
32 | RQ_ENET_TYPE_RESV3 = 3, | ||
33 | }; | ||
34 | |||
35 | #define RQ_ENET_ADDR_BITS 64 | ||
36 | #define RQ_ENET_LEN_BITS 14 | ||
37 | #define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1) | ||
38 | #define RQ_ENET_TYPE_BITS 2 | ||
39 | #define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1) | ||
40 | |||
41 | static inline void rq_enet_desc_enc(struct rq_enet_desc *desc, | ||
42 | u64 address, u8 type, u16 length) | ||
43 | { | ||
44 | desc->address = cpu_to_le64(address); | ||
45 | desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) | | ||
46 | ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS)); | ||
47 | } | ||
48 | |||
49 | static inline void rq_enet_desc_dec(struct rq_enet_desc *desc, | ||
50 | u64 *address, u8 *type, u16 *length) | ||
51 | { | ||
52 | *address = le64_to_cpu(desc->address); | ||
53 | *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK; | ||
54 | *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) & | ||
55 | RQ_ENET_TYPE_MASK); | ||
56 | } | ||
57 | |||
58 | #endif /* _RQ_ENET_DESC_H_ */ | ||
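rq_enet_desc packs a 14-bit buffer length and a 2-bit SOP/not-SOP type into the single little-endian length_type word. Below is a small host-endian sketch of that packing, using plain uint16_t instead of __le16 and arbitrary example values:

	#include <stdint.h>
	#include <stdio.h>

	#define LEN_BITS  14
	#define LEN_MASK  ((1u << LEN_BITS) - 1)	/* low 14 bits: buffer length */
	#define TYPE_MASK 0x3u				/* top 2 bits: descriptor type */

	int main(void)
	{
		uint16_t len = 2048;	/* must fit in 14 bits (max 16383) */
		uint8_t type = 0;	/* RQ_ENET_TYPE_ONLY_SOP */
		uint16_t length_type;

		/* encode, as rq_enet_desc_enc() does before the cpu_to_le16() swap */
		length_type = (uint16_t)((len & LEN_MASK) | ((type & TYPE_MASK) << LEN_BITS));

		/* decode, as rq_enet_desc_dec() does after le16_to_cpu() */
		printf("len=%u type=%u\n",
		       length_type & LEN_MASK,
		       (length_type >> LEN_BITS) & TYPE_MASK);
		return 0;
	}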
diff --git a/drivers/scsi/fnic/vnic_cq.c b/drivers/scsi/fnic/vnic_cq.c new file mode 100644 index 000000000000..c5db32eda5ef --- /dev/null +++ b/drivers/scsi/fnic/vnic_cq.c | |||
@@ -0,0 +1,85 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/pci.h> | ||
21 | #include "vnic_dev.h" | ||
22 | #include "vnic_cq.h" | ||
23 | |||
24 | void vnic_cq_free(struct vnic_cq *cq) | ||
25 | { | ||
26 | vnic_dev_free_desc_ring(cq->vdev, &cq->ring); | ||
27 | |||
28 | cq->ctrl = NULL; | ||
29 | } | ||
30 | |||
31 | int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, | ||
32 | unsigned int desc_count, unsigned int desc_size) | ||
33 | { | ||
34 | int err; | ||
35 | |||
36 | cq->index = index; | ||
37 | cq->vdev = vdev; | ||
38 | |||
39 | cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); | ||
40 | if (!cq->ctrl) { | ||
41 | printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index); | ||
42 | return -EINVAL; | ||
43 | } | ||
44 | |||
45 | err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); | ||
46 | if (err) | ||
47 | return err; | ||
48 | |||
49 | return 0; | ||
50 | } | ||
51 | |||
52 | void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, | ||
53 | unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, | ||
54 | unsigned int cq_tail_color, unsigned int interrupt_enable, | ||
55 | unsigned int cq_entry_enable, unsigned int cq_message_enable, | ||
56 | unsigned int interrupt_offset, u64 cq_message_addr) | ||
57 | { | ||
58 | u64 paddr; | ||
59 | |||
60 | paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; | ||
61 | writeq(paddr, &cq->ctrl->ring_base); | ||
62 | iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); | ||
63 | iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable); | ||
64 | iowrite32(color_enable, &cq->ctrl->color_enable); | ||
65 | iowrite32(cq_head, &cq->ctrl->cq_head); | ||
66 | iowrite32(cq_tail, &cq->ctrl->cq_tail); | ||
67 | iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color); | ||
68 | iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable); | ||
69 | iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable); | ||
70 | iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable); | ||
71 | iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset); | ||
72 | writeq(cq_message_addr, &cq->ctrl->cq_message_addr); | ||
73 | } | ||
74 | |||
75 | void vnic_cq_clean(struct vnic_cq *cq) | ||
76 | { | ||
77 | cq->to_clean = 0; | ||
78 | cq->last_color = 0; | ||
79 | |||
80 | iowrite32(0, &cq->ctrl->cq_head); | ||
81 | iowrite32(0, &cq->ctrl->cq_tail); | ||
82 | iowrite32(1, &cq->ctrl->cq_tail_color); | ||
83 | |||
84 | vnic_dev_clear_desc_ring(&cq->ring); | ||
85 | } | ||
diff --git a/drivers/scsi/fnic/vnic_cq.h b/drivers/scsi/fnic/vnic_cq.h new file mode 100644 index 000000000000..4ede6809fb1e --- /dev/null +++ b/drivers/scsi/fnic/vnic_cq.h | |||
@@ -0,0 +1,121 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _VNIC_CQ_H_ | ||
19 | #define _VNIC_CQ_H_ | ||
20 | |||
21 | #include "cq_desc.h" | ||
22 | #include "vnic_dev.h" | ||
23 | |||
24 | /* | ||
25 | * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth | ||
26 | * Driver) when both are built with CONFIG options =y | ||
27 | */ | ||
28 | #define vnic_cq_service fnic_cq_service | ||
29 | #define vnic_cq_free fnic_cq_free | ||
30 | #define vnic_cq_alloc fnic_cq_alloc | ||
31 | #define vnic_cq_init fnic_cq_init | ||
32 | #define vnic_cq_clean fnic_cq_clean | ||
33 | |||
34 | /* Completion queue control */ | ||
35 | struct vnic_cq_ctrl { | ||
36 | u64 ring_base; /* 0x00 */ | ||
37 | u32 ring_size; /* 0x08 */ | ||
38 | u32 pad0; | ||
39 | u32 flow_control_enable; /* 0x10 */ | ||
40 | u32 pad1; | ||
41 | u32 color_enable; /* 0x18 */ | ||
42 | u32 pad2; | ||
43 | u32 cq_head; /* 0x20 */ | ||
44 | u32 pad3; | ||
45 | u32 cq_tail; /* 0x28 */ | ||
46 | u32 pad4; | ||
47 | u32 cq_tail_color; /* 0x30 */ | ||
48 | u32 pad5; | ||
49 | u32 interrupt_enable; /* 0x38 */ | ||
50 | u32 pad6; | ||
51 | u32 cq_entry_enable; /* 0x40 */ | ||
52 | u32 pad7; | ||
53 | u32 cq_message_enable; /* 0x48 */ | ||
54 | u32 pad8; | ||
55 | u32 interrupt_offset; /* 0x50 */ | ||
56 | u32 pad9; | ||
57 | u64 cq_message_addr; /* 0x58 */ | ||
58 | u32 pad10; | ||
59 | }; | ||
60 | |||
61 | struct vnic_cq { | ||
62 | unsigned int index; | ||
63 | struct vnic_dev *vdev; | ||
64 | struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */ | ||
65 | struct vnic_dev_ring ring; | ||
66 | unsigned int to_clean; | ||
67 | unsigned int last_color; | ||
68 | }; | ||
69 | |||
70 | static inline unsigned int vnic_cq_service(struct vnic_cq *cq, | ||
71 | unsigned int work_to_do, | ||
72 | int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc, | ||
73 | u8 type, u16 q_number, u16 completed_index, void *opaque), | ||
74 | void *opaque) | ||
75 | { | ||
76 | struct cq_desc *cq_desc; | ||
77 | unsigned int work_done = 0; | ||
78 | u16 q_number, completed_index; | ||
79 | u8 type, color; | ||
80 | |||
81 | cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + | ||
82 | cq->ring.desc_size * cq->to_clean); | ||
83 | cq_desc_dec(cq_desc, &type, &color, | ||
84 | &q_number, &completed_index); | ||
85 | |||
86 | while (color != cq->last_color) { | ||
87 | |||
88 | if ((*q_service)(cq->vdev, cq_desc, type, | ||
89 | q_number, completed_index, opaque)) | ||
90 | break; | ||
91 | |||
92 | cq->to_clean++; | ||
93 | if (cq->to_clean == cq->ring.desc_count) { | ||
94 | cq->to_clean = 0; | ||
95 | cq->last_color = cq->last_color ? 0 : 1; | ||
96 | } | ||
97 | |||
98 | cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + | ||
99 | cq->ring.desc_size * cq->to_clean); | ||
100 | cq_desc_dec(cq_desc, &type, &color, | ||
101 | &q_number, &completed_index); | ||
102 | |||
103 | work_done++; | ||
104 | if (work_done >= work_to_do) | ||
105 | break; | ||
106 | } | ||
107 | |||
108 | return work_done; | ||
109 | } | ||
110 | |||
111 | void vnic_cq_free(struct vnic_cq *cq); | ||
112 | int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, | ||
113 | unsigned int desc_count, unsigned int desc_size); | ||
114 | void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, | ||
115 | unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, | ||
116 | unsigned int cq_tail_color, unsigned int interrupt_enable, | ||
117 | unsigned int cq_entry_enable, unsigned int message_enable, | ||
118 | unsigned int interrupt_offset, u64 message_addr); | ||
119 | void vnic_cq_clean(struct vnic_cq *cq); | ||
120 | |||
121 | #endif /* _VNIC_CQ_H_ */ | ||
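vnic_cq_service() tracks new completions with a color bit instead of a producer index: the device writes each entry with the color of its current pass around the ring and flips that color when it wraps, while the driver flips last_color whenever to_clean wraps, so an entry is new exactly when its color differs from last_color. The following stand-alone simulation of that protocol uses an invented 8-entry fake_cq_desc ring purely for illustration:

	#include <stdint.h>
	#include <stdio.h>

	#define RING 8

	struct fake_cq_desc {
		uint8_t color;	/* written by the "device" together with the entry */
		uint8_t data;
	};

	static struct fake_cq_desc ring[RING];
	static unsigned int to_clean, last_color;	/* driver side, both start at 0 */
	static unsigned int hw_index, hw_color = 1;	/* device side, first pass uses color 1 */

	/* the "device" posts one completion and flips its color when it wraps */
	static void hw_post(uint8_t data)
	{
		ring[hw_index].data = data;
		ring[hw_index].color = (uint8_t)hw_color;
		if (++hw_index == RING) {
			hw_index = 0;
			hw_color ^= 1;
		}
	}

	/* mirrors the vnic_cq_service() walk: consume while color != last_color */
	static unsigned int service(void)
	{
		unsigned int done = 0;

		while (ring[to_clean].color != last_color) {
			printf("consumed entry %u (data %u)\n", to_clean, ring[to_clean].data);
			if (++to_clean == RING) {
				to_clean = 0;
				last_color ^= 1;
			}
			done++;
		}
		return done;
	}

	int main(void)
	{
		unsigned int i;

		for (i = 0; i < 5; i++)		/* first burst, written with color 1 */
			hw_post((uint8_t)i);
		printf("pass 1: serviced %u\n", service());

		for (i = 5; i < 11; i++)	/* second burst wraps the ring, color flips to 0 */
			hw_post((uint8_t)i);
		printf("pass 2: serviced %u\n", service());
		return 0;
	}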
diff --git a/drivers/scsi/fnic/vnic_cq_copy.h b/drivers/scsi/fnic/vnic_cq_copy.h new file mode 100644 index 000000000000..7901ce255a81 --- /dev/null +++ b/drivers/scsi/fnic/vnic_cq_copy.h | |||
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _VNIC_CQ_COPY_H_ | ||
19 | #define _VNIC_CQ_COPY_H_ | ||
20 | |||
21 | #include "fcpio.h" | ||
22 | |||
23 | static inline unsigned int vnic_cq_copy_service( | ||
24 | struct vnic_cq *cq, | ||
25 | int (*q_service)(struct vnic_dev *vdev, | ||
26 | unsigned int index, | ||
27 | struct fcpio_fw_req *desc), | ||
28 | unsigned int work_to_do) | ||
29 | |||
30 | { | ||
31 | struct fcpio_fw_req *desc; | ||
32 | unsigned int work_done = 0; | ||
33 | u8 color; | ||
34 | |||
35 | desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + | ||
36 | cq->ring.desc_size * cq->to_clean); | ||
37 | fcpio_color_dec(desc, &color); | ||
38 | |||
39 | while (color != cq->last_color) { | ||
40 | |||
41 | if ((*q_service)(cq->vdev, cq->index, desc)) | ||
42 | break; | ||
43 | |||
44 | cq->to_clean++; | ||
45 | if (cq->to_clean == cq->ring.desc_count) { | ||
46 | cq->to_clean = 0; | ||
47 | cq->last_color = cq->last_color ? 0 : 1; | ||
48 | } | ||
49 | |||
50 | desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + | ||
51 | cq->ring.desc_size * cq->to_clean); | ||
52 | fcpio_color_dec(desc, &color); | ||
53 | |||
54 | work_done++; | ||
55 | if (work_done >= work_to_do) | ||
56 | break; | ||
57 | } | ||
58 | |||
59 | return work_done; | ||
60 | } | ||
61 | |||
62 | #endif /* _VNIC_CQ_COPY_H_ */ | ||
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c new file mode 100644 index 000000000000..566770645086 --- /dev/null +++ b/drivers/scsi/fnic/vnic_dev.c | |||
@@ -0,0 +1,690 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/pci.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/if_ether.h> | ||
25 | #include "vnic_resource.h" | ||
26 | #include "vnic_devcmd.h" | ||
27 | #include "vnic_dev.h" | ||
28 | #include "vnic_stats.h" | ||
29 | |||
30 | struct vnic_res { | ||
31 | void __iomem *vaddr; | ||
32 | unsigned int count; | ||
33 | }; | ||
34 | |||
35 | struct vnic_dev { | ||
36 | void *priv; | ||
37 | struct pci_dev *pdev; | ||
38 | struct vnic_res res[RES_TYPE_MAX]; | ||
39 | enum vnic_dev_intr_mode intr_mode; | ||
40 | struct vnic_devcmd __iomem *devcmd; | ||
41 | struct vnic_devcmd_notify *notify; | ||
42 | struct vnic_devcmd_notify notify_copy; | ||
43 | dma_addr_t notify_pa; | ||
44 | u32 *linkstatus; | ||
45 | dma_addr_t linkstatus_pa; | ||
46 | struct vnic_stats *stats; | ||
47 | dma_addr_t stats_pa; | ||
48 | struct vnic_devcmd_fw_info *fw_info; | ||
49 | dma_addr_t fw_info_pa; | ||
50 | }; | ||
51 | |||
52 | #define VNIC_MAX_RES_HDR_SIZE \ | ||
53 | (sizeof(struct vnic_resource_header) + \ | ||
54 | sizeof(struct vnic_resource) * RES_TYPE_MAX) | ||
55 | #define VNIC_RES_STRIDE 128 | ||
56 | |||
57 | void *vnic_dev_priv(struct vnic_dev *vdev) | ||
58 | { | ||
59 | return vdev->priv; | ||
60 | } | ||
61 | |||
62 | static int vnic_dev_discover_res(struct vnic_dev *vdev, | ||
63 | struct vnic_dev_bar *bar) | ||
64 | { | ||
65 | struct vnic_resource_header __iomem *rh; | ||
66 | struct vnic_resource __iomem *r; | ||
67 | u8 type; | ||
68 | |||
69 | if (bar->len < VNIC_MAX_RES_HDR_SIZE) { | ||
70 | printk(KERN_ERR "vNIC BAR0 res hdr length error\n"); | ||
71 | return -EINVAL; | ||
72 | } | ||
73 | |||
74 | rh = bar->vaddr; | ||
75 | if (!rh) { | ||
76 | printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n"); | ||
77 | return -EINVAL; | ||
78 | } | ||
79 | |||
80 | if (ioread32(&rh->magic) != VNIC_RES_MAGIC || | ||
81 | ioread32(&rh->version) != VNIC_RES_VERSION) { | ||
82 | printk(KERN_ERR "vNIC BAR0 res magic/version error " | ||
83 | "exp (%lx/%lx) curr (%x/%x)\n", | ||
84 | VNIC_RES_MAGIC, VNIC_RES_VERSION, | ||
85 | ioread32(&rh->magic), ioread32(&rh->version)); | ||
86 | return -EINVAL; | ||
87 | } | ||
88 | |||
89 | r = (struct vnic_resource __iomem *)(rh + 1); | ||
90 | |||
91 | while ((type = ioread8(&r->type)) != RES_TYPE_EOL) { | ||
92 | |||
93 | u8 bar_num = ioread8(&r->bar); | ||
94 | u32 bar_offset = ioread32(&r->bar_offset); | ||
95 | u32 count = ioread32(&r->count); | ||
96 | u32 len; | ||
97 | |||
98 | r++; | ||
99 | |||
100 | if (bar_num != 0) /* only mapping in BAR0 resources */ | ||
101 | continue; | ||
102 | |||
103 | switch (type) { | ||
104 | case RES_TYPE_WQ: | ||
105 | case RES_TYPE_RQ: | ||
106 | case RES_TYPE_CQ: | ||
107 | case RES_TYPE_INTR_CTRL: | ||
108 | /* each count is stride bytes long */ | ||
109 | len = count * VNIC_RES_STRIDE; | ||
110 | if (len + bar_offset > bar->len) { | ||
111 | printk(KERN_ERR "vNIC BAR0 resource %d " | ||
112 | "out-of-bounds, offset 0x%x + " | ||
113 | "size 0x%x > bar len 0x%lx\n", | ||
114 | type, bar_offset, | ||
115 | len, | ||
116 | bar->len); | ||
117 | return -EINVAL; | ||
118 | } | ||
119 | break; | ||
120 | case RES_TYPE_INTR_PBA_LEGACY: | ||
121 | case RES_TYPE_DEVCMD: | ||
122 | len = count; | ||
123 | break; | ||
124 | default: | ||
125 | continue; | ||
126 | } | ||
127 | |||
128 | vdev->res[type].count = count; | ||
129 | vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset; | ||
130 | } | ||
131 | |||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, | ||
136 | enum vnic_res_type type) | ||
137 | { | ||
138 | return vdev->res[type].count; | ||
139 | } | ||
140 | |||
141 | void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, | ||
142 | unsigned int index) | ||
143 | { | ||
144 | if (!vdev->res[type].vaddr) | ||
145 | return NULL; | ||
146 | |||
147 | switch (type) { | ||
148 | case RES_TYPE_WQ: | ||
149 | case RES_TYPE_RQ: | ||
150 | case RES_TYPE_CQ: | ||
151 | case RES_TYPE_INTR_CTRL: | ||
152 | return (char __iomem *)vdev->res[type].vaddr + | ||
153 | index * VNIC_RES_STRIDE; | ||
154 | default: | ||
155 | return (char __iomem *)vdev->res[type].vaddr; | ||
156 | } | ||
157 | } | ||
158 | |||
159 | unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, | ||
160 | unsigned int desc_count, | ||
161 | unsigned int desc_size) | ||
162 | { | ||
163 | /* The base address of the desc rings must be 512 byte aligned. | ||
164 | * Descriptor count is aligned to groups of 32 descriptors. A | ||
165 | * count of 0 means the maximum 4096 descriptors. Descriptor | ||
166 | * size is aligned to 16 bytes. | ||
167 | */ | ||
168 | |||
169 | unsigned int count_align = 32; | ||
170 | unsigned int desc_align = 16; | ||
171 | |||
172 | ring->base_align = 512; | ||
173 | |||
174 | if (desc_count == 0) | ||
175 | desc_count = 4096; | ||
176 | |||
177 | ring->desc_count = ALIGN(desc_count, count_align); | ||
178 | |||
179 | ring->desc_size = ALIGN(desc_size, desc_align); | ||
180 | |||
181 | ring->size = ring->desc_count * ring->desc_size; | ||
182 | ring->size_unaligned = ring->size + ring->base_align; | ||
183 | |||
184 | return ring->size_unaligned; | ||
185 | } | ||
186 | |||
187 | void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) | ||
188 | { | ||
189 | memset(ring->descs, 0, ring->size); | ||
190 | } | ||
191 | |||
192 | int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, | ||
193 | unsigned int desc_count, unsigned int desc_size) | ||
194 | { | ||
195 | vnic_dev_desc_ring_size(ring, desc_count, desc_size); | ||
196 | |||
197 | ring->descs_unaligned = pci_alloc_consistent(vdev->pdev, | ||
198 | ring->size_unaligned, | ||
199 | &ring->base_addr_unaligned); | ||
200 | |||
201 | if (!ring->descs_unaligned) { | ||
202 | printk(KERN_ERR | ||
203 | "Failed to allocate ring (size=%d), aborting\n", | ||
204 | (int)ring->size); | ||
205 | return -ENOMEM; | ||
206 | } | ||
207 | |||
208 | ring->base_addr = ALIGN(ring->base_addr_unaligned, | ||
209 | ring->base_align); | ||
210 | ring->descs = (u8 *)ring->descs_unaligned + | ||
211 | (ring->base_addr - ring->base_addr_unaligned); | ||
212 | |||
213 | vnic_dev_clear_desc_ring(ring); | ||
214 | |||
215 | ring->desc_avail = ring->desc_count - 1; | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring) | ||
221 | { | ||
222 | if (ring->descs) { | ||
223 | pci_free_consistent(vdev->pdev, | ||
224 | ring->size_unaligned, | ||
225 | ring->descs_unaligned, | ||
226 | ring->base_addr_unaligned); | ||
227 | ring->descs = NULL; | ||
228 | } | ||
229 | } | ||
230 | |||
231 | int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, | ||
232 | u64 *a0, u64 *a1, int wait) | ||
233 | { | ||
234 | struct vnic_devcmd __iomem *devcmd = vdev->devcmd; | ||
235 | int delay; | ||
236 | u32 status; | ||
237 | int dev_cmd_err[] = { | ||
238 | /* convert from fw's version of error.h to host's version */ | ||
239 | 0, /* ERR_SUCCESS */ | ||
240 | EINVAL, /* ERR_EINVAL */ | ||
241 | EFAULT, /* ERR_EFAULT */ | ||
242 | EPERM, /* ERR_EPERM */ | ||
243 | EBUSY, /* ERR_EBUSY */ | ||
244 | }; | ||
245 | int err; | ||
246 | |||
247 | status = ioread32(&devcmd->status); | ||
248 | if (status & STAT_BUSY) { | ||
249 | printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd)); | ||
250 | return -EBUSY; | ||
251 | } | ||
252 | |||
253 | if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { | ||
254 | writeq(*a0, &devcmd->args[0]); | ||
255 | writeq(*a1, &devcmd->args[1]); | ||
256 | wmb(); | ||
257 | } | ||
258 | |||
259 | iowrite32(cmd, &devcmd->cmd); | ||
260 | |||
261 | if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) | ||
262 | return 0; | ||
263 | |||
264 | for (delay = 0; delay < wait; delay++) { | ||
265 | |||
266 | udelay(100); | ||
267 | |||
268 | status = ioread32(&devcmd->status); | ||
269 | if (!(status & STAT_BUSY)) { | ||
270 | |||
271 | if (status & STAT_ERROR) { | ||
272 | err = dev_cmd_err[(int)readq(&devcmd->args[0])]; | ||
273 | printk(KERN_ERR "Error %d devcmd %d\n", | ||
274 | err, _CMD_N(cmd)); | ||
275 | return -err; | ||
276 | } | ||
277 | |||
278 | if (_CMD_DIR(cmd) & _CMD_DIR_READ) { | ||
279 | rmb(); | ||
280 | *a0 = readq(&devcmd->args[0]); | ||
281 | *a1 = readq(&devcmd->args[1]); | ||
282 | } | ||
283 | |||
284 | return 0; | ||
285 | } | ||
286 | } | ||
287 | |||
288 | printk(KERN_ERR "Timed out devcmd %d\n", _CMD_N(cmd)); | ||
289 | return -ETIMEDOUT; | ||
290 | } | ||
291 | |||
292 | int vnic_dev_fw_info(struct vnic_dev *vdev, | ||
293 | struct vnic_devcmd_fw_info **fw_info) | ||
294 | { | ||
295 | u64 a0, a1 = 0; | ||
296 | int wait = 1000; | ||
297 | int err = 0; | ||
298 | |||
299 | if (!vdev->fw_info) { | ||
300 | vdev->fw_info = pci_alloc_consistent(vdev->pdev, | ||
301 | sizeof(struct vnic_devcmd_fw_info), | ||
302 | &vdev->fw_info_pa); | ||
303 | if (!vdev->fw_info) | ||
304 | return -ENOMEM; | ||
305 | |||
306 | a0 = vdev->fw_info_pa; | ||
307 | |||
308 | /* only get fw_info once and cache it */ | ||
309 | err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait); | ||
310 | } | ||
311 | |||
312 | *fw_info = vdev->fw_info; | ||
313 | |||
314 | return err; | ||
315 | } | ||
316 | |||
317 | int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, | ||
318 | void *value) | ||
319 | { | ||
320 | u64 a0, a1; | ||
321 | int wait = 1000; | ||
322 | int err; | ||
323 | |||
324 | a0 = offset; | ||
325 | a1 = size; | ||
326 | |||
327 | err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait); | ||
328 | |||
329 | switch (size) { | ||
330 | case 1: | ||
331 | *(u8 *)value = (u8)a0; | ||
332 | break; | ||
333 | case 2: | ||
334 | *(u16 *)value = (u16)a0; | ||
335 | break; | ||
336 | case 4: | ||
337 | *(u32 *)value = (u32)a0; | ||
338 | break; | ||
339 | case 8: | ||
340 | *(u64 *)value = a0; | ||
341 | break; | ||
342 | default: | ||
343 | BUG(); | ||
344 | break; | ||
345 | } | ||
346 | |||
347 | return err; | ||
348 | } | ||
349 | |||
350 | int vnic_dev_stats_clear(struct vnic_dev *vdev) | ||
351 | { | ||
352 | u64 a0 = 0, a1 = 0; | ||
353 | int wait = 1000; | ||
354 | return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait); | ||
355 | } | ||
356 | |||
357 | int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) | ||
358 | { | ||
359 | u64 a0, a1; | ||
360 | int wait = 1000; | ||
361 | |||
362 | if (!vdev->stats) { | ||
363 | vdev->stats = pci_alloc_consistent(vdev->pdev, | ||
364 | sizeof(struct vnic_stats), &vdev->stats_pa); | ||
365 | if (!vdev->stats) | ||
366 | return -ENOMEM; | ||
367 | } | ||
368 | |||
369 | *stats = vdev->stats; | ||
370 | a0 = vdev->stats_pa; | ||
371 | a1 = sizeof(struct vnic_stats); | ||
372 | |||
373 | return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait); | ||
374 | } | ||
375 | |||
376 | int vnic_dev_close(struct vnic_dev *vdev) | ||
377 | { | ||
378 | u64 a0 = 0, a1 = 0; | ||
379 | int wait = 1000; | ||
380 | return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait); | ||
381 | } | ||
382 | |||
383 | int vnic_dev_enable(struct vnic_dev *vdev) | ||
384 | { | ||
385 | u64 a0 = 0, a1 = 0; | ||
386 | int wait = 1000; | ||
387 | return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); | ||
388 | } | ||
389 | |||
390 | int vnic_dev_disable(struct vnic_dev *vdev) | ||
391 | { | ||
392 | u64 a0 = 0, a1 = 0; | ||
393 | int wait = 1000; | ||
394 | return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait); | ||
395 | } | ||
396 | |||
397 | int vnic_dev_open(struct vnic_dev *vdev, int arg) | ||
398 | { | ||
399 | u64 a0 = (u32)arg, a1 = 0; | ||
400 | int wait = 1000; | ||
401 | return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait); | ||
402 | } | ||
403 | |||
404 | int vnic_dev_open_done(struct vnic_dev *vdev, int *done) | ||
405 | { | ||
406 | u64 a0 = 0, a1 = 0; | ||
407 | int wait = 1000; | ||
408 | int err; | ||
409 | |||
410 | *done = 0; | ||
411 | |||
412 | err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait); | ||
413 | if (err) | ||
414 | return err; | ||
415 | |||
416 | *done = (a0 == 0); | ||
417 | |||
418 | return 0; | ||
419 | } | ||
420 | |||
421 | int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg) | ||
422 | { | ||
423 | u64 a0 = (u32)arg, a1 = 0; | ||
424 | int wait = 1000; | ||
425 | return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait); | ||
426 | } | ||
427 | |||
428 | int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) | ||
429 | { | ||
430 | u64 a0 = 0, a1 = 0; | ||
431 | int wait = 1000; | ||
432 | int err; | ||
433 | |||
434 | *done = 0; | ||
435 | |||
436 | err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait); | ||
437 | if (err) | ||
438 | return err; | ||
439 | |||
440 | *done = (a0 == 0); | ||
441 | |||
442 | return 0; | ||
443 | } | ||
444 | |||
445 | int vnic_dev_hang_notify(struct vnic_dev *vdev) | ||
446 | { | ||
447 | u64 a0, a1; | ||
448 | int wait = 1000; | ||
449 | return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait); | ||
450 | } | ||
451 | |||
452 | int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) | ||
453 | { | ||
454 | u64 a0, a1; | ||
455 | int wait = 1000; | ||
456 | int err, i; | ||
457 | |||
458 | for (i = 0; i < ETH_ALEN; i++) | ||
459 | mac_addr[i] = 0; | ||
460 | |||
461 | err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait); | ||
462 | if (err) | ||
463 | return err; | ||
464 | |||
465 | for (i = 0; i < ETH_ALEN; i++) | ||
466 | mac_addr[i] = ((u8 *)&a0)[i]; | ||
467 | |||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, | ||
472 | int broadcast, int promisc, int allmulti) | ||
473 | { | ||
474 | u64 a0, a1 = 0; | ||
475 | int wait = 1000; | ||
476 | int err; | ||
477 | |||
478 | a0 = (directed ? CMD_PFILTER_DIRECTED : 0) | | ||
479 | (multicast ? CMD_PFILTER_MULTICAST : 0) | | ||
480 | (broadcast ? CMD_PFILTER_BROADCAST : 0) | | ||
481 | (promisc ? CMD_PFILTER_PROMISCUOUS : 0) | | ||
482 | (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0); | ||
483 | |||
484 | err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); | ||
485 | if (err) | ||
486 | printk(KERN_ERR "Can't set packet filter\n"); | ||
487 | } | ||
488 | |||
489 | void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) | ||
490 | { | ||
491 | u64 a0 = 0, a1 = 0; | ||
492 | int wait = 1000; | ||
493 | int err; | ||
494 | int i; | ||
495 | |||
496 | for (i = 0; i < ETH_ALEN; i++) | ||
497 | ((u8 *)&a0)[i] = addr[i]; | ||
498 | |||
499 | err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); | ||
500 | if (err) | ||
501 | printk(KERN_ERR | ||
502 | "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", | ||
503 | addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], | ||
504 | err); | ||
505 | } | ||
506 | |||
507 | void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) | ||
508 | { | ||
509 | u64 a0 = 0, a1 = 0; | ||
510 | int wait = 1000; | ||
511 | int err; | ||
512 | int i; | ||
513 | |||
514 | for (i = 0; i < ETH_ALEN; i++) | ||
515 | ((u8 *)&a0)[i] = addr[i]; | ||
516 | |||
517 | err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); | ||
518 | if (err) | ||
519 | printk(KERN_ERR | ||
520 | "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", | ||
521 | addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], | ||
522 | err); | ||
523 | } | ||
524 | |||
525 | int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) | ||
526 | { | ||
527 | u64 a0, a1; | ||
528 | int wait = 1000; | ||
529 | |||
530 | if (!vdev->notify) { | ||
531 | vdev->notify = pci_alloc_consistent(vdev->pdev, | ||
532 | sizeof(struct vnic_devcmd_notify), | ||
533 | &vdev->notify_pa); | ||
534 | if (!vdev->notify) | ||
535 | return -ENOMEM; | ||
536 | } | ||
537 | |||
538 | a0 = vdev->notify_pa; | ||
539 | a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL; | ||
540 | a1 += sizeof(struct vnic_devcmd_notify); | ||
541 | |||
542 | return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); | ||
543 | } | ||
544 | |||
545 | void vnic_dev_notify_unset(struct vnic_dev *vdev) | ||
546 | { | ||
547 | u64 a0, a1; | ||
548 | int wait = 1000; | ||
549 | |||
550 | a0 = 0; /* paddr = 0 to unset notify buffer */ | ||
551 | a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */ | ||
552 | a1 += sizeof(struct vnic_devcmd_notify); | ||
553 | |||
554 | vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); | ||
555 | } | ||
556 | |||
557 | static int vnic_dev_notify_ready(struct vnic_dev *vdev) | ||
558 | { | ||
559 | u32 *words; | ||
560 | unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4; | ||
561 | unsigned int i; | ||
562 | u32 csum; | ||
563 | |||
564 | if (!vdev->notify) | ||
565 | return 0; | ||
566 | |||
567 | do { | ||
568 | csum = 0; | ||
569 | memcpy(&vdev->notify_copy, vdev->notify, | ||
570 | sizeof(struct vnic_devcmd_notify)); | ||
571 | words = (u32 *)&vdev->notify_copy; | ||
572 | for (i = 1; i < nwords; i++) | ||
573 | csum += words[i]; | ||
574 | } while (csum != words[0]); | ||
575 | |||
576 | return 1; | ||
577 | } | ||
578 | |||
579 | int vnic_dev_init(struct vnic_dev *vdev, int arg) | ||
580 | { | ||
581 | u64 a0 = (u32)arg, a1 = 0; | ||
582 | int wait = 1000; | ||
583 | return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); | ||
584 | } | ||
585 | |||
586 | int vnic_dev_link_status(struct vnic_dev *vdev) | ||
587 | { | ||
588 | if (vdev->linkstatus) | ||
589 | return *vdev->linkstatus; | ||
590 | |||
591 | if (!vnic_dev_notify_ready(vdev)) | ||
592 | return 0; | ||
593 | |||
594 | return vdev->notify_copy.link_state; | ||
595 | } | ||
596 | |||
597 | u32 vnic_dev_port_speed(struct vnic_dev *vdev) | ||
598 | { | ||
599 | if (!vnic_dev_notify_ready(vdev)) | ||
600 | return 0; | ||
601 | |||
602 | return vdev->notify_copy.port_speed; | ||
603 | } | ||
604 | |||
605 | u32 vnic_dev_msg_lvl(struct vnic_dev *vdev) | ||
606 | { | ||
607 | if (!vnic_dev_notify_ready(vdev)) | ||
608 | return 0; | ||
609 | |||
610 | return vdev->notify_copy.msglvl; | ||
611 | } | ||
612 | |||
613 | u32 vnic_dev_mtu(struct vnic_dev *vdev) | ||
614 | { | ||
615 | if (!vnic_dev_notify_ready(vdev)) | ||
616 | return 0; | ||
617 | |||
618 | return vdev->notify_copy.mtu; | ||
619 | } | ||
620 | |||
621 | u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev) | ||
622 | { | ||
623 | if (!vnic_dev_notify_ready(vdev)) | ||
624 | return 0; | ||
625 | |||
626 | return vdev->notify_copy.link_down_cnt; | ||
627 | } | ||
628 | |||
629 | void vnic_dev_set_intr_mode(struct vnic_dev *vdev, | ||
630 | enum vnic_dev_intr_mode intr_mode) | ||
631 | { | ||
632 | vdev->intr_mode = intr_mode; | ||
633 | } | ||
634 | |||
635 | enum vnic_dev_intr_mode vnic_dev_get_intr_mode( | ||
636 | struct vnic_dev *vdev) | ||
637 | { | ||
638 | return vdev->intr_mode; | ||
639 | } | ||
640 | |||
641 | void vnic_dev_unregister(struct vnic_dev *vdev) | ||
642 | { | ||
643 | if (vdev) { | ||
644 | if (vdev->notify) | ||
645 | pci_free_consistent(vdev->pdev, | ||
646 | sizeof(struct vnic_devcmd_notify), | ||
647 | vdev->notify, | ||
648 | vdev->notify_pa); | ||
649 | if (vdev->linkstatus) | ||
650 | pci_free_consistent(vdev->pdev, | ||
651 | sizeof(u32), | ||
652 | vdev->linkstatus, | ||
653 | vdev->linkstatus_pa); | ||
654 | if (vdev->stats) | ||
655 | pci_free_consistent(vdev->pdev, | ||
656 | sizeof(struct vnic_dev), | ||
657 | vdev->stats, vdev->stats_pa); | ||
658 | if (vdev->fw_info) | ||
659 | pci_free_consistent(vdev->pdev, | ||
660 | sizeof(struct vnic_devcmd_fw_info), | ||
661 | vdev->fw_info, vdev->fw_info_pa); | ||
662 | kfree(vdev); | ||
663 | } | ||
664 | } | ||
665 | |||
666 | struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, | ||
667 | void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar) | ||
668 | { | ||
669 | if (!vdev) { | ||
670 | vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL); | ||
671 | if (!vdev) | ||
672 | return NULL; | ||
673 | } | ||
674 | |||
675 | vdev->priv = priv; | ||
676 | vdev->pdev = pdev; | ||
677 | |||
678 | if (vnic_dev_discover_res(vdev, bar)) | ||
679 | goto err_out; | ||
680 | |||
681 | vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); | ||
682 | if (!vdev->devcmd) | ||
683 | goto err_out; | ||
684 | |||
685 | return vdev; | ||
686 | |||
687 | err_out: | ||
688 | vnic_dev_unregister(vdev); | ||
689 | return NULL; | ||
690 | } | ||
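vnic_dev_cmd() above drives the firmware through a simple register handshake: check that STAT_BUSY is clear, write args[0]/args[1] and the command number, then poll the status register until BUSY drops, mapping STAT_ERROR to an errno and reading the arguments back for _CMD_DIR_READ commands. A rough user-space model of that write/poll/read sequence is sketched below; fake_devcmd and fake_fw_step are invented stand-ins for the memory-mapped register block and the firmware (the real device raises and clears STAT_BUSY itself once the cmd register is written):

	#include <stdint.h>
	#include <stdio.h>

	#define STAT_BUSY  0x1u
	#define STAT_ERROR 0x2u

	/* plain-memory stand-in for the memory-mapped vnic_devcmd registers */
	struct fake_devcmd {
		uint32_t status;
		uint32_t cmd;
		uint64_t args[2];
	};

	/* pretend firmware: completes the command and leaves a result in args[0] */
	static void fake_fw_step(struct fake_devcmd *d)
	{
		if (d->status & STAT_BUSY) {
			d->args[0] += d->args[1];	/* arbitrary "work" */
			d->status &= ~STAT_BUSY;
		}
	}

	static int fake_devcmd(struct fake_devcmd *d, uint32_t cmd,
			       uint64_t *a0, uint64_t *a1, int wait)
	{
		int i;

		if (d->status & STAT_BUSY)
			return -1;			/* -EBUSY in the driver */

		d->args[0] = *a0;			/* write phase */
		d->args[1] = *a1;
		d->cmd = cmd;
		d->status |= STAT_BUSY;			/* hardware sets this on the cmd write */

		for (i = 0; i < wait; i++) {		/* poll phase, udelay(100) per pass in the driver */
			fake_fw_step(d);
			if (!(d->status & STAT_BUSY)) {
				if (d->status & STAT_ERROR)
					return -1;
				*a0 = d->args[0];	/* read phase */
				*a1 = d->args[1];
				return 0;
			}
		}
		return -2;				/* -ETIMEDOUT */
	}

	int main(void)
	{
		struct fake_devcmd d = { 0 };
		uint64_t a0 = 40, a1 = 2;

		if (!fake_devcmd(&d, 1, &a0, &a1, 1000))
			printf("devcmd result: %llu\n", (unsigned long long)a0);
		return 0;
	}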
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h new file mode 100644 index 000000000000..f9935a8a5a09 --- /dev/null +++ b/drivers/scsi/fnic/vnic_dev.h | |||
@@ -0,0 +1,161 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _VNIC_DEV_H_ | ||
19 | #define _VNIC_DEV_H_ | ||
20 | |||
21 | #include "vnic_resource.h" | ||
22 | #include "vnic_devcmd.h" | ||
23 | |||
24 | /* | ||
25 | * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth | ||
26 | * Driver) when both are built with CONFIG options =y | ||
27 | */ | ||
28 | #define vnic_dev_priv fnic_dev_priv | ||
29 | #define vnic_dev_get_res_count fnic_dev_get_res_count | ||
30 | #define vnic_dev_get_res fnic_dev_get_res | ||
31 | #define vnic_dev_desc_ring_size fnic_dev_desc_ring_size | ||
32 | #define vnic_dev_clear_desc_ring fnic_dev_clear_desc_ring | ||
33 | #define vnic_dev_alloc_desc_ring fnic_dev_alloc_desc_ring | ||
34 | #define vnic_dev_free_desc_ring fnic_dev_free_desc_ring | ||
35 | #define vnic_dev_cmd fnic_dev_cmd | ||
36 | #define vnic_dev_fw_info fnic_dev_fw_info | ||
37 | #define vnic_dev_spec fnic_dev_spec | ||
38 | #define vnic_dev_stats_clear fnic_dev_stats_clear | ||
39 | #define vnic_dev_stats_dump fnic_dev_stats_dump | ||
40 | #define vnic_dev_hang_notify fnic_dev_hang_notify | ||
41 | #define vnic_dev_packet_filter fnic_dev_packet_filter | ||
42 | #define vnic_dev_add_addr fnic_dev_add_addr | ||
43 | #define vnic_dev_del_addr fnic_dev_del_addr | ||
44 | #define vnic_dev_mac_addr fnic_dev_mac_addr | ||
45 | #define vnic_dev_notify_set fnic_dev_notify_set | ||
46 | #define vnic_dev_notify_unset fnic_dev_notify_unset | ||
47 | #define vnic_dev_link_status fnic_dev_link_status | ||
48 | #define vnic_dev_port_speed fnic_dev_port_speed | ||
49 | #define vnic_dev_msg_lvl fnic_dev_msg_lvl | ||
50 | #define vnic_dev_mtu fnic_dev_mtu | ||
51 | #define vnic_dev_link_down_cnt fnic_dev_link_down_cnt | ||
52 | #define vnic_dev_close fnic_dev_close | ||
53 | #define vnic_dev_enable fnic_dev_enable | ||
54 | #define vnic_dev_disable fnic_dev_disable | ||
55 | #define vnic_dev_open fnic_dev_open | ||
56 | #define vnic_dev_open_done fnic_dev_open_done | ||
57 | #define vnic_dev_init fnic_dev_init | ||
58 | #define vnic_dev_soft_reset fnic_dev_soft_reset | ||
59 | #define vnic_dev_soft_reset_done fnic_dev_soft_reset_done | ||
60 | #define vnic_dev_set_intr_mode fnic_dev_set_intr_mode | ||
61 | #define vnic_dev_get_intr_mode fnic_dev_get_intr_mode | ||
62 | #define vnic_dev_unregister fnic_dev_unregister | ||
63 | #define vnic_dev_register fnic_dev_register | ||
64 | |||
65 | #ifndef VNIC_PADDR_TARGET | ||
66 | #define VNIC_PADDR_TARGET 0x0000000000000000ULL | ||
67 | #endif | ||
68 | |||
69 | #ifndef readq | ||
70 | static inline u64 readq(void __iomem *reg) | ||
71 | { | ||
72 | return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg); | ||
73 | } | ||
74 | |||
75 | static inline void writeq(u64 val, void __iomem *reg) | ||
76 | { | ||
77 | writel(val & 0xffffffff, reg); | ||
78 | writel(val >> 32, reg + 0x4UL); | ||
79 | } | ||
80 | #endif | ||
81 | |||
82 | enum vnic_dev_intr_mode { | ||
83 | VNIC_DEV_INTR_MODE_UNKNOWN, | ||
84 | VNIC_DEV_INTR_MODE_INTX, | ||
85 | VNIC_DEV_INTR_MODE_MSI, | ||
86 | VNIC_DEV_INTR_MODE_MSIX, | ||
87 | }; | ||
88 | |||
89 | struct vnic_dev_bar { | ||
90 | void __iomem *vaddr; | ||
91 | dma_addr_t bus_addr; | ||
92 | unsigned long len; | ||
93 | }; | ||
94 | |||
95 | struct vnic_dev_ring { | ||
96 | void *descs; | ||
97 | size_t size; | ||
98 | dma_addr_t base_addr; | ||
99 | size_t base_align; | ||
100 | void *descs_unaligned; | ||
101 | size_t size_unaligned; | ||
102 | dma_addr_t base_addr_unaligned; | ||
103 | unsigned int desc_size; | ||
104 | unsigned int desc_count; | ||
105 | unsigned int desc_avail; | ||
106 | }; | ||
107 | |||
108 | struct vnic_dev; | ||
109 | struct vnic_stats; | ||
110 | |||
111 | void *vnic_dev_priv(struct vnic_dev *vdev); | ||
112 | unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, | ||
113 | enum vnic_res_type type); | ||
114 | void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, | ||
115 | unsigned int index); | ||
116 | unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, | ||
117 | unsigned int desc_count, | ||
118 | unsigned int desc_size); | ||
119 | void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring); | ||
120 | int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, | ||
121 | unsigned int desc_count, unsigned int desc_size); | ||
122 | void vnic_dev_free_desc_ring(struct vnic_dev *vdev, | ||
123 | struct vnic_dev_ring *ring); | ||
124 | int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, | ||
125 | u64 *a0, u64 *a1, int wait); | ||
126 | int vnic_dev_fw_info(struct vnic_dev *vdev, | ||
127 | struct vnic_devcmd_fw_info **fw_info); | ||
128 | int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, | ||
129 | unsigned int size, void *value); | ||
130 | int vnic_dev_stats_clear(struct vnic_dev *vdev); | ||
131 | int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); | ||
132 | int vnic_dev_hang_notify(struct vnic_dev *vdev); | ||
133 | void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, | ||
134 | int broadcast, int promisc, int allmulti); | ||
135 | void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); | ||
136 | void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); | ||
137 | int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); | ||
138 | int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); | ||
139 | void vnic_dev_notify_unset(struct vnic_dev *vdev); | ||
140 | int vnic_dev_link_status(struct vnic_dev *vdev); | ||
141 | u32 vnic_dev_port_speed(struct vnic_dev *vdev); | ||
142 | u32 vnic_dev_msg_lvl(struct vnic_dev *vdev); | ||
143 | u32 vnic_dev_mtu(struct vnic_dev *vdev); | ||
144 | u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev); | ||
145 | int vnic_dev_close(struct vnic_dev *vdev); | ||
146 | int vnic_dev_enable(struct vnic_dev *vdev); | ||
147 | int vnic_dev_disable(struct vnic_dev *vdev); | ||
148 | int vnic_dev_open(struct vnic_dev *vdev, int arg); | ||
149 | int vnic_dev_open_done(struct vnic_dev *vdev, int *done); | ||
150 | int vnic_dev_init(struct vnic_dev *vdev, int arg); | ||
151 | int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); | ||
152 | int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); | ||
153 | void vnic_dev_set_intr_mode(struct vnic_dev *vdev, | ||
154 | enum vnic_dev_intr_mode intr_mode); | ||
155 | enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev); | ||
156 | void vnic_dev_unregister(struct vnic_dev *vdev); | ||
157 | struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, | ||
158 | void *priv, struct pci_dev *pdev, | ||
159 | struct vnic_dev_bar *bar); | ||
160 | |||
161 | #endif /* _VNIC_DEV_H_ */ | ||
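struct vnic_dev_ring and vnic_dev_desc_ring_size() capture the ring sizing rules: the descriptor count is rounded up to a multiple of 32 (a count of 0 meaning the maximum of 4096), the descriptor size is rounded up to a multiple of 16, and the allocation is padded by the 512-byte base alignment so the ring can be realigned after pci_alloc_consistent(). A worked example with assumed inputs of 100 descriptors of 20 bytes each (the generic ALIGN macro here is only for illustration):

	#include <stdio.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

	int main(void)
	{
		unsigned int desc_count = 100, desc_size = 20;	/* assumed inputs */
		unsigned int count = ALIGN(desc_count, 32);	/* -> 128 descriptors */
		unsigned int size  = ALIGN(desc_size, 16);	/* -> 32 bytes each */
		unsigned int ring  = count * size;		/* -> 4096 bytes */
		unsigned int alloc = ring + 512;		/* + base_align slack -> 4608 bytes */

		printf("count=%u size=%u ring=%u alloc=%u\n", count, size, ring, alloc);
		return 0;
	}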
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h new file mode 100644 index 000000000000..d62b9061bf12 --- /dev/null +++ b/drivers/scsi/fnic/vnic_devcmd.h | |||
@@ -0,0 +1,281 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _VNIC_DEVCMD_H_ | ||
19 | #define _VNIC_DEVCMD_H_ | ||
20 | |||
21 | #define _CMD_NBITS 14 | ||
22 | #define _CMD_VTYPEBITS 10 | ||
23 | #define _CMD_FLAGSBITS 6 | ||
24 | #define _CMD_DIRBITS 2 | ||
25 | |||
26 | #define _CMD_NMASK ((1 << _CMD_NBITS)-1) | ||
27 | #define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1) | ||
28 | #define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1) | ||
29 | #define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1) | ||
30 | |||
31 | #define _CMD_NSHIFT 0 | ||
32 | #define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS) | ||
33 | #define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS) | ||
34 | #define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS) | ||
35 | |||
36 | /* | ||
37 | * Direction bits (from host perspective). | ||
38 | */ | ||
39 | #define _CMD_DIR_NONE 0U | ||
40 | #define _CMD_DIR_WRITE 1U | ||
41 | #define _CMD_DIR_READ 2U | ||
42 | #define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ) | ||
43 | |||
44 | /* | ||
45 | * Flag bits. | ||
46 | */ | ||
47 | #define _CMD_FLAGS_NONE 0U | ||
48 | #define _CMD_FLAGS_NOWAIT 1U | ||
49 | |||
50 | /* | ||
51 | * vNIC type bits. | ||
52 | */ | ||
53 | #define _CMD_VTYPE_NONE 0U | ||
54 | #define _CMD_VTYPE_ENET 1U | ||
55 | #define _CMD_VTYPE_FC 2U | ||
56 | #define _CMD_VTYPE_SCSI 4U | ||
57 | #define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI) | ||
58 | |||
59 | /* | ||
60 | * Used to create cmds.. | ||
61 | */ | ||
62 | #define _CMDCF(dir, flags, vtype, nr) \ | ||
63 | (((dir) << _CMD_DIRSHIFT) | \ | ||
64 | ((flags) << _CMD_FLAGSSHIFT) | \ | ||
65 | ((vtype) << _CMD_VTYPESHIFT) | \ | ||
66 | ((nr) << _CMD_NSHIFT)) | ||
67 | #define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr) | ||
68 | #define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr) | ||
69 | |||
70 | /* | ||
71 | * Used to decode cmds.. | ||
72 | */ | ||
73 | #define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK) | ||
74 | #define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK) | ||
75 | #define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK) | ||
76 | #define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK) | ||
77 | |||
78 | enum vnic_devcmd_cmd { | ||
79 | CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0), | ||
80 | |||
81 | /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */ | ||
82 | CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1), | ||
83 | |||
84 | /* dev-specific block member: | ||
85 | * in: (u16)a0=offset,(u8)a1=size | ||
86 | * out: a0=value */ | ||
87 | CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2), | ||
88 | |||
89 | /* stats clear */ | ||
90 | CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3), | ||
91 | |||
92 | /* stats dump in mem: (u64)a0=paddr to stats area, | ||
93 | * (u16)a1=sizeof stats area */ | ||
94 | CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4), | ||
95 | |||
96 | /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */ | ||
97 | CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7), | ||
98 | |||
99 | /* hang detection notification */ | ||
100 | CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8), | ||
101 | |||
102 | /* MAC address in (u48)a0 */ | ||
103 | CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ, | ||
104 | _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9), | ||
105 | |||
106 | /* disable/enable promisc mode: (u8)a0=0/1 */ | ||
107 | /***** XXX DEPRECATED *****/ | ||
108 | CMD_PROMISC_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10), | ||
109 | |||
110 | /* disable/enable all-multi mode: (u8)a0=0/1 */ | ||
111 | /***** XXX DEPRECATED *****/ | ||
112 | CMD_ALLMULTI_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11), | ||
113 | |||
114 | /* add addr from (u48)a0 */ | ||
115 | CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE, | ||
116 | _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12), | ||
117 | |||
118 | /* del addr from (u48)a0 */ | ||
119 | CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE, | ||
120 | _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13), | ||
121 | |||
122 | /* add VLAN id in (u16)a0 */ | ||
123 | CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14), | ||
124 | |||
125 | /* del VLAN id in (u16)a0 */ | ||
126 | CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15), | ||
127 | |||
128 | /* nic_cfg in (u32)a0 */ | ||
129 | CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16), | ||
130 | |||
131 | /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */ | ||
132 | CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17), | ||
133 | |||
134 | /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */ | ||
135 | CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18), | ||
136 | |||
137 | /* initiate softreset */ | ||
138 | CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19), | ||
139 | |||
140 | /* softreset status: | ||
141 | * out: a0=0 reset complete, a0=1 reset in progress */ | ||
142 | CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20), | ||
143 | |||
144 | /* set struct vnic_devcmd_notify buffer in mem: | ||
145 | * in: | ||
146 | * (u64)a0=paddr to notify (set paddr=0 to unset) | ||
147 | * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify) | ||
148 | * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr) | ||
149 | * out: | ||
150 | * (u32)a1 = effective size | ||
151 | */ | ||
152 | CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21), | ||
153 | |||
154 | /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct, | ||
155 | * (u8)a1=PXENV_UNDI_xxx */ | ||
156 | CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22), | ||
157 | |||
158 | /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */ | ||
159 | CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23), | ||
160 | |||
161 | /* open status: | ||
162 | * out: a0=0 open complete, a0=1 open in progress */ | ||
163 | CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24), | ||
164 | |||
165 | /* close vnic */ | ||
166 | CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25), | ||
167 | |||
168 | /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */ | ||
169 | CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26), | ||
170 | |||
171 | /* variant of CMD_INIT, with provisioning info | ||
172 | * (u64)a0=paddr of vnic_devcmd_provinfo | ||
173 | * (u32)a1=sizeof provision info */ | ||
174 | CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27), | ||
175 | |||
176 | /* enable virtual link */ | ||
177 | CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), | ||
178 | |||
179 | /* disable virtual link */ | ||
180 | CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29), | ||
181 | |||
182 | /* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */ | ||
183 | CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30), | ||
184 | |||
185 | /* init status: | ||
186 | * out: a0=0 init complete, a0=1 init in progress | ||
187 | * if a0=0, a1=errno */ | ||
188 | CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31), | ||
189 | |||
190 | /* INT13 API: (u64)a0=paddr to vnic_int13_params struct | ||
191 | * (u8)a1=INT13_CMD_xxx */ | ||
192 | CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32), | ||
193 | |||
194 | /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */ | ||
195 | CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33), | ||
196 | |||
197 | /* undo initialize of virtual link */ | ||
198 | CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), | ||
199 | }; | ||
200 | |||
201 | /* flags for CMD_OPEN */ | ||
202 | #define CMD_OPENF_OPROM 0x1 /* open coming from option rom */ | ||
203 | |||
204 | /* flags for CMD_INIT */ | ||
205 | #define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */ | ||
206 | |||
207 | /* flags for CMD_PACKET_FILTER */ | ||
208 | #define CMD_PFILTER_DIRECTED 0x01 | ||
209 | #define CMD_PFILTER_MULTICAST 0x02 | ||
210 | #define CMD_PFILTER_BROADCAST 0x04 | ||
211 | #define CMD_PFILTER_PROMISCUOUS 0x08 | ||
212 | #define CMD_PFILTER_ALL_MULTICAST 0x10 | ||
213 | |||
214 | enum vnic_devcmd_status { | ||
215 | STAT_NONE = 0, | ||
216 | STAT_BUSY = 1 << 0, /* cmd in progress */ | ||
217 | STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */ | ||
218 | }; | ||
219 | |||
220 | enum vnic_devcmd_error { | ||
221 | ERR_SUCCESS = 0, | ||
222 | ERR_EINVAL = 1, | ||
223 | ERR_EFAULT = 2, | ||
224 | ERR_EPERM = 3, | ||
225 | ERR_EBUSY = 4, | ||
226 | ERR_ECMDUNKNOWN = 5, | ||
227 | ERR_EBADSTATE = 6, | ||
228 | ERR_ENOMEM = 7, | ||
229 | ERR_ETIMEDOUT = 8, | ||
230 | ERR_ELINKDOWN = 9, | ||
231 | }; | ||
232 | |||
233 | struct vnic_devcmd_fw_info { | ||
234 | char fw_version[32]; | ||
235 | char fw_build[32]; | ||
236 | char hw_version[32]; | ||
237 | char hw_serial_number[32]; | ||
238 | }; | ||
239 | |||
240 | struct vnic_devcmd_notify { | ||
241 | u32 csum; /* checksum over following words */ | ||
242 | |||
243 | u32 link_state; /* link up == 1 */ | ||
244 | u32 port_speed; /* effective port speed (rate limit) */ | ||
245 | u32 mtu; /* MTU */ | ||
246 | u32 msglvl; /* requested driver msg lvl */ | ||
247 | u32 uif; /* uplink interface */ | ||
248 | u32 status; /* status bits (see VNIC_STF_*) */ | ||
249 | u32 error; /* error code (see ERR_*) for first ERR */ | ||
250 | u32 link_down_cnt; /* running count of link down transitions */ | ||
251 | }; | ||
252 | #define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */ | ||
253 | |||
254 | struct vnic_devcmd_provinfo { | ||
255 | u8 oui[3]; | ||
256 | u8 type; | ||
257 | u8 data[0]; | ||
258 | }; | ||
259 | |||
260 | /* | ||
261 | * Writing cmd register causes STAT_BUSY to get set in status register. | ||
262 | * When cmd completes, STAT_BUSY will be cleared. | ||
263 | * | ||
264 | * If cmd completed successfully STAT_ERROR will be clear | ||
265 | * and args registers contain cmd-specific results. | ||
266 | * | ||
267 | * If cmd error, STAT_ERROR will be set and args[0] contains error code. | ||
268 | * | ||
269 | * status register is read-only. While STAT_BUSY is set, | ||
270 | * all other register contents are read-only. | ||
271 | */ | ||
272 | |||
273 | /* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */ | ||
274 | #define VNIC_DEVCMD_NARGS 15 | ||
275 | struct vnic_devcmd { | ||
276 | u32 status; /* RO */ | ||
277 | u32 cmd; /* RW */ | ||
278 | u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */ | ||
279 | }; | ||
280 | |||
281 | #endif /* _VNIC_DEVCMD_H_ */ | ||
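For orientation only (this sketch is not part of the patch): the polled handshake described in the comment above struct vnic_devcmd would look roughly like the following. The example_devcmd() name, its two-argument interface, and the poll budget are assumptions; the driver's production devcmd path lives behind its vnic_dev helpers, and a real implementation would read back the full 64-bit args and map ERR_* values onto errno codes.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/delay.h>
#include "vnic_devcmd.h"

static int example_devcmd(struct vnic_devcmd __iomem *devcmd,
			  enum vnic_devcmd_cmd cmd, u64 a0, u64 a1)
{
	unsigned int i;
	u32 status;

	writeq(a0, &devcmd->args[0]);
	writeq(a1, &devcmd->args[1]);
	wmb();
	iowrite32(cmd, &devcmd->cmd);		/* hardware sets STAT_BUSY */

	for (i = 0; i < 1000; i++) {		/* poll budget is arbitrary */
		udelay(100);
		status = ioread32(&devcmd->status);
		if (status & STAT_BUSY)
			continue;
		if (status & STAT_ERROR)
			/* low word of args[0] carries an ERR_* code */
			return -(int)ioread32(&devcmd->args[0]);
		/* on success, results are read back from devcmd->args[] */
		return 0;
	}
	return -ETIMEDOUT;
}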
diff --git a/drivers/scsi/fnic/vnic_intr.c b/drivers/scsi/fnic/vnic_intr.c new file mode 100644 index 000000000000..4f4dc8793d23 --- /dev/null +++ b/drivers/scsi/fnic/vnic_intr.c | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/pci.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include "vnic_dev.h" | ||
25 | #include "vnic_intr.h" | ||
26 | |||
27 | void vnic_intr_free(struct vnic_intr *intr) | ||
28 | { | ||
29 | intr->ctrl = NULL; | ||
30 | } | ||
31 | |||
32 | int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, | ||
33 | unsigned int index) | ||
34 | { | ||
35 | intr->index = index; | ||
36 | intr->vdev = vdev; | ||
37 | |||
38 | intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); | ||
39 | if (!intr->ctrl) { | ||
40 | printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n", | ||
41 | index); | ||
42 | return -EINVAL; | ||
43 | } | ||
44 | |||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, | ||
49 | unsigned int coalescing_type, unsigned int mask_on_assertion) | ||
50 | { | ||
51 | iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer); | ||
52 | iowrite32(coalescing_type, &intr->ctrl->coalescing_type); | ||
53 | iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion); | ||
54 | iowrite32(0, &intr->ctrl->int_credits); | ||
55 | } | ||
56 | |||
57 | void vnic_intr_clean(struct vnic_intr *intr) | ||
58 | { | ||
59 | iowrite32(0, &intr->ctrl->int_credits); | ||
60 | } | ||
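For reference, not part of the patch: a sketch of the alloc/init order these helpers are meant to be used in. The index, the coalescing timer value of 64, and the mask_on_assertion choice are placeholders; VNIC_INTR_TIMER_TYPE_ABS comes from vnic_intr.h below.

static int example_intr_setup(struct vnic_dev *vdev, struct vnic_intr *intr)
{
	int err;

	err = vnic_intr_alloc(vdev, intr, 0 /* index: placeholder */);
	if (err)
		return err;

	/* timer value and type are illustrative, not recommended settings */
	vnic_intr_init(intr, 64, VNIC_INTR_TIMER_TYPE_ABS,
		       1 /* mask_on_assertion */);
	return 0;
}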
diff --git a/drivers/scsi/fnic/vnic_intr.h b/drivers/scsi/fnic/vnic_intr.h new file mode 100644 index 000000000000..d5fb40e7c98e --- /dev/null +++ b/drivers/scsi/fnic/vnic_intr.h | |||
@@ -0,0 +1,118 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _VNIC_INTR_H_ | ||
19 | #define _VNIC_INTR_H_ | ||
20 | |||
21 | #include <linux/pci.h> | ||
22 | #include "vnic_dev.h" | ||
23 | |||
24 | /* | ||
25 | * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth | ||
26 | * Driver) when both are built with CONFIG options =y | ||
27 | */ | ||
28 | #define vnic_intr_unmask fnic_intr_unmask | ||
29 | #define vnic_intr_mask fnic_intr_mask | ||
30 | #define vnic_intr_return_credits fnic_intr_return_credits | ||
31 | #define vnic_intr_credits fnic_intr_credits | ||
32 | #define vnic_intr_return_all_credits fnic_intr_return_all_credits | ||
33 | #define vnic_intr_legacy_pba fnic_intr_legacy_pba | ||
34 | #define vnic_intr_free fnic_intr_free | ||
35 | #define vnic_intr_alloc fnic_intr_alloc | ||
36 | #define vnic_intr_init fnic_intr_init | ||
37 | #define vnic_intr_clean fnic_intr_clean | ||
38 | |||
39 | #define VNIC_INTR_TIMER_MAX 0xffff | ||
40 | |||
41 | #define VNIC_INTR_TIMER_TYPE_ABS 0 | ||
42 | #define VNIC_INTR_TIMER_TYPE_QUIET 1 | ||
43 | |||
44 | /* Interrupt control */ | ||
45 | struct vnic_intr_ctrl { | ||
46 | u32 coalescing_timer; /* 0x00 */ | ||
47 | u32 pad0; | ||
48 | u32 coalescing_value; /* 0x08 */ | ||
49 | u32 pad1; | ||
50 | u32 coalescing_type; /* 0x10 */ | ||
51 | u32 pad2; | ||
52 | u32 mask_on_assertion; /* 0x18 */ | ||
53 | u32 pad3; | ||
54 | u32 mask; /* 0x20 */ | ||
55 | u32 pad4; | ||
56 | u32 int_credits; /* 0x28 */ | ||
57 | u32 pad5; | ||
58 | u32 int_credit_return; /* 0x30 */ | ||
59 | u32 pad6; | ||
60 | }; | ||
61 | |||
62 | struct vnic_intr { | ||
63 | unsigned int index; | ||
64 | struct vnic_dev *vdev; | ||
65 | struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */ | ||
66 | }; | ||
67 | |||
68 | static inline void vnic_intr_unmask(struct vnic_intr *intr) | ||
69 | { | ||
70 | iowrite32(0, &intr->ctrl->mask); | ||
71 | } | ||
72 | |||
73 | static inline void vnic_intr_mask(struct vnic_intr *intr) | ||
74 | { | ||
75 | iowrite32(1, &intr->ctrl->mask); | ||
76 | } | ||
77 | |||
78 | static inline void vnic_intr_return_credits(struct vnic_intr *intr, | ||
79 | unsigned int credits, int unmask, int reset_timer) | ||
80 | { | ||
81 | #define VNIC_INTR_UNMASK_SHIFT 16 | ||
82 | #define VNIC_INTR_RESET_TIMER_SHIFT 17 | ||
83 | |||
84 | u32 int_credit_return = (credits & 0xffff) | | ||
85 | (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) | | ||
86 | (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0); | ||
87 | |||
88 | iowrite32(int_credit_return, &intr->ctrl->int_credit_return); | ||
89 | } | ||
90 | |||
91 | static inline unsigned int vnic_intr_credits(struct vnic_intr *intr) | ||
92 | { | ||
93 | return ioread32(&intr->ctrl->int_credits); | ||
94 | } | ||
95 | |||
96 | static inline void vnic_intr_return_all_credits(struct vnic_intr *intr) | ||
97 | { | ||
98 | unsigned int credits = vnic_intr_credits(intr); | ||
99 | int unmask = 1; | ||
100 | int reset_timer = 1; | ||
101 | |||
102 | vnic_intr_return_credits(intr, credits, unmask, reset_timer); | ||
103 | } | ||
104 | |||
105 | static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba) | ||
106 | { | ||
107 | /* read PBA without clearing */ | ||
108 | return ioread32(legacy_pba); | ||
109 | } | ||
110 | |||
111 | void vnic_intr_free(struct vnic_intr *intr); | ||
112 | int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, | ||
113 | unsigned int index); | ||
114 | void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, | ||
115 | unsigned int coalescing_type, unsigned int mask_on_assertion); | ||
116 | void vnic_intr_clean(struct vnic_intr *intr); | ||
117 | |||
118 | #endif /* _VNIC_INTR_H_ */ | ||
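A hypothetical interrupt handler (not in this patch) showing the intended credit-return flow: service completions, then hand the count back with unmask and reset_timer set so the device can assert again. example_service_completions() is a stand-in for the driver's completion-queue processing.

#include <linux/interrupt.h>

unsigned int example_service_completions(void);	/* hypothetical */

static irqreturn_t example_isr(int irq, void *data)
{
	struct vnic_intr *intr = data;
	unsigned int work_done;

	work_done = example_service_completions();

	/* return credits, unmask, and restart the coalescing timer */
	vnic_intr_return_credits(intr, work_done, 1, 1);

	return IRQ_HANDLED;
}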
diff --git a/drivers/scsi/fnic/vnic_nic.h b/drivers/scsi/fnic/vnic_nic.h new file mode 100644 index 000000000000..f15b83eeaced --- /dev/null +++ b/drivers/scsi/fnic/vnic_nic.h | |||
@@ -0,0 +1,69 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _VNIC_NIC_H_ | ||
19 | #define _VNIC_NIC_H_ | ||
20 | |||
21 | /* | ||
22 | * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth | ||
23 | * Driver) when both are built with CONFIG options =y | ||
24 | */ | ||
25 | #define vnic_set_nic_cfg fnic_set_nic_cfg | ||
26 | |||
27 | #define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL | ||
28 | #define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0 | ||
29 | #define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8) | ||
30 | #define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL | ||
31 | #define NIC_CFG_RSS_HASH_TYPE_SHIFT 8 | ||
32 | #define NIC_CFG_RSS_HASH_BITS (7UL << 16) | ||
33 | #define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL | ||
34 | #define NIC_CFG_RSS_HASH_BITS_SHIFT 16 | ||
35 | #define NIC_CFG_RSS_BASE_CPU (7UL << 19) | ||
36 | #define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL | ||
37 | #define NIC_CFG_RSS_BASE_CPU_SHIFT 19 | ||
38 | #define NIC_CFG_RSS_ENABLE (1UL << 22) | ||
39 | #define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL | ||
40 | #define NIC_CFG_RSS_ENABLE_SHIFT 22 | ||
41 | #define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23) | ||
42 | #define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL | ||
43 | #define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23 | ||
44 | #define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24) | ||
45 | #define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL | ||
46 | #define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24 | ||
47 | |||
48 | static inline void vnic_set_nic_cfg(u32 *nic_cfg, | ||
49 | u8 rss_default_cpu, u8 rss_hash_type, | ||
50 | u8 rss_hash_bits, u8 rss_base_cpu, | ||
51 | u8 rss_enable, u8 tso_ipid_split_en, | ||
52 | u8 ig_vlan_strip_en) | ||
53 | { | ||
54 | *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) | | ||
55 | ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD) | ||
56 | << NIC_CFG_RSS_HASH_TYPE_SHIFT) | | ||
57 | ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD) | ||
58 | << NIC_CFG_RSS_HASH_BITS_SHIFT) | | ||
59 | ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD) | ||
60 | << NIC_CFG_RSS_BASE_CPU_SHIFT) | | ||
61 | ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD) | ||
62 | << NIC_CFG_RSS_ENABLE_SHIFT) | | ||
63 | ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD) | ||
64 | << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) | | ||
65 | ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD) | ||
66 | << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT); | ||
67 | } | ||
68 | |||
69 | #endif /* _VNIC_NIC_H_ */ | ||
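Illustrative only: how a caller might pack a nic_cfg word for an FC vNIC that does not use RSS, with ingress VLAN stripping enabled. The resulting word is what CMD_NIC_CFG in vnic_devcmd.h expects in a0; the specific values are assumptions, not the driver's defaults.

static u32 example_build_nic_cfg(void)
{
	u32 nic_cfg;

	vnic_set_nic_cfg(&nic_cfg,
			 0,	/* rss_default_cpu */
			 0,	/* rss_hash_type */
			 0,	/* rss_hash_bits */
			 0,	/* rss_base_cpu */
			 0,	/* rss_enable: RSS off */
			 0,	/* tso_ipid_split_en */
			 1);	/* ig_vlan_strip_en */
	return nic_cfg;
}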
diff --git a/drivers/scsi/fnic/vnic_resource.h b/drivers/scsi/fnic/vnic_resource.h new file mode 100644 index 000000000000..2d842f79d41a --- /dev/null +++ b/drivers/scsi/fnic/vnic_resource.h | |||
@@ -0,0 +1,61 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _VNIC_RESOURCE_H_ | ||
19 | #define _VNIC_RESOURCE_H_ | ||
20 | |||
21 | #define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */ | ||
22 | #define VNIC_RES_VERSION 0x00000000L | ||
23 | |||
24 | /* vNIC resource types */ | ||
25 | enum vnic_res_type { | ||
26 | RES_TYPE_EOL, /* End-of-list */ | ||
27 | RES_TYPE_WQ, /* Work queues */ | ||
28 | RES_TYPE_RQ, /* Receive queues */ | ||
29 | RES_TYPE_CQ, /* Completion queues */ | ||
30 | RES_TYPE_RSVD1, | ||
31 | RES_TYPE_NIC_CFG, /* Enet NIC config registers */ | ||
32 | RES_TYPE_RSVD2, | ||
33 | RES_TYPE_RSVD3, | ||
34 | RES_TYPE_RSVD4, | ||
35 | RES_TYPE_RSVD5, | ||
36 | RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */ | ||
37 | RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */ | ||
38 | RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */ | ||
39 | RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */ | ||
40 | RES_TYPE_RSVD6, | ||
41 | RES_TYPE_RSVD7, | ||
42 | RES_TYPE_DEVCMD, /* Device command region */ | ||
43 | RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */ | ||
44 | |||
45 | RES_TYPE_MAX, /* Count of resource types */ | ||
46 | }; | ||
47 | |||
48 | struct vnic_resource_header { | ||
49 | u32 magic; | ||
50 | u32 version; | ||
51 | }; | ||
52 | |||
53 | struct vnic_resource { | ||
54 | u8 type; | ||
55 | u8 bar; | ||
56 | u8 pad[2]; | ||
57 | u32 bar_offset; | ||
58 | u32 count; | ||
59 | }; | ||
60 | |||
61 | #endif /* _VNIC_RESOURCE_H_ */ | ||
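A sketch (not part of this patch) of how the BAR-resident resource table is meant to be consumed: validate the header, then walk the packed vnic_resource entries until RES_TYPE_EOL. The base parameter is assumed to be the ioremapped start of the BAR holding the table; the production version of this walk lives in the driver's vnic_dev code.

static int example_scan_resources(void __iomem *base)
{
	struct vnic_resource_header __iomem *rh = base;
	struct vnic_resource __iomem *r =
		(struct vnic_resource __iomem *)(rh + 1);

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION)
		return -EINVAL;

	while (ioread8(&r->type) != RES_TYPE_EOL) {
		/* a real caller records type, bar, bar_offset and count */
		r++;
	}
	return 0;
}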
diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c new file mode 100644 index 000000000000..bedd0d285630 --- /dev/null +++ b/drivers/scsi/fnic/vnic_rq.c | |||
@@ -0,0 +1,196 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | |||
19 | #include <linux/errno.h> | ||
20 | #include <linux/types.h> | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include "vnic_dev.h" | ||
24 | #include "vnic_rq.h" | ||
25 | |||
26 | static int vnic_rq_alloc_bufs(struct vnic_rq *rq) | ||
27 | { | ||
28 | struct vnic_rq_buf *buf; | ||
29 | struct vnic_dev *vdev; | ||
30 | unsigned int i, j, count = rq->ring.desc_count; | ||
31 | unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count); | ||
32 | |||
33 | vdev = rq->vdev; | ||
34 | |||
35 | for (i = 0; i < blks; i++) { | ||
36 | rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); | ||
37 | if (!rq->bufs[i]) { | ||
38 | printk(KERN_ERR "Failed to alloc rq_bufs\n"); | ||
39 | return -ENOMEM; | ||
40 | } | ||
41 | } | ||
42 | |||
43 | for (i = 0; i < blks; i++) { | ||
44 | buf = rq->bufs[i]; | ||
45 | for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) { | ||
46 | buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j; | ||
47 | buf->desc = (u8 *)rq->ring.descs + | ||
48 | rq->ring.desc_size * buf->index; | ||
49 | if (buf->index + 1 == count) { | ||
50 | buf->next = rq->bufs[0]; | ||
51 | break; | ||
52 | } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) { | ||
53 | buf->next = rq->bufs[i + 1]; | ||
54 | } else { | ||
55 | buf->next = buf + 1; | ||
56 | buf++; | ||
57 | } | ||
58 | } | ||
59 | } | ||
60 | |||
61 | rq->to_use = rq->to_clean = rq->bufs[0]; | ||
62 | rq->buf_index = 0; | ||
63 | |||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | void vnic_rq_free(struct vnic_rq *rq) | ||
68 | { | ||
69 | struct vnic_dev *vdev; | ||
70 | unsigned int i; | ||
71 | |||
72 | vdev = rq->vdev; | ||
73 | |||
74 | vnic_dev_free_desc_ring(vdev, &rq->ring); | ||
75 | |||
76 | for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) { | ||
77 | kfree(rq->bufs[i]); | ||
78 | rq->bufs[i] = NULL; | ||
79 | } | ||
80 | |||
81 | rq->ctrl = NULL; | ||
82 | } | ||
83 | |||
84 | int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, | ||
85 | unsigned int desc_count, unsigned int desc_size) | ||
86 | { | ||
87 | int err; | ||
88 | |||
89 | rq->index = index; | ||
90 | rq->vdev = vdev; | ||
91 | |||
92 | rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index); | ||
93 | if (!rq->ctrl) { | ||
94 | printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index); | ||
95 | return -EINVAL; | ||
96 | } | ||
97 | |||
98 | vnic_rq_disable(rq); | ||
99 | |||
100 | err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size); | ||
101 | if (err) | ||
102 | return err; | ||
103 | |||
104 | err = vnic_rq_alloc_bufs(rq); | ||
105 | if (err) { | ||
106 | vnic_rq_free(rq); | ||
107 | return err; | ||
108 | } | ||
109 | |||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, | ||
114 | unsigned int error_interrupt_enable, | ||
115 | unsigned int error_interrupt_offset) | ||
116 | { | ||
117 | u64 paddr; | ||
118 | u32 fetch_index; | ||
119 | |||
120 | paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET; | ||
121 | writeq(paddr, &rq->ctrl->ring_base); | ||
122 | iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size); | ||
123 | iowrite32(cq_index, &rq->ctrl->cq_index); | ||
124 | iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable); | ||
125 | iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset); | ||
126 | iowrite32(0, &rq->ctrl->dropped_packet_count); | ||
127 | iowrite32(0, &rq->ctrl->error_status); | ||
128 | |||
129 | /* Use current fetch_index as the ring starting point */ | ||
130 | fetch_index = ioread32(&rq->ctrl->fetch_index); | ||
131 | rq->to_use = rq->to_clean = | ||
132 | &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES] | ||
133 | [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES]; | ||
134 | iowrite32(fetch_index, &rq->ctrl->posted_index); | ||
135 | |||
136 | rq->buf_index = 0; | ||
137 | } | ||
138 | |||
139 | unsigned int vnic_rq_error_status(struct vnic_rq *rq) | ||
140 | { | ||
141 | return ioread32(&rq->ctrl->error_status); | ||
142 | } | ||
143 | |||
144 | void vnic_rq_enable(struct vnic_rq *rq) | ||
145 | { | ||
146 | iowrite32(1, &rq->ctrl->enable); | ||
147 | } | ||
148 | |||
149 | int vnic_rq_disable(struct vnic_rq *rq) | ||
150 | { | ||
151 | unsigned int wait; | ||
152 | |||
153 | iowrite32(0, &rq->ctrl->enable); | ||
154 | |||
155 | /* Wait for HW to ACK disable request */ | ||
156 | for (wait = 0; wait < 100; wait++) { | ||
157 | if (!(ioread32(&rq->ctrl->running))) | ||
158 | return 0; | ||
159 | udelay(1); | ||
160 | } | ||
161 | |||
162 | printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index); | ||
163 | |||
164 | return -ETIMEDOUT; | ||
165 | } | ||
166 | |||
167 | void vnic_rq_clean(struct vnic_rq *rq, | ||
168 | void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)) | ||
169 | { | ||
170 | struct vnic_rq_buf *buf; | ||
171 | u32 fetch_index; | ||
172 | |||
173 | BUG_ON(ioread32(&rq->ctrl->enable)); | ||
174 | |||
175 | buf = rq->to_clean; | ||
176 | |||
177 | while (vnic_rq_desc_used(rq) > 0) { | ||
178 | |||
179 | (*buf_clean)(rq, buf); | ||
180 | |||
181 | buf = rq->to_clean = buf->next; | ||
182 | rq->ring.desc_avail++; | ||
183 | } | ||
184 | |||
185 | /* Use current fetch_index as the ring starting point */ | ||
186 | fetch_index = ioread32(&rq->ctrl->fetch_index); | ||
187 | rq->to_use = rq->to_clean = | ||
188 | &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES] | ||
189 | [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES]; | ||
190 | iowrite32(fetch_index, &rq->ctrl->posted_index); | ||
191 | |||
192 | rq->buf_index = 0; | ||
193 | |||
194 | vnic_dev_clear_desc_ring(&rq->ring); | ||
195 | } | ||
196 | |||
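Not part of the patch: the bring-up order these functions are designed around, i.e. alloc, init, fill, then enable. The descriptor count comes from vnic_scsi.h further down and the buf_fill callback is supplied by the caller; both appear here only to make the sequence concrete.

static int example_rq_setup(struct vnic_dev *vdev, struct vnic_rq *rq,
			    unsigned int desc_size,
			    int (*buf_fill)(struct vnic_rq *rq))
{
	int err;

	err = vnic_rq_alloc(vdev, rq, 0 /* index */,
			    VNIC_FNIC_RQ_DESCS_MIN, desc_size);
	if (err)
		return err;

	vnic_rq_init(rq, 0 /* cq_index */,
		     1 /* error_interrupt_enable */,
		     0 /* error_interrupt_offset */);

	err = vnic_rq_fill(rq, buf_fill);	/* see vnic_rq.h below */
	if (err) {
		vnic_rq_free(rq);
		return err;
	}

	vnic_rq_enable(rq);
	return 0;
}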
diff --git a/drivers/scsi/fnic/vnic_rq.h b/drivers/scsi/fnic/vnic_rq.h new file mode 100644 index 000000000000..aebdfbd6ad3c --- /dev/null +++ b/drivers/scsi/fnic/vnic_rq.h | |||
@@ -0,0 +1,235 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _VNIC_RQ_H_ | ||
19 | #define _VNIC_RQ_H_ | ||
20 | |||
21 | #include <linux/pci.h> | ||
22 | #include "vnic_dev.h" | ||
23 | #include "vnic_cq.h" | ||
24 | |||
25 | /* | ||
26 | * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth | ||
27 | * Driver) when both are built with CONFIG options =y | ||
28 | */ | ||
29 | #define vnic_rq_desc_avail fnic_rq_desc_avail | ||
30 | #define vnic_rq_desc_used fnic_rq_desc_used | ||
31 | #define vnic_rq_next_desc fnic_rq_next_desc | ||
32 | #define vnic_rq_next_index fnic_rq_next_index | ||
33 | #define vnic_rq_next_buf_index fnic_rq_next_buf_index | ||
34 | #define vnic_rq_post fnic_rq_post | ||
35 | #define vnic_rq_posting_soon fnic_rq_posting_soon | ||
36 | #define vnic_rq_return_descs fnic_rq_return_descs | ||
37 | #define vnic_rq_service fnic_rq_service | ||
38 | #define vnic_rq_fill fnic_rq_fill | ||
39 | #define vnic_rq_free fnic_rq_free | ||
40 | #define vnic_rq_alloc fnic_rq_alloc | ||
41 | #define vnic_rq_init fnic_rq_init | ||
42 | #define vnic_rq_error_status fnic_rq_error_status | ||
43 | #define vnic_rq_enable fnic_rq_enable | ||
44 | #define vnic_rq_disable fnic_rq_disable | ||
45 | #define vnic_rq_clean fnic_rq_clean | ||
46 | |||
47 | /* Receive queue control */ | ||
48 | struct vnic_rq_ctrl { | ||
49 | u64 ring_base; /* 0x00 */ | ||
50 | u32 ring_size; /* 0x08 */ | ||
51 | u32 pad0; | ||
52 | u32 posted_index; /* 0x10 */ | ||
53 | u32 pad1; | ||
54 | u32 cq_index; /* 0x18 */ | ||
55 | u32 pad2; | ||
56 | u32 enable; /* 0x20 */ | ||
57 | u32 pad3; | ||
58 | u32 running; /* 0x28 */ | ||
59 | u32 pad4; | ||
60 | u32 fetch_index; /* 0x30 */ | ||
61 | u32 pad5; | ||
62 | u32 error_interrupt_enable; /* 0x38 */ | ||
63 | u32 pad6; | ||
64 | u32 error_interrupt_offset; /* 0x40 */ | ||
65 | u32 pad7; | ||
66 | u32 error_status; /* 0x48 */ | ||
67 | u32 pad8; | ||
68 | u32 dropped_packet_count; /* 0x50 */ | ||
69 | u32 pad9; | ||
70 | u32 dropped_packet_count_rc; /* 0x58 */ | ||
71 | u32 pad10; | ||
72 | }; | ||
73 | |||
74 | /* Break the vnic_rq_buf allocations into blocks of 64 entries */ | ||
75 | #define VNIC_RQ_BUF_BLK_ENTRIES 64 | ||
76 | #define VNIC_RQ_BUF_BLK_SZ \ | ||
77 | (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf)) | ||
78 | #define VNIC_RQ_BUF_BLKS_NEEDED(entries) \ | ||
79 | DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES) | ||
80 | #define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096) | ||
81 | |||
82 | struct vnic_rq_buf { | ||
83 | struct vnic_rq_buf *next; | ||
84 | dma_addr_t dma_addr; | ||
85 | void *os_buf; | ||
86 | unsigned int os_buf_index; | ||
87 | unsigned int len; | ||
88 | unsigned int index; | ||
89 | void *desc; | ||
90 | }; | ||
91 | |||
92 | struct vnic_rq { | ||
93 | unsigned int index; | ||
94 | struct vnic_dev *vdev; | ||
95 | struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */ | ||
96 | struct vnic_dev_ring ring; | ||
97 | struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX]; | ||
98 | struct vnic_rq_buf *to_use; | ||
99 | struct vnic_rq_buf *to_clean; | ||
100 | void *os_buf_head; | ||
101 | unsigned int buf_index; | ||
102 | unsigned int pkts_outstanding; | ||
103 | }; | ||
104 | |||
105 | static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) | ||
106 | { | ||
107 | /* how many does SW own? */ | ||
108 | return rq->ring.desc_avail; | ||
109 | } | ||
110 | |||
111 | static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) | ||
112 | { | ||
113 | /* how many does HW own? */ | ||
114 | return rq->ring.desc_count - rq->ring.desc_avail - 1; | ||
115 | } | ||
116 | |||
117 | static inline void *vnic_rq_next_desc(struct vnic_rq *rq) | ||
118 | { | ||
119 | return rq->to_use->desc; | ||
120 | } | ||
121 | |||
122 | static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) | ||
123 | { | ||
124 | return rq->to_use->index; | ||
125 | } | ||
126 | |||
127 | static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) | ||
128 | { | ||
129 | return rq->buf_index++; | ||
130 | } | ||
131 | |||
132 | static inline void vnic_rq_post(struct vnic_rq *rq, | ||
133 | void *os_buf, unsigned int os_buf_index, | ||
134 | dma_addr_t dma_addr, unsigned int len) | ||
135 | { | ||
136 | struct vnic_rq_buf *buf = rq->to_use; | ||
137 | |||
138 | buf->os_buf = os_buf; | ||
139 | buf->os_buf_index = os_buf_index; | ||
140 | buf->dma_addr = dma_addr; | ||
141 | buf->len = len; | ||
142 | |||
143 | buf = buf->next; | ||
144 | rq->to_use = buf; | ||
145 | rq->ring.desc_avail--; | ||
146 | |||
147 | /* Move the posted_index every nth descriptor | ||
148 | */ | ||
149 | |||
150 | #ifndef VNIC_RQ_RETURN_RATE | ||
151 | #define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */ | ||
152 | #endif | ||
153 | |||
154 | if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) { | ||
155 | /* Adding write memory barrier prevents compiler and/or CPU | ||
156 | * reordering, thus avoiding descriptor posting before | ||
157 | * descriptor is initialized. Otherwise, hardware can read | ||
158 | * stale descriptor fields. | ||
159 | */ | ||
160 | wmb(); | ||
161 | iowrite32(buf->index, &rq->ctrl->posted_index); | ||
162 | } | ||
163 | } | ||
164 | |||
165 | static inline int vnic_rq_posting_soon(struct vnic_rq *rq) | ||
166 | { | ||
167 | return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0; | ||
168 | } | ||
169 | |||
170 | static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) | ||
171 | { | ||
172 | rq->ring.desc_avail += count; | ||
173 | } | ||
174 | |||
175 | enum desc_return_options { | ||
176 | VNIC_RQ_RETURN_DESC, | ||
177 | VNIC_RQ_DEFER_RETURN_DESC, | ||
178 | }; | ||
179 | |||
180 | static inline void vnic_rq_service(struct vnic_rq *rq, | ||
181 | struct cq_desc *cq_desc, u16 completed_index, | ||
182 | int desc_return, void (*buf_service)(struct vnic_rq *rq, | ||
183 | struct cq_desc *cq_desc, struct vnic_rq_buf *buf, | ||
184 | int skipped, void *opaque), void *opaque) | ||
185 | { | ||
186 | struct vnic_rq_buf *buf; | ||
187 | int skipped; | ||
188 | |||
189 | buf = rq->to_clean; | ||
190 | while (1) { | ||
191 | |||
192 | skipped = (buf->index != completed_index); | ||
193 | |||
194 | (*buf_service)(rq, cq_desc, buf, skipped, opaque); | ||
195 | |||
196 | if (desc_return == VNIC_RQ_RETURN_DESC) | ||
197 | rq->ring.desc_avail++; | ||
198 | |||
199 | rq->to_clean = buf->next; | ||
200 | |||
201 | if (!skipped) | ||
202 | break; | ||
203 | |||
204 | buf = rq->to_clean; | ||
205 | } | ||
206 | } | ||
207 | |||
208 | static inline int vnic_rq_fill(struct vnic_rq *rq, | ||
209 | int (*buf_fill)(struct vnic_rq *rq)) | ||
210 | { | ||
211 | int err; | ||
212 | |||
213 | while (vnic_rq_desc_avail(rq) > 1) { | ||
214 | |||
215 | err = (*buf_fill)(rq); | ||
216 | if (err) | ||
217 | return err; | ||
218 | } | ||
219 | |||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | void vnic_rq_free(struct vnic_rq *rq); | ||
224 | int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, | ||
225 | unsigned int desc_count, unsigned int desc_size); | ||
226 | void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, | ||
227 | unsigned int error_interrupt_enable, | ||
228 | unsigned int error_interrupt_offset); | ||
229 | unsigned int vnic_rq_error_status(struct vnic_rq *rq); | ||
230 | void vnic_rq_enable(struct vnic_rq *rq); | ||
231 | int vnic_rq_disable(struct vnic_rq *rq); | ||
232 | void vnic_rq_clean(struct vnic_rq *rq, | ||
233 | void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)); | ||
234 | |||
235 | #endif /* _VNIC_RQ_H_ */ | ||
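A hypothetical buf_fill callback of the shape vnic_rq_fill() expects (not in this patch): allocate and DMA-map a receive buffer, then post it. example_alloc_and_map() and the 2048-byte length are stand-ins; the driver's own callback would also fill the receive descriptor obtained via vnic_rq_next_desc() before posting.

/* hypothetical helper: allocates a buffer and returns its DMA mapping */
void *example_alloc_and_map(struct vnic_rq *rq, unsigned int len,
			    dma_addr_t *dma_addr);

static int example_rq_fill_one(struct vnic_rq *rq)
{
	void *os_buf;
	dma_addr_t dma_addr;
	unsigned int len = 2048;	/* placeholder buffer size */

	os_buf = example_alloc_and_map(rq, len, &dma_addr);
	if (!os_buf)
		return -ENOMEM;

	/* a real callback also encodes vnic_rq_next_desc(rq) here */
	vnic_rq_post(rq, os_buf, vnic_rq_next_buf_index(rq), dma_addr, len);
	return 0;
}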
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h new file mode 100644 index 000000000000..46baa5254001 --- /dev/null +++ b/drivers/scsi/fnic/vnic_scsi.h | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _VNIC_SCSI_H_ | ||
19 | #define _VNIC_SCSI_H_ | ||
20 | |||
21 | #define VNIC_FNIC_WQ_COPY_COUNT_MIN 1 | ||
22 | #define VNIC_FNIC_WQ_COPY_COUNT_MAX 1 | ||
23 | |||
24 | #define VNIC_FNIC_WQ_DESCS_MIN 64 | ||
25 | #define VNIC_FNIC_WQ_DESCS_MAX 128 | ||
26 | |||
27 | #define VNIC_FNIC_WQ_COPY_DESCS_MIN 64 | ||
28 | #define VNIC_FNIC_WQ_COPY_DESCS_MAX 512 | ||
29 | |||
30 | #define VNIC_FNIC_RQ_DESCS_MIN 64 | ||
31 | #define VNIC_FNIC_RQ_DESCS_MAX 128 | ||
32 | |||
33 | #define VNIC_FNIC_EDTOV_MIN 1000 | ||
34 | #define VNIC_FNIC_EDTOV_MAX 255000 | ||
35 | #define VNIC_FNIC_EDTOV_DEF 2000 | ||
36 | |||
37 | #define VNIC_FNIC_RATOV_MIN 1000 | ||
38 | #define VNIC_FNIC_RATOV_MAX 255000 | ||
39 | |||
40 | #define VNIC_FNIC_MAXDATAFIELDSIZE_MIN 256 | ||
41 | #define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2112 | ||
42 | |||
43 | #define VNIC_FNIC_FLOGI_RETRIES_MIN 0 | ||
44 | #define VNIC_FNIC_FLOGI_RETRIES_MAX 0xffffffff | ||
45 | #define VNIC_FNIC_FLOGI_RETRIES_DEF 0xffffffff | ||
46 | |||
47 | #define VNIC_FNIC_FLOGI_TIMEOUT_MIN 1000 | ||
48 | #define VNIC_FNIC_FLOGI_TIMEOUT_MAX 255000 | ||
49 | |||
50 | #define VNIC_FNIC_PLOGI_RETRIES_MIN 0 | ||
51 | #define VNIC_FNIC_PLOGI_RETRIES_MAX 255 | ||
52 | #define VNIC_FNIC_PLOGI_RETRIES_DEF 8 | ||
53 | |||
54 | #define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000 | ||
55 | #define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000 | ||
56 | |||
57 | #define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 256 | ||
58 | #define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 4096 | ||
59 | |||
60 | #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0 | ||
61 | #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000 | ||
62 | |||
63 | #define VNIC_FNIC_PORT_DOWN_TIMEOUT_MIN 0 | ||
64 | #define VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX 240000 | ||
65 | |||
66 | #define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MIN 0 | ||
67 | #define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX 255 | ||
68 | |||
69 | #define VNIC_FNIC_LUNS_PER_TARGET_MIN 1 | ||
70 | #define VNIC_FNIC_LUNS_PER_TARGET_MAX 1024 | ||
71 | |||
72 | /* Device-specific region: scsi configuration */ | ||
73 | struct vnic_fc_config { | ||
74 | u64 node_wwn; | ||
75 | u64 port_wwn; | ||
76 | u32 flags; | ||
77 | u32 wq_enet_desc_count; | ||
78 | u32 wq_copy_desc_count; | ||
79 | u32 rq_desc_count; | ||
80 | u32 flogi_retries; | ||
81 | u32 flogi_timeout; | ||
82 | u32 plogi_retries; | ||
83 | u32 plogi_timeout; | ||
84 | u32 io_throttle_count; | ||
85 | u32 link_down_timeout; | ||
86 | u32 port_down_timeout; | ||
87 | u32 port_down_io_retries; | ||
88 | u32 luns_per_tgt; | ||
89 | u16 maxdatafieldsize; | ||
90 | u16 ed_tov; | ||
91 | u16 ra_tov; | ||
92 | u16 intr_timer; | ||
93 | u8 intr_timer_type; | ||
94 | }; | ||
95 | |||
96 | #define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */ | ||
97 | #define VFCF_PERBI 0x2 /* persistent binding info available */ | ||
98 | |||
99 | #endif /* _VNIC_SCSI_H_ */ | ||
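Illustrative only: firmware-provided vnic_fc_config values are expected to be clamped into the bounds above before use. clamp_t() is the usual kernel helper for this; the two fields shown are arbitrary examples.

#include <linux/kernel.h>	/* clamp_t */

static void example_fixup_config(struct vnic_fc_config *c)
{
	c->rq_desc_count = clamp_t(u32, c->rq_desc_count,
				   VNIC_FNIC_RQ_DESCS_MIN,
				   VNIC_FNIC_RQ_DESCS_MAX);
	c->luns_per_tgt = clamp_t(u32, c->luns_per_tgt,
				  VNIC_FNIC_LUNS_PER_TARGET_MIN,
				  VNIC_FNIC_LUNS_PER_TARGET_MAX);
}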
diff --git a/drivers/scsi/fnic/vnic_stats.h b/drivers/scsi/fnic/vnic_stats.h new file mode 100644 index 000000000000..5372e23c1cb3 --- /dev/null +++ b/drivers/scsi/fnic/vnic_stats.h | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _VNIC_STATS_H_ | ||
19 | #define _VNIC_STATS_H_ | ||
20 | |||
21 | /* Tx statistics */ | ||
22 | struct vnic_tx_stats { | ||
23 | u64 tx_frames_ok; | ||
24 | u64 tx_unicast_frames_ok; | ||
25 | u64 tx_multicast_frames_ok; | ||
26 | u64 tx_broadcast_frames_ok; | ||
27 | u64 tx_bytes_ok; | ||
28 | u64 tx_unicast_bytes_ok; | ||
29 | u64 tx_multicast_bytes_ok; | ||
30 | u64 tx_broadcast_bytes_ok; | ||
31 | u64 tx_drops; | ||
32 | u64 tx_errors; | ||
33 | u64 tx_tso; | ||
34 | u64 rsvd[16]; | ||
35 | }; | ||
36 | |||
37 | /* Rx statistics */ | ||
38 | struct vnic_rx_stats { | ||
39 | u64 rx_frames_ok; | ||
40 | u64 rx_frames_total; | ||
41 | u64 rx_unicast_frames_ok; | ||
42 | u64 rx_multicast_frames_ok; | ||
43 | u64 rx_broadcast_frames_ok; | ||
44 | u64 rx_bytes_ok; | ||
45 | u64 rx_unicast_bytes_ok; | ||
46 | u64 rx_multicast_bytes_ok; | ||
47 | u64 rx_broadcast_bytes_ok; | ||
48 | u64 rx_drop; | ||
49 | u64 rx_no_bufs; | ||
50 | u64 rx_errors; | ||
51 | u64 rx_rss; | ||
52 | u64 rx_crc_errors; | ||
53 | u64 rx_frames_64; | ||
54 | u64 rx_frames_127; | ||
55 | u64 rx_frames_255; | ||
56 | u64 rx_frames_511; | ||
57 | u64 rx_frames_1023; | ||
58 | u64 rx_frames_1518; | ||
59 | u64 rx_frames_to_max; | ||
60 | u64 rsvd[16]; | ||
61 | }; | ||
62 | |||
63 | struct vnic_stats { | ||
64 | struct vnic_tx_stats tx; | ||
65 | struct vnic_rx_stats rx; | ||
66 | }; | ||
67 | |||
68 | #endif /* _VNIC_STATS_H_ */ | ||
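For context, not part of the patch: CMD_STATS_DUMP (vnic_devcmd.h above) takes the DMA address of a vnic_stats buffer in a0 and its size in a1, so the buffer is typically allocated DMA-coherent, roughly as sketched here. Error handling and the devcmd call itself are omitted.

#include <linux/dma-mapping.h>

static struct vnic_stats *example_alloc_stats(struct device *dev,
					      dma_addr_t *paddr)
{
	/* the returned *paddr is what gets passed as a0 of CMD_STATS_DUMP */
	return dma_alloc_coherent(dev, sizeof(struct vnic_stats),
				  paddr, GFP_KERNEL);
}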
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c new file mode 100644 index 000000000000..1f9ea790d130 --- /dev/null +++ b/drivers/scsi/fnic/vnic_wq.c | |||
@@ -0,0 +1,182 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | |||
19 | #include <linux/errno.h> | ||
20 | #include <linux/types.h> | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include "vnic_dev.h" | ||
24 | #include "vnic_wq.h" | ||
25 | |||
26 | static int vnic_wq_alloc_bufs(struct vnic_wq *wq) | ||
27 | { | ||
28 | struct vnic_wq_buf *buf; | ||
29 | struct vnic_dev *vdev; | ||
30 | unsigned int i, j, count = wq->ring.desc_count; | ||
31 | unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); | ||
32 | |||
33 | vdev = wq->vdev; | ||
34 | |||
35 | for (i = 0; i < blks; i++) { | ||
36 | wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); | ||
37 | if (!wq->bufs[i]) { | ||
38 | printk(KERN_ERR "Failed to alloc wq_bufs\n"); | ||
39 | return -ENOMEM; | ||
40 | } | ||
41 | } | ||
42 | |||
43 | for (i = 0; i < blks; i++) { | ||
44 | buf = wq->bufs[i]; | ||
45 | for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) { | ||
46 | buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j; | ||
47 | buf->desc = (u8 *)wq->ring.descs + | ||
48 | wq->ring.desc_size * buf->index; | ||
49 | if (buf->index + 1 == count) { | ||
50 | buf->next = wq->bufs[0]; | ||
51 | break; | ||
52 | } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) { | ||
53 | buf->next = wq->bufs[i + 1]; | ||
54 | } else { | ||
55 | buf->next = buf + 1; | ||
56 | buf++; | ||
57 | } | ||
58 | } | ||
59 | } | ||
60 | |||
61 | wq->to_use = wq->to_clean = wq->bufs[0]; | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | void vnic_wq_free(struct vnic_wq *wq) | ||
67 | { | ||
68 | struct vnic_dev *vdev; | ||
69 | unsigned int i; | ||
70 | |||
71 | vdev = wq->vdev; | ||
72 | |||
73 | vnic_dev_free_desc_ring(vdev, &wq->ring); | ||
74 | |||
75 | for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) { | ||
76 | kfree(wq->bufs[i]); | ||
77 | wq->bufs[i] = NULL; | ||
78 | } | ||
79 | |||
80 | wq->ctrl = NULL; | ||
81 | |||
82 | } | ||
83 | |||
84 | int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, | ||
85 | unsigned int desc_count, unsigned int desc_size) | ||
86 | { | ||
87 | int err; | ||
88 | |||
89 | wq->index = index; | ||
90 | wq->vdev = vdev; | ||
91 | |||
92 | wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); | ||
93 | if (!wq->ctrl) { | ||
94 | printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index); | ||
95 | return -EINVAL; | ||
96 | } | ||
97 | |||
98 | vnic_wq_disable(wq); | ||
99 | |||
100 | err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); | ||
101 | if (err) | ||
102 | return err; | ||
103 | |||
104 | err = vnic_wq_alloc_bufs(wq); | ||
105 | if (err) { | ||
106 | vnic_wq_free(wq); | ||
107 | return err; | ||
108 | } | ||
109 | |||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, | ||
114 | unsigned int error_interrupt_enable, | ||
115 | unsigned int error_interrupt_offset) | ||
116 | { | ||
117 | u64 paddr; | ||
118 | |||
119 | paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; | ||
120 | writeq(paddr, &wq->ctrl->ring_base); | ||
121 | iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); | ||
122 | iowrite32(0, &wq->ctrl->fetch_index); | ||
123 | iowrite32(0, &wq->ctrl->posted_index); | ||
124 | iowrite32(cq_index, &wq->ctrl->cq_index); | ||
125 | iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); | ||
126 | iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); | ||
127 | iowrite32(0, &wq->ctrl->error_status); | ||
128 | } | ||
129 | |||
130 | unsigned int vnic_wq_error_status(struct vnic_wq *wq) | ||
131 | { | ||
132 | return ioread32(&wq->ctrl->error_status); | ||
133 | } | ||
134 | |||
135 | void vnic_wq_enable(struct vnic_wq *wq) | ||
136 | { | ||
137 | iowrite32(1, &wq->ctrl->enable); | ||
138 | } | ||
139 | |||
140 | int vnic_wq_disable(struct vnic_wq *wq) | ||
141 | { | ||
142 | unsigned int wait; | ||
143 | |||
144 | iowrite32(0, &wq->ctrl->enable); | ||
145 | |||
146 | /* Wait for HW to ACK disable request */ | ||
147 | for (wait = 0; wait < 100; wait++) { | ||
148 | if (!(ioread32(&wq->ctrl->running))) | ||
149 | return 0; | ||
150 | udelay(1); | ||
151 | } | ||
152 | |||
153 | printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index); | ||
154 | |||
155 | return -ETIMEDOUT; | ||
156 | } | ||
157 | |||
158 | void vnic_wq_clean(struct vnic_wq *wq, | ||
159 | void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) | ||
160 | { | ||
161 | struct vnic_wq_buf *buf; | ||
162 | |||
163 | BUG_ON(ioread32(&wq->ctrl->enable)); | ||
164 | |||
165 | buf = wq->to_clean; | ||
166 | |||
167 | while (vnic_wq_desc_used(wq) > 0) { | ||
168 | |||
169 | (*buf_clean)(wq, buf); | ||
170 | |||
171 | buf = wq->to_clean = buf->next; | ||
172 | wq->ring.desc_avail++; | ||
173 | } | ||
174 | |||
175 | wq->to_use = wq->to_clean = wq->bufs[0]; | ||
176 | |||
177 | iowrite32(0, &wq->ctrl->fetch_index); | ||
178 | iowrite32(0, &wq->ctrl->posted_index); | ||
179 | iowrite32(0, &wq->ctrl->error_status); | ||
180 | |||
181 | vnic_dev_clear_desc_ring(&wq->ring); | ||
182 | } | ||
diff --git a/drivers/scsi/fnic/vnic_wq.h b/drivers/scsi/fnic/vnic_wq.h new file mode 100644 index 000000000000..5cd094f79281 --- /dev/null +++ b/drivers/scsi/fnic/vnic_wq.h | |||
@@ -0,0 +1,175 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _VNIC_WQ_H_ | ||
19 | #define _VNIC_WQ_H_ | ||
20 | |||
21 | #include <linux/pci.h> | ||
22 | #include "vnic_dev.h" | ||
23 | #include "vnic_cq.h" | ||
24 | |||
25 | /* | ||
26 | * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth | ||
27 | * Driver) when both are built with CONFIG options =y | ||
28 | */ | ||
29 | #define vnic_wq_desc_avail fnic_wq_desc_avail | ||
30 | #define vnic_wq_desc_used fnic_wq_desc_used | ||
31 | #define vnic_wq_next_desc fnic_wq_next_desc | ||
32 | #define vnic_wq_post fnic_wq_post | ||
33 | #define vnic_wq_service fnic_wq_service | ||
34 | #define vnic_wq_free fnic_wq_free | ||
35 | #define vnic_wq_alloc fnic_wq_alloc | ||
36 | #define vnic_wq_init fnic_wq_init | ||
37 | #define vnic_wq_error_status fnic_wq_error_status | ||
38 | #define vnic_wq_enable fnic_wq_enable | ||
39 | #define vnic_wq_disable fnic_wq_disable | ||
40 | #define vnic_wq_clean fnic_wq_clean | ||
41 | |||
42 | /* Work queue control */ | ||
43 | struct vnic_wq_ctrl { | ||
44 | u64 ring_base; /* 0x00 */ | ||
45 | u32 ring_size; /* 0x08 */ | ||
46 | u32 pad0; | ||
47 | u32 posted_index; /* 0x10 */ | ||
48 | u32 pad1; | ||
49 | u32 cq_index; /* 0x18 */ | ||
50 | u32 pad2; | ||
51 | u32 enable; /* 0x20 */ | ||
52 | u32 pad3; | ||
53 | u32 running; /* 0x28 */ | ||
54 | u32 pad4; | ||
55 | u32 fetch_index; /* 0x30 */ | ||
56 | u32 pad5; | ||
57 | u32 dca_value; /* 0x38 */ | ||
58 | u32 pad6; | ||
59 | u32 error_interrupt_enable; /* 0x40 */ | ||
60 | u32 pad7; | ||
61 | u32 error_interrupt_offset; /* 0x48 */ | ||
62 | u32 pad8; | ||
63 | u32 error_status; /* 0x50 */ | ||
64 | u32 pad9; | ||
65 | }; | ||
66 | |||
67 | struct vnic_wq_buf { | ||
68 | struct vnic_wq_buf *next; | ||
69 | dma_addr_t dma_addr; | ||
70 | void *os_buf; | ||
71 | unsigned int len; | ||
72 | unsigned int index; | ||
73 | int sop; | ||
74 | void *desc; | ||
75 | }; | ||
76 | |||
77 | /* Break the vnic_wq_buf allocations into blocks of 64 entries */ | ||
78 | #define VNIC_WQ_BUF_BLK_ENTRIES 64 | ||
79 | #define VNIC_WQ_BUF_BLK_SZ \ | ||
80 | (VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf)) | ||
81 | #define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ | ||
82 | DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES) | ||
83 | #define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096) | ||
84 | |||
85 | struct vnic_wq { | ||
86 | unsigned int index; | ||
87 | struct vnic_dev *vdev; | ||
88 | struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ | ||
89 | struct vnic_dev_ring ring; | ||
90 | struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX]; | ||
91 | struct vnic_wq_buf *to_use; | ||
92 | struct vnic_wq_buf *to_clean; | ||
93 | unsigned int pkts_outstanding; | ||
94 | }; | ||
95 | |||
96 | static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) | ||
97 | { | ||
98 | /* how many does SW own? */ | ||
99 | return wq->ring.desc_avail; | ||
100 | } | ||
101 | |||
102 | static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) | ||
103 | { | ||
104 | /* how many does HW own? */ | ||
105 | return wq->ring.desc_count - wq->ring.desc_avail - 1; | ||
106 | } | ||
107 | |||
108 | static inline void *vnic_wq_next_desc(struct vnic_wq *wq) | ||
109 | { | ||
110 | return wq->to_use->desc; | ||
111 | } | ||
112 | |||
113 | static inline void vnic_wq_post(struct vnic_wq *wq, | ||
114 | void *os_buf, dma_addr_t dma_addr, | ||
115 | unsigned int len, int sop, int eop) | ||
116 | { | ||
117 | struct vnic_wq_buf *buf = wq->to_use; | ||
118 | |||
119 | buf->sop = sop; | ||
120 | buf->os_buf = eop ? os_buf : NULL; | ||
121 | buf->dma_addr = dma_addr; | ||
122 | buf->len = len; | ||
123 | |||
124 | buf = buf->next; | ||
125 | if (eop) { | ||
126 | /* Adding write memory barrier prevents compiler and/or CPU | ||
127 | * reordering, thus avoiding descriptor posting before | ||
128 | * descriptor is initialized. Otherwise, hardware can read | ||
129 | * stale descriptor fields. | ||
130 | */ | ||
131 | wmb(); | ||
132 | iowrite32(buf->index, &wq->ctrl->posted_index); | ||
133 | } | ||
134 | wq->to_use = buf; | ||
135 | |||
136 | wq->ring.desc_avail--; | ||
137 | } | ||
138 | |||
139 | static inline void vnic_wq_service(struct vnic_wq *wq, | ||
140 | struct cq_desc *cq_desc, u16 completed_index, | ||
141 | void (*buf_service)(struct vnic_wq *wq, | ||
142 | struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), | ||
143 | void *opaque) | ||
144 | { | ||
145 | struct vnic_wq_buf *buf; | ||
146 | |||
147 | buf = wq->to_clean; | ||
148 | while (1) { | ||
149 | |||
150 | (*buf_service)(wq, cq_desc, buf, opaque); | ||
151 | |||
152 | wq->ring.desc_avail++; | ||
153 | |||
154 | wq->to_clean = buf->next; | ||
155 | |||
156 | if (buf->index == completed_index) | ||
157 | break; | ||
158 | |||
159 | buf = wq->to_clean; | ||
160 | } | ||
161 | } | ||
162 | |||
163 | void vnic_wq_free(struct vnic_wq *wq); | ||
164 | int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, | ||
165 | unsigned int desc_count, unsigned int desc_size); | ||
166 | void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, | ||
167 | unsigned int error_interrupt_enable, | ||
168 | unsigned int error_interrupt_offset); | ||
169 | unsigned int vnic_wq_error_status(struct vnic_wq *wq); | ||
170 | void vnic_wq_enable(struct vnic_wq *wq); | ||
171 | int vnic_wq_disable(struct vnic_wq *wq); | ||
172 | void vnic_wq_clean(struct vnic_wq *wq, | ||
173 | void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)); | ||
174 | |||
175 | #endif /* _VNIC_WQ_H_ */ | ||
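Hypothetical transmit-side usage, not part of the patch: posting a single-fragment buffer (sop and eop both set) after checking for room, plus a completion callback of the shape vnic_wq_service() expects. A real sender also writes the work-queue descriptor returned by vnic_wq_next_desc() before posting, and the callback would unmap and free buf->os_buf.

static void example_wq_post_one(struct vnic_wq *wq, void *os_buf,
				dma_addr_t dma_addr, unsigned int len)
{
	if (!vnic_wq_desc_avail(wq))
		return;		/* caller decides how to handle back-pressure */

	/* descriptor at vnic_wq_next_desc(wq) would be filled in here */
	vnic_wq_post(wq, os_buf, dma_addr, len, 1 /* sop */, 1 /* eop */);
}

static void example_wq_complete(struct vnic_wq *wq, struct cq_desc *cq_desc,
				struct vnic_wq_buf *buf, void *opaque)
{
	/* unmap buf->dma_addr and free buf->os_buf; opaque is caller state */
}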
diff --git a/drivers/scsi/fnic/vnic_wq_copy.c b/drivers/scsi/fnic/vnic_wq_copy.c new file mode 100644 index 000000000000..9eab7e7caf38 --- /dev/null +++ b/drivers/scsi/fnic/vnic_wq_copy.c | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | |||
19 | #include <linux/errno.h> | ||
20 | #include <linux/types.h> | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include "vnic_wq_copy.h" | ||
24 | |||
25 | void vnic_wq_copy_enable(struct vnic_wq_copy *wq) | ||
26 | { | ||
27 | iowrite32(1, &wq->ctrl->enable); | ||
28 | } | ||
29 | |||
30 | int vnic_wq_copy_disable(struct vnic_wq_copy *wq) | ||
31 | { | ||
32 | unsigned int wait; | ||
33 | |||
34 | iowrite32(0, &wq->ctrl->enable); | ||
35 | |||
36 | /* Wait for HW to ACK disable request */ | ||
37 | for (wait = 0; wait < 100; wait++) { | ||
38 | if (!(ioread32(&wq->ctrl->running))) | ||
39 | return 0; | ||
40 | udelay(1); | ||
41 | } | ||
42 | |||
43 | printk(KERN_ERR "Failed to disable Copy WQ[%d]," | ||
44 | " fetch index=%d, posted_index=%d\n", | ||
45 | wq->index, ioread32(&wq->ctrl->fetch_index), | ||
46 | ioread32(&wq->ctrl->posted_index)); | ||
47 | |||
48 | return -ENODEV; | ||
49 | } | ||
50 | |||
51 | void vnic_wq_copy_clean(struct vnic_wq_copy *wq, | ||
52 | void (*q_clean)(struct vnic_wq_copy *wq, | ||
53 | struct fcpio_host_req *wq_desc)) | ||
54 | { | ||
55 | BUG_ON(ioread32(&wq->ctrl->enable)); | ||
56 | |||
57 | if (vnic_wq_copy_desc_in_use(wq)) | ||
58 | vnic_wq_copy_service(wq, -1, q_clean); | ||
59 | |||
60 | wq->to_use_index = wq->to_clean_index = 0; | ||
61 | |||
62 | iowrite32(0, &wq->ctrl->fetch_index); | ||
63 | iowrite32(0, &wq->ctrl->posted_index); | ||
64 | iowrite32(0, &wq->ctrl->error_status); | ||
65 | |||
66 | vnic_dev_clear_desc_ring(&wq->ring); | ||
67 | } | ||
68 | |||
69 | void vnic_wq_copy_free(struct vnic_wq_copy *wq) | ||
70 | { | ||
71 | struct vnic_dev *vdev; | ||
72 | |||
73 | vdev = wq->vdev; | ||
74 | vnic_dev_free_desc_ring(vdev, &wq->ring); | ||
75 | wq->ctrl = NULL; | ||
76 | } | ||
77 | |||
78 | int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq, | ||
79 | unsigned int index, unsigned int desc_count, | ||
80 | unsigned int desc_size) | ||
81 | { | ||
82 | int err; | ||
83 | |||
84 | wq->index = index; | ||
85 | wq->vdev = vdev; | ||
86 | wq->to_use_index = wq->to_clean_index = 0; | ||
87 | wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); | ||
88 | if (!wq->ctrl) { | ||
89 | printk(KERN_ERR "Failed to hook COPY WQ[%d] resource\n", index); | ||
90 | return -EINVAL; | ||
91 | } | ||
92 | |||
93 | vnic_wq_copy_disable(wq); | ||
94 | |||
95 | err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); | ||
96 | if (err) | ||
97 | return err; | ||
98 | |||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index, | ||
103 | unsigned int error_interrupt_enable, | ||
104 | unsigned int error_interrupt_offset) | ||
105 | { | ||
106 | u64 paddr; | ||
107 | |||
108 | paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; | ||
109 | writeq(paddr, &wq->ctrl->ring_base); | ||
110 | iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); | ||
111 | iowrite32(0, &wq->ctrl->fetch_index); | ||
112 | iowrite32(0, &wq->ctrl->posted_index); | ||
113 | iowrite32(cq_index, &wq->ctrl->cq_index); | ||
114 | iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); | ||
115 | iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); | ||
116 | } | ||
117 | |||
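Taken together, these functions define the copy WQ lifecycle: allocate the descriptor ring and map the queue's control registers, program the ring base and reset the indices, enable the queue, and on teardown disable it, drain any in-flight descriptors and free the ring. A hedged sketch of that sequence follows; the queue index, ring size and the example_* wrappers are assumptions for illustration, not fnic's probe/remove code.

#include "vnic_wq_copy.h"

/* Hedged sketch of copy WQ setup; desc_count and the cq/interrupt
 * parameters below are illustrative, not the driver's real values. */
static int example_copy_wq_setup(struct vnic_dev *vdev,
				 struct vnic_wq_copy *wq)
{
	int err;

	err = vnic_wq_copy_alloc(vdev, wq, 0 /* index */, 64 /* descs */,
				 sizeof(struct fcpio_host_req));
	if (err)
		return err;

	vnic_wq_copy_init(wq, 0 /* cq_index */, 0, 0);
	vnic_wq_copy_enable(wq);
	return 0;
}

/* Teardown: the disable must be acknowledged by hardware before the
 * ring is drained and freed. */
static void example_copy_wq_teardown(struct vnic_wq_copy *wq,
	void (*q_clean)(struct vnic_wq_copy *wq, struct fcpio_host_req *desc))
{
	if (vnic_wq_copy_disable(wq))
		return;
	vnic_wq_copy_clean(wq, q_clean);
	vnic_wq_copy_free(wq);
}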
diff --git a/drivers/scsi/fnic/vnic_wq_copy.h b/drivers/scsi/fnic/vnic_wq_copy.h new file mode 100644 index 000000000000..6aff9740c3df --- /dev/null +++ b/drivers/scsi/fnic/vnic_wq_copy.h | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _VNIC_WQ_COPY_H_ | ||
19 | #define _VNIC_WQ_COPY_H_ | ||
20 | |||
21 | #include <linux/pci.h> | ||
22 | #include "vnic_wq.h" | ||
23 | #include "fcpio.h" | ||
24 | |||
25 | #define VNIC_WQ_COPY_MAX 1 | ||
26 | |||
27 | struct vnic_wq_copy { | ||
28 | unsigned int index; | ||
29 | struct vnic_dev *vdev; | ||
30 | struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ | ||
31 | struct vnic_dev_ring ring; | ||
32 | unsigned to_use_index; | ||
33 | unsigned to_clean_index; | ||
34 | }; | ||
35 | |||
36 | static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq) | ||
37 | { | ||
38 | return wq->ring.desc_avail; | ||
39 | } | ||
40 | |||
41 | static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq) | ||
42 | { | ||
43 | return wq->ring.desc_count - 1 - wq->ring.desc_avail; | ||
44 | } | ||
45 | |||
46 | static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq) | ||
47 | { | ||
48 | struct fcpio_host_req *desc = wq->ring.descs; | ||
49 | return &desc[wq->to_use_index]; | ||
50 | } | ||
51 | |||
52 | static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq) | ||
53 | { | ||
54 | |||
55 | ((wq->to_use_index + 1) == wq->ring.desc_count) ? | ||
56 | (wq->to_use_index = 0) : (wq->to_use_index++); | ||
57 | wq->ring.desc_avail--; | ||
58 | |||
59 | /* Adding a write memory barrier prevents compiler and/or CPU | ||
60 | * reordering, so the descriptor cannot be posted before it is | ||
61 | * fully initialized. Otherwise, the hardware could read stale | ||
62 | * descriptor fields. | ||
63 | */ | ||
64 | wmb(); | ||
65 | |||
66 | iowrite32(wq->to_use_index, &wq->ctrl->posted_index); | ||
67 | } | ||
68 | |||
69 | static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index) | ||
70 | { | ||
71 | unsigned int cnt; | ||
72 | |||
73 | if (wq->to_clean_index <= index) | ||
74 | cnt = (index - wq->to_clean_index) + 1; | ||
75 | else | ||
76 | cnt = wq->ring.desc_count - wq->to_clean_index + index + 1; | ||
77 | |||
78 | wq->to_clean_index = ((index + 1) % wq->ring.desc_count); | ||
79 | wq->ring.desc_avail += cnt; | ||
80 | |||
81 | } | ||
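The count computed in vnic_wq_copy_desc_process() accounts for ring wrap-around: when the completed index has not wrapped past to_clean_index the count is a simple difference plus one, otherwise it spans the tail of the ring and continues from index zero. A small hedged illustration with concrete, assumed numbers:

/* Standalone restatement of the wrap-around arithmetic above;
 * the concrete values in the comment are purely illustrative. */
static inline unsigned int example_completed_count(unsigned int to_clean_index,
						   unsigned int index,
						   unsigned int desc_count)
{
	if (to_clean_index <= index)
		return (index - to_clean_index) + 1;
	return desc_count - to_clean_index + index + 1;
}

/* e.g. desc_count = 64, to_clean_index = 60, completed index = 3:
 * entries 60..63 and 0..3 finished, so 64 - 60 + 3 + 1 = 8. */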
82 | |||
83 | static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq, | ||
84 | u16 completed_index, | ||
85 | void (*q_service)(struct vnic_wq_copy *wq, | ||
86 | struct fcpio_host_req *wq_desc)) | ||
87 | { | ||
88 | struct fcpio_host_req *wq_desc = wq->ring.descs; | ||
89 | unsigned int curr_index; | ||
90 | |||
91 | while (1) { | ||
92 | |||
93 | if (q_service) | ||
94 | (*q_service)(wq, &wq_desc[wq->to_clean_index]); | ||
95 | |||
96 | wq->ring.desc_avail++; | ||
97 | |||
98 | curr_index = wq->to_clean_index; | ||
99 | |||
100 | /* increment the to-clean index so that we start | ||
101 | * with an unprocessed index next time we enter the loop | ||
102 | */ | ||
103 | ((wq->to_clean_index + 1) == wq->ring.desc_count) ? | ||
104 | (wq->to_clean_index = 0) : (wq->to_clean_index++); | ||
105 | |||
106 | if (curr_index == completed_index) | ||
107 | break; | ||
108 | |||
109 | /* we have cleaned all the entries */ | ||
110 | if ((completed_index == (u16)-1) && | ||
111 | (wq->to_clean_index == wq->to_use_index)) | ||
112 | break; | ||
113 | } | ||
114 | } | ||
115 | |||
116 | void vnic_wq_copy_enable(struct vnic_wq_copy *wq); | ||
117 | int vnic_wq_copy_disable(struct vnic_wq_copy *wq); | ||
118 | void vnic_wq_copy_free(struct vnic_wq_copy *wq); | ||
119 | int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq, | ||
120 | unsigned int index, unsigned int desc_count, unsigned int desc_size); | ||
121 | void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index, | ||
122 | unsigned int error_interrupt_enable, | ||
123 | unsigned int error_interrupt_offset); | ||
124 | void vnic_wq_copy_clean(struct vnic_wq_copy *wq, | ||
125 | void (*q_clean)(struct vnic_wq_copy *wq, | ||
126 | struct fcpio_host_req *wq_desc)); | ||
127 | |||
128 | #endif /* _VNIC_WQ_COPY_H_ */ | ||
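The inline helpers above make up the copy WQ fast path: a submitter checks vnic_wq_copy_desc_avail(), fills the descriptor returned by vnic_wq_copy_next_desc() and publishes it with vnic_wq_copy_post(), which wraps the use index, issues the write barrier and writes posted_index; the completion side hands the firmware-reported index to vnic_wq_copy_service(), or to vnic_wq_copy_desc_process() when no per-descriptor callback is needed. A hedged sketch of a submit routine follows; locking and the actual fcpio_host_req field layout are deliberately left out, and the example_* name is illustrative.

#include "vnic_wq_copy.h"

/* Hedged sketch of the submit path; not the driver's actual code. */
static int example_copy_wq_submit(struct vnic_wq_copy *wq)
{
	struct fcpio_host_req *desc;

	if (!vnic_wq_copy_desc_avail(wq))
		return -ENOMEM;			/* ring full; caller retries */

	desc = vnic_wq_copy_next_desc(wq);
	/* fill *desc with the FCPIO request here, before posting */

	vnic_wq_copy_post(wq);			/* wmb() + posted_index write */
	return 0;
}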
diff --git a/drivers/scsi/fnic/wq_enet_desc.h b/drivers/scsi/fnic/wq_enet_desc.h new file mode 100644 index 000000000000..b121cbad18b8 --- /dev/null +++ b/drivers/scsi/fnic/wq_enet_desc.h | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | */ | ||
18 | #ifndef _WQ_ENET_DESC_H_ | ||
19 | #define _WQ_ENET_DESC_H_ | ||
20 | |||
21 | /* Ethernet work queue descriptor: 16B */ | ||
22 | struct wq_enet_desc { | ||
23 | __le64 address; | ||
24 | __le16 length; | ||
25 | __le16 mss_loopback; | ||
26 | __le16 header_length_flags; | ||
27 | __le16 vlan_tag; | ||
28 | }; | ||
29 | |||
30 | #define WQ_ENET_ADDR_BITS 64 | ||
31 | #define WQ_ENET_LEN_BITS 14 | ||
32 | #define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1) | ||
33 | #define WQ_ENET_MSS_BITS 14 | ||
34 | #define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1) | ||
35 | #define WQ_ENET_MSS_SHIFT 2 | ||
36 | #define WQ_ENET_LOOPBACK_SHIFT 1 | ||
37 | #define WQ_ENET_HDRLEN_BITS 10 | ||
38 | #define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1) | ||
39 | #define WQ_ENET_FLAGS_OM_BITS 2 | ||
40 | #define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1) | ||
41 | #define WQ_ENET_FLAGS_EOP_SHIFT 12 | ||
42 | #define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13 | ||
43 | #define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14 | ||
44 | #define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15 | ||
45 | |||
46 | #define WQ_ENET_OFFLOAD_MODE_CSUM 0 | ||
47 | #define WQ_ENET_OFFLOAD_MODE_RESERVED 1 | ||
48 | #define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2 | ||
49 | #define WQ_ENET_OFFLOAD_MODE_TSO 3 | ||
50 | |||
51 | static inline void wq_enet_desc_enc(struct wq_enet_desc *desc, | ||
52 | u64 address, u16 length, u16 mss, u16 header_length, | ||
53 | u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap, | ||
54 | u8 vlan_tag_insert, u16 vlan_tag, u8 loopback) | ||
55 | { | ||
56 | desc->address = cpu_to_le64(address); | ||
57 | desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK); | ||
58 | desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) << | ||
59 | WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT); | ||
60 | desc->header_length_flags = cpu_to_le16( | ||
61 | (header_length & WQ_ENET_HDRLEN_MASK) | | ||
62 | (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS | | ||
63 | (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT | | ||
64 | (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT | | ||
65 | (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT | | ||
66 | (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT); | ||
67 | desc->vlan_tag = cpu_to_le16(vlan_tag); | ||
68 | } | ||
69 | |||
70 | static inline void wq_enet_desc_dec(struct wq_enet_desc *desc, | ||
71 | u64 *address, u16 *length, u16 *mss, u16 *header_length, | ||
72 | u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap, | ||
73 | u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback) | ||
74 | { | ||
75 | *address = le64_to_cpu(desc->address); | ||
76 | *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK; | ||
77 | *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) & | ||
78 | WQ_ENET_MSS_MASK; | ||
79 | *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >> | ||
80 | WQ_ENET_LOOPBACK_SHIFT) & 1); | ||
81 | *header_length = le16_to_cpu(desc->header_length_flags) & | ||
82 | WQ_ENET_HDRLEN_MASK; | ||
83 | *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >> | ||
84 | WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK); | ||
85 | *eop = (u8)((le16_to_cpu(desc->header_length_flags) >> | ||
86 | WQ_ENET_FLAGS_EOP_SHIFT) & 1); | ||
87 | *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >> | ||
88 | WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1); | ||
89 | *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >> | ||
90 | WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1); | ||
91 | *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >> | ||
92 | WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1); | ||
93 | *vlan_tag = le16_to_cpu(desc->vlan_tag); | ||
94 | } | ||
95 | |||
96 | #endif /* _WQ_ENET_DESC_H_ */ | ||
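wq_enet_desc_enc() packs every transmit parameter into the 16-byte little-endian descriptor: the 14-bit length and MSS, the loopback bit, the 10-bit header length, the 2-bit offload mode, the EOP/CQ-entry/FCoE-encap/VLAN-insert flag bits and the VLAN tag; wq_enet_desc_dec() reverses the packing. A hedged sketch of encoding a single FCoE-encapsulated frame follows; dma_addr and frame_len are assumed to come from the caller's DMA mapping, and the example_* name is illustrative.

#include <linux/types.h>
#include "wq_enet_desc.h"

/* Hedged sketch: encode one FCoE frame as a single-descriptor send. */
static void example_enc_fcoe_frame(struct wq_enet_desc *desc,
				   u64 dma_addr, u16 frame_len)
{
	wq_enet_desc_enc(desc,
			 dma_addr,			/* buffer address */
			 frame_len,			/* <= WQ_ENET_LEN_MASK */
			 0,				/* mss (unused for csum mode) */
			 0,				/* header_length */
			 WQ_ENET_OFFLOAD_MODE_CSUM,	/* offload_mode */
			 1,				/* eop: last desc of frame */
			 1,				/* cq_entry: request completion */
			 1,				/* fcoe_encap */
			 0,				/* vlan_tag_insert */
			 0,				/* vlan_tag */
			 0);				/* loopback */
}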
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index babd4cc0cb25..36b1d1052ba1 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h | |||
@@ -69,7 +69,7 @@ | |||
69 | #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" | 69 | #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" |
70 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" | 70 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" |
71 | #define MPT2SAS_DRIVER_VERSION "01.100.02.00" | 71 | #define MPT2SAS_DRIVER_VERSION "01.100.02.00" |
72 | #define MPT2SAS_MAJOR_VERSION 00 | 72 | #define MPT2SAS_MAJOR_VERSION 01 |
73 | #define MPT2SAS_MINOR_VERSION 100 | 73 | #define MPT2SAS_MINOR_VERSION 100 |
74 | #define MPT2SAS_BUILD_VERSION 02 | 74 | #define MPT2SAS_BUILD_VERSION 02 |
75 | #define MPT2SAS_RELEASE_VERSION 00 | 75 | #define MPT2SAS_RELEASE_VERSION 00 |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 6f51ca485f35..e2b50d8f57a8 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -425,6 +425,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, | |||
425 | INIT_LIST_HEAD(&starget->devices); | 425 | INIT_LIST_HEAD(&starget->devices); |
426 | starget->state = STARGET_CREATED; | 426 | starget->state = STARGET_CREATED; |
427 | starget->scsi_level = SCSI_2; | 427 | starget->scsi_level = SCSI_2; |
428 | starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED; | ||
428 | retry: | 429 | retry: |
429 | spin_lock_irqsave(shost->host_lock, flags); | 430 | spin_lock_irqsave(shost->host_lock, flags); |
430 | 431 | ||
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 094795455293..0a2ce7b6325c 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -357,7 +357,7 @@ int iscsi_session_chkready(struct iscsi_cls_session *session) | |||
357 | err = 0; | 357 | err = 0; |
358 | break; | 358 | break; |
359 | case ISCSI_SESSION_FAILED: | 359 | case ISCSI_SESSION_FAILED: |
360 | err = DID_TRANSPORT_DISRUPTED << 16; | 360 | err = DID_IMM_RETRY << 16; |
361 | break; | 361 | break; |
362 | case ISCSI_SESSION_FREE: | 362 | case ISCSI_SESSION_FREE: |
363 | err = DID_TRANSPORT_FAILFAST << 16; | 363 | err = DID_TRANSPORT_FAILFAST << 16; |
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index c9184f756cad..68a8d873bbd9 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h | |||
@@ -680,7 +680,7 @@ fc_remote_port_chkready(struct fc_rport *rport) | |||
680 | if (rport->roles & FC_PORT_ROLE_FCP_TARGET) | 680 | if (rport->roles & FC_PORT_ROLE_FCP_TARGET) |
681 | result = 0; | 681 | result = 0; |
682 | else if (rport->flags & FC_RPORT_DEVLOSS_PENDING) | 682 | else if (rport->flags & FC_RPORT_DEVLOSS_PENDING) |
683 | result = DID_TRANSPORT_DISRUPTED << 16; | 683 | result = DID_IMM_RETRY << 16; |
684 | else | 684 | else |
685 | result = DID_NO_CONNECT << 16; | 685 | result = DID_NO_CONNECT << 16; |
686 | break; | 686 | break; |
@@ -688,7 +688,7 @@ fc_remote_port_chkready(struct fc_rport *rport) | |||
688 | if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT) | 688 | if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT) |
689 | result = DID_TRANSPORT_FAILFAST << 16; | 689 | result = DID_TRANSPORT_FAILFAST << 16; |
690 | else | 690 | else |
691 | result = DID_TRANSPORT_DISRUPTED << 16; | 691 | result = DID_IMM_RETRY << 16; |
692 | break; | 692 | break; |
693 | default: | 693 | default: |
694 | result = DID_NO_CONNECT << 16; | 694 | result = DID_NO_CONNECT << 16; |