aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorScott Feldman <scofeldm@cisco.com>2008-09-15 12:17:11 -0400
committerJeff Garzik <jgarzik@redhat.com>2008-09-18 11:34:53 -0400
commit01f2e4ead2c51226ed1283ef6a8388ca6f4cff8f (patch)
treeb1cc2ef1a191a3bf00f371d5dbc2028e1fee01c5
parent452c1ce218a68b5dbd626397ecfc65ca89dd3cbb (diff)
enic: add Cisco 10G Ethernet NIC driver
Signed-off-by: Scott Feldman <scofeldm@cisco.com> Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
-rw-r--r--MAINTAINERS7
-rw-r--r--drivers/net/Kconfig7
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/enic/Makefile5
-rw-r--r--drivers/net/enic/cq_desc.h79
-rw-r--r--drivers/net/enic/cq_enet_desc.h169
-rw-r--r--drivers/net/enic/enic.h115
-rw-r--r--drivers/net/enic/enic_main.c1949
-rw-r--r--drivers/net/enic/enic_res.c370
-rw-r--r--drivers/net/enic/enic_res.h151
-rw-r--r--drivers/net/enic/rq_enet_desc.h60
-rw-r--r--drivers/net/enic/vnic_cq.c89
-rw-r--r--drivers/net/enic/vnic_cq.h113
-rw-r--r--drivers/net/enic/vnic_dev.c674
-rw-r--r--drivers/net/enic/vnic_dev.h106
-rw-r--r--drivers/net/enic/vnic_devcmd.h282
-rw-r--r--drivers/net/enic/vnic_enet.h47
-rw-r--r--drivers/net/enic/vnic_intr.c62
-rw-r--r--drivers/net/enic/vnic_intr.h92
-rw-r--r--drivers/net/enic/vnic_nic.h65
-rw-r--r--drivers/net/enic/vnic_resource.h63
-rw-r--r--drivers/net/enic/vnic_rq.c199
-rw-r--r--drivers/net/enic/vnic_rq.h204
-rw-r--r--drivers/net/enic/vnic_rss.h32
-rw-r--r--drivers/net/enic/vnic_stats.h70
-rw-r--r--drivers/net/enic/vnic_wq.c184
-rw-r--r--drivers/net/enic/vnic_wq.h154
-rw-r--r--drivers/net/enic/wq_enet_desc.h98
-rw-r--r--include/linux/pci_ids.h2
29 files changed, 5449 insertions, 0 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index b3e92fbe336c..467f994b1fa0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1046,6 +1046,13 @@ L: cbe-oss-dev@ozlabs.org
1046W: http://www.ibm.com/developerworks/power/cell/ 1046W: http://www.ibm.com/developerworks/power/cell/
1047S: Supported 1047S: Supported
1048 1048
1049CISCO 10G ETHERNET DRIVER
1050P: Scott Feldman
1051M: scofeldm@cisco.com
1052P: Joe Eykholt
1053M: jeykholt@cisco.com
1054S: Supported
1055
1049CFAG12864B LCD DRIVER 1056CFAG12864B LCD DRIVER
1050P: Miguel Ojeda Sandonis 1057P: Miguel Ojeda Sandonis
1051M: miguel.ojeda.sandonis@gmail.com 1058M: miguel.ojeda.sandonis@gmail.com
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 81a3e959c6c3..5c012cd8fe3d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2388,6 +2388,13 @@ config EHEA
2388 To compile the driver as a module, choose M here. The module 2388 To compile the driver as a module, choose M here. The module
2389 will be called ehea. 2389 will be called ehea.
2390 2390
2391config ENIC
2392	tristate "ENIC, the Cisco 10G Ethernet NIC"
2393 depends on PCI && INET
2394 select INET_LRO
2395 help
2396 This enables the support for the Cisco 10G Ethernet card.
2397
2391config IXGBE 2398config IXGBE
2392 tristate "Intel(R) 10GbE PCI Express adapters support" 2399 tristate "Intel(R) 10GbE PCI Express adapters support"
2393 depends on PCI && INET 2400 depends on PCI && INET
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 9221346a515e..d4ec6ba7f073 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_ATL2) += atlx/
19obj-$(CONFIG_ATL1E) += atl1e/ 19obj-$(CONFIG_ATL1E) += atl1e/
20obj-$(CONFIG_GIANFAR) += gianfar_driver.o 20obj-$(CONFIG_GIANFAR) += gianfar_driver.o
21obj-$(CONFIG_TEHUTI) += tehuti.o 21obj-$(CONFIG_TEHUTI) += tehuti.o
22obj-$(CONFIG_ENIC) += enic/
22 23
23gianfar_driver-objs := gianfar.o \ 24gianfar_driver-objs := gianfar.o \
24 gianfar_ethtool.o \ 25 gianfar_ethtool.o \
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
new file mode 100644
index 000000000000..391c3bce5b79
--- /dev/null
+++ b/drivers/net/enic/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_ENIC) := enic.o
2
3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
4 enic_res.o vnic_dev.o vnic_rq.o
5
diff --git a/drivers/net/enic/cq_desc.h b/drivers/net/enic/cq_desc.h
new file mode 100644
index 000000000000..c036a8bfd043
--- /dev/null
+++ b/drivers/net/enic/cq_desc.h
@@ -0,0 +1,79 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _CQ_DESC_H_
21#define _CQ_DESC_H_
22
23/*
24 * Completion queue descriptor types
25 */
26enum cq_desc_types {
27 CQ_DESC_TYPE_WQ_ENET = 0,
28 CQ_DESC_TYPE_DESC_COPY = 1,
29 CQ_DESC_TYPE_WQ_EXCH = 2,
30 CQ_DESC_TYPE_RQ_ENET = 3,
31 CQ_DESC_TYPE_RQ_FCP = 4,
32};
33
34/* Completion queue descriptor: 16B
35 *
36 * All completion queues have this basic layout. The
37 * type_specfic area is unique for each completion
38 * queue type.
39 */
40struct cq_desc {
41 __le16 completed_index;
42 __le16 q_number;
43 u8 type_specfic[11];
44 u8 type_color;
45};
46
47#define CQ_DESC_TYPE_BITS 7
48#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
49#define CQ_DESC_COLOR_MASK 1
50#define CQ_DESC_Q_NUM_BITS 10
51#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
52#define CQ_DESC_COMP_NDX_BITS 12
53#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
54
55static inline void cq_desc_dec(const struct cq_desc *desc_arg,
56 u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
57{
58 const struct cq_desc *desc = desc_arg;
59 const u8 type_color = desc->type_color;
60
61 *color = (type_color >> CQ_DESC_TYPE_BITS) & CQ_DESC_COLOR_MASK;
62
63 /*
64 * Make sure color bit is read from desc *before* other fields
65 * are read from desc. Hardware guarantees color bit is last
66 * bit (byte) written. Adding the rmb() prevents the compiler
67 * and/or CPU from reordering the reads which would potentially
68 * result in reading stale values.
69 */
70
71 rmb();
72
73 *type = type_color & CQ_DESC_TYPE_MASK;
74 *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
75 *completed_index = le16_to_cpu(desc->completed_index) &
76 CQ_DESC_COMP_NDX_MASK;
77}
78
79#endif /* _CQ_DESC_H_ */
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h
new file mode 100644
index 000000000000..03dce9ed612c
--- /dev/null
+++ b/drivers/net/enic/cq_enet_desc.h
@@ -0,0 +1,169 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _CQ_ENET_DESC_H_
21#define _CQ_ENET_DESC_H_
22
23#include "cq_desc.h"
24
25/* Ethernet completion queue descriptor: 16B */
26struct cq_enet_wq_desc {
27 __le16 completed_index;
28 __le16 q_number;
29 u8 reserved[11];
30 u8 type_color;
31};
32
33static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
34 u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
35{
36 cq_desc_dec((struct cq_desc *)desc, type,
37 color, q_number, completed_index);
38}
39
40/* Completion queue descriptor: Ethernet receive queue, 16B */
41struct cq_enet_rq_desc {
42 __le16 completed_index_flags;
43 __le16 q_number_rss_type_flags;
44 __le32 rss_hash;
45 __le16 bytes_written_flags;
46 __le16 vlan;
47 __le16 checksum_fcoe;
48 u8 flags;
49 u8 type_color;
50};
51
52#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
53#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
54#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
55#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15)
56
57#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4
58#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
59 ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
60#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0
61#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1
62#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2
63#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3
64#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4
65#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5
66#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6
67
68#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14)
69
70#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14
71#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
72 ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
73#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
74#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
75
76#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4
77#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
78 ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
79#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
80#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
81 ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
82#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8
83
84#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0)
85#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0)
86#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1)
87#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1)
88#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2)
89#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3)
90#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4)
91#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5)
92#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
93#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
94
95static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
96 u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
97 u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
98 u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
99 u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
100 u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
101 u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
102 u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
103{
104 u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
105 u16 q_number_rss_type_flags =
106 le16_to_cpu(desc->q_number_rss_type_flags);
107 u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
108
109 cq_desc_dec((struct cq_desc *)desc, type,
110 color, q_number, completed_index);
111
112 *ingress_port = (completed_index_flags &
113 CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
114 *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
115 1 : 0;
116 *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
117 1 : 0;
118 *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
119 1 : 0;
120
121 *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
122 CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
123 *csum_not_calc = (q_number_rss_type_flags &
124 CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
125
126 *rss_hash = le32_to_cpu(desc->rss_hash);
127
128 *bytes_written = bytes_written_flags &
129 CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
130 *packet_error = (bytes_written_flags &
131 CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
132 *vlan_stripped = (bytes_written_flags &
133 CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
134
135 *vlan = le16_to_cpu(desc->vlan);
136
137 if (*fcoe) {
138 *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
139 CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
140 *fcoe_fc_crc_ok = (desc->flags &
141 CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
142 *fcoe_enc_error = (desc->flags &
143 CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
144 *fcoe_eof = (u8)((desc->checksum_fcoe >>
145 CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
146 CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
147 *checksum = 0;
148 } else {
149 *fcoe_sof = 0;
150 *fcoe_fc_crc_ok = 0;
151 *fcoe_enc_error = 0;
152 *fcoe_eof = 0;
153 *checksum = le16_to_cpu(desc->checksum_fcoe);
154 }
155
156 *tcp_udp_csum_ok =
157 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
158 *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
159 *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
160 *ipv4_csum_ok =
161 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
162 *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
163 *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
164 *ipv4_fragment =
165 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
166 *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
167}
168
169#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
new file mode 100644
index 000000000000..fb83c926da58
--- /dev/null
+++ b/drivers/net/enic/enic.h
@@ -0,0 +1,115 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _ENIC_H_
21#define _ENIC_H_
22
23#include <linux/inet_lro.h>
24
25#include "vnic_enet.h"
26#include "vnic_dev.h"
27#include "vnic_wq.h"
28#include "vnic_rq.h"
29#include "vnic_cq.h"
30#include "vnic_intr.h"
31#include "vnic_stats.h"
32#include "vnic_rss.h"
33
34#define DRV_NAME "enic"
35#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
36#define DRV_VERSION "0.0.1.18163.472"
37#define DRV_COPYRIGHT "Copyright 2008 Cisco Systems, Inc"
38#define PFX DRV_NAME ": "
39
40#define ENIC_LRO_MAX_DESC 8
41#define ENIC_LRO_MAX_AGGR 64
42
43enum enic_cq_index {
44 ENIC_CQ_RQ,
45 ENIC_CQ_WQ,
46 ENIC_CQ_MAX,
47};
48
49enum enic_intx_intr_index {
50 ENIC_INTX_WQ_RQ,
51 ENIC_INTX_ERR,
52 ENIC_INTX_NOTIFY,
53 ENIC_INTX_MAX,
54};
55
56enum enic_msix_intr_index {
57 ENIC_MSIX_RQ,
58 ENIC_MSIX_WQ,
59 ENIC_MSIX_ERR,
60 ENIC_MSIX_NOTIFY,
61 ENIC_MSIX_MAX,
62};
63
64struct enic_msix_entry {
65 int requested;
66 char devname[IFNAMSIZ];
67 irqreturn_t (*isr)(int, void *);
68 void *devid;
69};
70
71/* Per-instance private data structure */
72struct enic {
73 struct net_device *netdev;
74 struct pci_dev *pdev;
75 struct vnic_enet_config config;
76 struct vnic_dev_bar bar0;
77 struct vnic_dev *vdev;
78 struct net_device_stats net_stats;
79 struct timer_list notify_timer;
80 struct work_struct reset;
81 struct msix_entry msix_entry[ENIC_MSIX_MAX];
82 struct enic_msix_entry msix[ENIC_MSIX_MAX];
83 u32 msg_enable;
84 spinlock_t devcmd_lock;
85 u8 mac_addr[ETH_ALEN];
86 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
87 unsigned int mc_count;
88 int csum_rx_enabled;
89 u32 port_mtu;
90
91 /* work queue cache line section */
92 ____cacheline_aligned struct vnic_wq wq[1];
93 spinlock_t wq_lock[1];
94 unsigned int wq_count;
95 struct vlan_group *vlan_group;
96
97 /* receive queue cache line section */
98 ____cacheline_aligned struct vnic_rq rq[1];
99 unsigned int rq_count;
100 int (*rq_alloc_buf)(struct vnic_rq *rq);
101 struct napi_struct napi;
102 struct net_lro_mgr lro_mgr;
103 struct net_lro_desc lro_desc[ENIC_LRO_MAX_DESC];
104
105 /* interrupt resource cache line section */
106 ____cacheline_aligned struct vnic_intr intr[ENIC_MSIX_MAX];
107 unsigned int intr_count;
108 u32 __iomem *legacy_pba; /* memory-mapped */
109
110 /* completion queue cache line section */
111 ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
112 unsigned int cq_count;
113};
114
115#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
new file mode 100644
index 000000000000..4cf5ec76c993
--- /dev/null
+++ b/drivers/net/enic/enic_main.c
@@ -0,0 +1,1949 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/string.h>
23#include <linux/errno.h>
24#include <linux/types.h>
25#include <linux/init.h>
26#include <linux/workqueue.h>
27#include <linux/pci.h>
28#include <linux/netdevice.h>
29#include <linux/etherdevice.h>
30#include <linux/if_ether.h>
31#include <linux/if_vlan.h>
32#include <linux/ethtool.h>
33#include <linux/in.h>
34#include <linux/ip.h>
35#include <linux/ipv6.h>
36#include <linux/tcp.h>
37
38#include "cq_enet_desc.h"
39#include "vnic_dev.h"
40#include "vnic_intr.h"
41#include "vnic_stats.h"
42#include "enic_res.h"
43#include "enic.h"
44
45#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
46#define ENIC_JUMBO_FIRST_BUF_SIZE 256
47
48/* Supported devices */
49static struct pci_device_id enic_id_table[] = {
50 { PCI_VDEVICE(CISCO, 0x0043) },
51 { 0, } /* end of table */
52};
53
54MODULE_DESCRIPTION(DRV_DESCRIPTION);
55MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
56MODULE_LICENSE("GPL");
57MODULE_VERSION(DRV_VERSION);
58MODULE_DEVICE_TABLE(pci, enic_id_table);
59
60struct enic_stat {
61 char name[ETH_GSTRING_LEN];
62 unsigned int offset;
63};
64
65#define ENIC_TX_STAT(stat) \
66 { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
67#define ENIC_RX_STAT(stat) \
68 { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
69
70static const struct enic_stat enic_tx_stats[] = {
71 ENIC_TX_STAT(tx_frames_ok),
72 ENIC_TX_STAT(tx_unicast_frames_ok),
73 ENIC_TX_STAT(tx_multicast_frames_ok),
74 ENIC_TX_STAT(tx_broadcast_frames_ok),
75 ENIC_TX_STAT(tx_bytes_ok),
76 ENIC_TX_STAT(tx_unicast_bytes_ok),
77 ENIC_TX_STAT(tx_multicast_bytes_ok),
78 ENIC_TX_STAT(tx_broadcast_bytes_ok),
79 ENIC_TX_STAT(tx_drops),
80 ENIC_TX_STAT(tx_errors),
81 ENIC_TX_STAT(tx_tso),
82};
83
84static const struct enic_stat enic_rx_stats[] = {
85 ENIC_RX_STAT(rx_frames_ok),
86 ENIC_RX_STAT(rx_frames_total),
87 ENIC_RX_STAT(rx_unicast_frames_ok),
88 ENIC_RX_STAT(rx_multicast_frames_ok),
89 ENIC_RX_STAT(rx_broadcast_frames_ok),
90 ENIC_RX_STAT(rx_bytes_ok),
91 ENIC_RX_STAT(rx_unicast_bytes_ok),
92 ENIC_RX_STAT(rx_multicast_bytes_ok),
93 ENIC_RX_STAT(rx_broadcast_bytes_ok),
94 ENIC_RX_STAT(rx_drop),
95 ENIC_RX_STAT(rx_no_bufs),
96 ENIC_RX_STAT(rx_errors),
97 ENIC_RX_STAT(rx_rss),
98 ENIC_RX_STAT(rx_crc_errors),
99 ENIC_RX_STAT(rx_frames_64),
100 ENIC_RX_STAT(rx_frames_127),
101 ENIC_RX_STAT(rx_frames_255),
102 ENIC_RX_STAT(rx_frames_511),
103 ENIC_RX_STAT(rx_frames_1023),
104 ENIC_RX_STAT(rx_frames_1518),
105 ENIC_RX_STAT(rx_frames_to_max),
106};
107
108static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
109static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
110
111static int enic_get_settings(struct net_device *netdev,
112 struct ethtool_cmd *ecmd)
113{
114 struct enic *enic = netdev_priv(netdev);
115
116 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
117 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
118 ecmd->port = PORT_FIBRE;
119 ecmd->transceiver = XCVR_EXTERNAL;
120
121 if (netif_carrier_ok(netdev)) {
122 ecmd->speed = vnic_dev_port_speed(enic->vdev);
123 ecmd->duplex = DUPLEX_FULL;
124 } else {
125 ecmd->speed = -1;
126 ecmd->duplex = -1;
127 }
128
129 ecmd->autoneg = AUTONEG_DISABLE;
130
131 return 0;
132}
133
134static void enic_get_drvinfo(struct net_device *netdev,
135 struct ethtool_drvinfo *drvinfo)
136{
137 struct enic *enic = netdev_priv(netdev);
138 struct vnic_devcmd_fw_info *fw_info;
139
140 spin_lock(&enic->devcmd_lock);
141 vnic_dev_fw_info(enic->vdev, &fw_info);
142 spin_unlock(&enic->devcmd_lock);
143
144 strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
145 strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
146 strncpy(drvinfo->fw_version, fw_info->fw_version,
147 sizeof(drvinfo->fw_version));
148 strncpy(drvinfo->bus_info, pci_name(enic->pdev),
149 sizeof(drvinfo->bus_info));
150}
151
152static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
153{
154 unsigned int i;
155
156 switch (stringset) {
157 case ETH_SS_STATS:
158 for (i = 0; i < enic_n_tx_stats; i++) {
159 memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
160 data += ETH_GSTRING_LEN;
161 }
162 for (i = 0; i < enic_n_rx_stats; i++) {
163 memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
164 data += ETH_GSTRING_LEN;
165 }
166 break;
167 }
168}
169
170static int enic_get_stats_count(struct net_device *netdev)
171{
172 return enic_n_tx_stats + enic_n_rx_stats;
173}
174
175static void enic_get_ethtool_stats(struct net_device *netdev,
176 struct ethtool_stats *stats, u64 *data)
177{
178 struct enic *enic = netdev_priv(netdev);
179 struct vnic_stats *vstats;
180 unsigned int i;
181
182 spin_lock(&enic->devcmd_lock);
183 vnic_dev_stats_dump(enic->vdev, &vstats);
184 spin_unlock(&enic->devcmd_lock);
185
186 for (i = 0; i < enic_n_tx_stats; i++)
187 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
188 for (i = 0; i < enic_n_rx_stats; i++)
189 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
190}
191
192static u32 enic_get_rx_csum(struct net_device *netdev)
193{
194 struct enic *enic = netdev_priv(netdev);
195 return enic->csum_rx_enabled;
196}
197
198static int enic_set_rx_csum(struct net_device *netdev, u32 data)
199{
200 struct enic *enic = netdev_priv(netdev);
201
202 enic->csum_rx_enabled =
203 (data && ENIC_SETTING(enic, RXCSUM)) ? 1 : 0;
204
205 return 0;
206}
207
208static int enic_set_tx_csum(struct net_device *netdev, u32 data)
209{
210 struct enic *enic = netdev_priv(netdev);
211
212 if (data && ENIC_SETTING(enic, TXCSUM))
213 netdev->features |= NETIF_F_HW_CSUM;
214 else
215 netdev->features &= ~NETIF_F_HW_CSUM;
216
217 return 0;
218}
219
220static int enic_set_tso(struct net_device *netdev, u32 data)
221{
222 struct enic *enic = netdev_priv(netdev);
223
224 if (data && ENIC_SETTING(enic, TSO))
225 netdev->features |=
226 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
227 else
228 netdev->features &=
229 ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);
230
231 return 0;
232}
233
234static u32 enic_get_msglevel(struct net_device *netdev)
235{
236 struct enic *enic = netdev_priv(netdev);
237 return enic->msg_enable;
238}
239
240static void enic_set_msglevel(struct net_device *netdev, u32 value)
241{
242 struct enic *enic = netdev_priv(netdev);
243 enic->msg_enable = value;
244}
245
246static struct ethtool_ops enic_ethtool_ops = {
247 .get_settings = enic_get_settings,
248 .get_drvinfo = enic_get_drvinfo,
249 .get_msglevel = enic_get_msglevel,
250 .set_msglevel = enic_set_msglevel,
251 .get_link = ethtool_op_get_link,
252 .get_strings = enic_get_strings,
253 .get_stats_count = enic_get_stats_count,
254 .get_ethtool_stats = enic_get_ethtool_stats,
255 .get_rx_csum = enic_get_rx_csum,
256 .set_rx_csum = enic_set_rx_csum,
257 .get_tx_csum = ethtool_op_get_tx_csum,
258 .set_tx_csum = enic_set_tx_csum,
259 .get_sg = ethtool_op_get_sg,
260 .set_sg = ethtool_op_set_sg,
261 .get_tso = ethtool_op_get_tso,
262 .set_tso = enic_set_tso,
263};
264
265static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
266{
267 struct enic *enic = vnic_dev_priv(wq->vdev);
268
269 if (buf->sop)
270 pci_unmap_single(enic->pdev, buf->dma_addr,
271 buf->len, PCI_DMA_TODEVICE);
272 else
273 pci_unmap_page(enic->pdev, buf->dma_addr,
274 buf->len, PCI_DMA_TODEVICE);
275
276 if (buf->os_buf)
277 dev_kfree_skb_any(buf->os_buf);
278}
279
280static void enic_wq_free_buf(struct vnic_wq *wq,
281 struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
282{
283 enic_free_wq_buf(wq, buf);
284}
285
286static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
287 u8 type, u16 q_number, u16 completed_index, void *opaque)
288{
289 struct enic *enic = vnic_dev_priv(vdev);
290
291 spin_lock(&enic->wq_lock[q_number]);
292
293 vnic_wq_service(&enic->wq[q_number], cq_desc,
294 completed_index, enic_wq_free_buf,
295 opaque);
296
297 if (netif_queue_stopped(enic->netdev) &&
298 vnic_wq_desc_avail(&enic->wq[q_number]) >= MAX_SKB_FRAGS + 1)
299 netif_wake_queue(enic->netdev);
300
301 spin_unlock(&enic->wq_lock[q_number]);
302
303 return 0;
304}
305
306static void enic_log_q_error(struct enic *enic)
307{
308 unsigned int i;
309 u32 error_status;
310
311 for (i = 0; i < enic->wq_count; i++) {
312 error_status = vnic_wq_error_status(&enic->wq[i]);
313 if (error_status)
314 printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n",
315 enic->netdev->name, i, error_status);
316 }
317
318 for (i = 0; i < enic->rq_count; i++) {
319 error_status = vnic_rq_error_status(&enic->rq[i]);
320 if (error_status)
321 printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n",
322 enic->netdev->name, i, error_status);
323 }
324}
325
326static void enic_link_check(struct enic *enic)
327{
328 int link_status = vnic_dev_link_status(enic->vdev);
329 int carrier_ok = netif_carrier_ok(enic->netdev);
330
331 if (link_status && !carrier_ok) {
332 printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name);
333 netif_carrier_on(enic->netdev);
334 } else if (!link_status && carrier_ok) {
335 printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name);
336 netif_carrier_off(enic->netdev);
337 }
338}
339
340static void enic_mtu_check(struct enic *enic)
341{
342 u32 mtu = vnic_dev_mtu(enic->vdev);
343
344 if (mtu != enic->port_mtu) {
345 if (mtu < enic->netdev->mtu)
346 printk(KERN_WARNING PFX
347 "%s: interface MTU (%d) set higher "
348 "than switch port MTU (%d)\n",
349 enic->netdev->name, enic->netdev->mtu, mtu);
350 enic->port_mtu = mtu;
351 }
352}
353
354static void enic_msglvl_check(struct enic *enic)
355{
356 u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);
357
358 if (msg_enable != enic->msg_enable) {
359 printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n",
360 enic->netdev->name, enic->msg_enable, msg_enable);
361 enic->msg_enable = msg_enable;
362 }
363}
364
365static void enic_notify_check(struct enic *enic)
366{
367 enic_msglvl_check(enic);
368 enic_mtu_check(enic);
369 enic_link_check(enic);
370}
371
372#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
373
374static irqreturn_t enic_isr_legacy(int irq, void *data)
375{
376 struct net_device *netdev = data;
377 struct enic *enic = netdev_priv(netdev);
378 u32 pba;
379
380 vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);
381
382 pba = vnic_intr_legacy_pba(enic->legacy_pba);
383 if (!pba) {
384 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
385 return IRQ_NONE; /* not our interrupt */
386 }
387
388 if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY))
389 enic_notify_check(enic);
390
391 if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
392 enic_log_q_error(enic);
393 /* schedule recovery from WQ/RQ error */
394 schedule_work(&enic->reset);
395 return IRQ_HANDLED;
396 }
397
398 if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
399 if (netif_rx_schedule_prep(netdev, &enic->napi))
400 __netif_rx_schedule(netdev, &enic->napi);
401 } else {
402 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
403 }
404
405 return IRQ_HANDLED;
406}
407
408static irqreturn_t enic_isr_msi(int irq, void *data)
409{
410 struct enic *enic = data;
411
412 /* With MSI, there is no sharing of interrupts, so this is
413 * our interrupt and there is no need to ack it. The device
414 * is not providing per-vector masking, so the OS will not
415 * write to PCI config space to mask/unmask the interrupt.
416 * We're using mask_on_assertion for MSI, so the device
417 * automatically masks the interrupt when the interrupt is
418 * generated. Later, when exiting polling, the interrupt
419 * will be unmasked (see enic_poll).
420 *
421 * Also, the device uses the same PCIe Traffic Class (TC)
422 * for Memory Write data and MSI, so there are no ordering
423 * issues; the MSI will always arrive at the Root Complex
424 * _after_ corresponding Memory Writes (i.e. descriptor
425 * writes).
426 */
427
428 netif_rx_schedule(enic->netdev, &enic->napi);
429
430 return IRQ_HANDLED;
431}
432
433static irqreturn_t enic_isr_msix_rq(int irq, void *data)
434{
435 struct enic *enic = data;
436
437 /* schedule NAPI polling for RQ cleanup */
438 netif_rx_schedule(enic->netdev, &enic->napi);
439
440 return IRQ_HANDLED;
441}
442
443static irqreturn_t enic_isr_msix_wq(int irq, void *data)
444{
445 struct enic *enic = data;
446 unsigned int wq_work_to_do = -1; /* no limit */
447 unsigned int wq_work_done;
448
449 wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
450 wq_work_to_do, enic_wq_service, NULL);
451
452 vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ],
453 wq_work_done,
454 1 /* unmask intr */,
455 1 /* reset intr timer */);
456
457 return IRQ_HANDLED;
458}
459
460static irqreturn_t enic_isr_msix_err(int irq, void *data)
461{
462 struct enic *enic = data;
463
464 enic_log_q_error(enic);
465
466 /* schedule recovery from WQ/RQ error */
467 schedule_work(&enic->reset);
468
469 return IRQ_HANDLED;
470}
471
472static irqreturn_t enic_isr_msix_notify(int irq, void *data)
473{
474 struct enic *enic = data;
475
476 enic_notify_check(enic);
477 vnic_intr_unmask(&enic->intr[ENIC_MSIX_NOTIFY]);
478
479 return IRQ_HANDLED;
480}
481
482static inline void enic_queue_wq_skb_cont(struct enic *enic,
483 struct vnic_wq *wq, struct sk_buff *skb,
484 unsigned int len_left)
485{
486 skb_frag_t *frag;
487
488 /* Queue additional data fragments */
489 for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
490 len_left -= frag->size;
491 enic_queue_wq_desc_cont(wq, skb,
492 pci_map_page(enic->pdev, frag->page,
493 frag->page_offset, frag->size,
494 PCI_DMA_TODEVICE),
495 frag->size,
496 (len_left == 0)); /* EOP? */
497 }
498}
499
500static inline void enic_queue_wq_skb_vlan(struct enic *enic,
501 struct vnic_wq *wq, struct sk_buff *skb,
502 int vlan_tag_insert, unsigned int vlan_tag)
503{
504 unsigned int head_len = skb_headlen(skb);
505 unsigned int len_left = skb->len - head_len;
506 int eop = (len_left == 0);
507
508 /* Queue the main skb fragment */
509 enic_queue_wq_desc(wq, skb,
510 pci_map_single(enic->pdev, skb->data,
511 head_len, PCI_DMA_TODEVICE),
512 head_len,
513 vlan_tag_insert, vlan_tag,
514 eop);
515
516 if (!eop)
517 enic_queue_wq_skb_cont(enic, wq, skb, len_left);
518}
519
520static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
521 struct vnic_wq *wq, struct sk_buff *skb,
522 int vlan_tag_insert, unsigned int vlan_tag)
523{
524 unsigned int head_len = skb_headlen(skb);
525 unsigned int len_left = skb->len - head_len;
526 unsigned int hdr_len = skb_transport_offset(skb);
527 unsigned int csum_offset = hdr_len + skb->csum_offset;
528 int eop = (len_left == 0);
529
530 /* Queue the main skb fragment */
531 enic_queue_wq_desc_csum_l4(wq, skb,
532 pci_map_single(enic->pdev, skb->data,
533 head_len, PCI_DMA_TODEVICE),
534 head_len,
535 csum_offset,
536 hdr_len,
537 vlan_tag_insert, vlan_tag,
538 eop);
539
540 if (!eop)
541 enic_queue_wq_skb_cont(enic, wq, skb, len_left);
542}
543
/* Queue an skb for TSO (TCP segmentation offload).
 *
 * hdr_len covers the frame headers up through the end of the TCP
 * header (skb_transport_offset + tcp_hdrlen).  Before posting, the TCP
 * checksum field is preloaded with the pseudo-header checksum so the
 * hardware only needs to add per-segment lengths.  The linear head is
 * posted first; page fragments follow via enic_queue_wq_skb_cont().
 */
static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == __constant_htons(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;	/* HW recomputes the IPv4 hdr csum */
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue the main skb fragment */
	enic_queue_wq_desc_tso(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		mss, hdr_len,
		vlan_tag_insert, vlan_tag,
		eop);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
}
579
580static inline void enic_queue_wq_skb(struct enic *enic,
581 struct vnic_wq *wq, struct sk_buff *skb)
582{
583 unsigned int mss = skb_shinfo(skb)->gso_size;
584 unsigned int vlan_tag = 0;
585 int vlan_tag_insert = 0;
586
587 if (enic->vlan_group && vlan_tx_tag_present(skb)) {
588 /* VLAN tag from trunking driver */
589 vlan_tag_insert = 1;
590 vlan_tag = vlan_tx_tag_get(skb);
591 }
592
593 if (mss)
594 enic_queue_wq_skb_tso(enic, wq, skb, mss,
595 vlan_tag_insert, vlan_tag);
596 else if (skb->ip_summed == CHECKSUM_PARTIAL)
597 enic_queue_wq_skb_csum_l4(enic, wq, skb,
598 vlan_tag_insert, vlan_tag);
599 else
600 enic_queue_wq_skb_vlan(enic, wq, skb,
601 vlan_tag_insert, vlan_tag);
602}
603
604/* netif_tx_lock held, process context with BHs disabled */
605static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
606{
607 struct enic *enic = netdev_priv(netdev);
608 struct vnic_wq *wq = &enic->wq[0];
609 unsigned long flags;
610
611 if (skb->len <= 0) {
612 dev_kfree_skb(skb);
613 return NETDEV_TX_OK;
614 }
615
616 /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
617 * which is very likely. In the off chance it's going to take
618 * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb.
619 */
620
621 if (skb_shinfo(skb)->gso_size == 0 &&
622 skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
623 skb_linearize(skb)) {
624 dev_kfree_skb(skb);
625 return NETDEV_TX_OK;
626 }
627
628 spin_lock_irqsave(&enic->wq_lock[0], flags);
629
630 if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + 1) {
631 netif_stop_queue(netdev);
632 /* This is a hard error, log it */
633 printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
634 "queue awake!\n", netdev->name);
635 spin_unlock_irqrestore(&enic->wq_lock[0], flags);
636 return NETDEV_TX_BUSY;
637 }
638
639 enic_queue_wq_skb(enic, wq, skb);
640
641 if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1)
642 netif_stop_queue(netdev);
643
644 netdev->trans_start = jiffies;
645
646 spin_unlock_irqrestore(&enic->wq_lock[0], flags);
647
648 return NETDEV_TX_OK;
649}
650
651/* dev_base_lock rwlock held, nominally process context */
652static struct net_device_stats *enic_get_stats(struct net_device *netdev)
653{
654 struct enic *enic = netdev_priv(netdev);
655 struct vnic_stats *stats;
656
657 spin_lock(&enic->devcmd_lock);
658 vnic_dev_stats_dump(enic->vdev, &stats);
659 spin_unlock(&enic->devcmd_lock);
660
661 enic->net_stats.tx_packets = stats->tx.tx_frames_ok;
662 enic->net_stats.tx_bytes = stats->tx.tx_bytes_ok;
663 enic->net_stats.tx_errors = stats->tx.tx_errors;
664 enic->net_stats.tx_dropped = stats->tx.tx_drops;
665
666 enic->net_stats.rx_packets = stats->rx.rx_frames_ok;
667 enic->net_stats.rx_bytes = stats->rx.rx_bytes_ok;
668 enic->net_stats.rx_errors = stats->rx.rx_errors;
669 enic->net_stats.multicast = stats->rx.rx_multicast_frames_ok;
670 enic->net_stats.rx_crc_errors = stats->rx.rx_crc_errors;
671 enic->net_stats.rx_dropped = stats->rx.rx_no_bufs;
672
673 return &enic->net_stats;
674}
675
/* Forget the cached multicast address list so the next call to
 * enic_set_multicast_list() re-programs every address (used after a
 * device reset, when the hardware filter state is gone).
 */
static void enic_reset_mcaddrs(struct enic *enic)
{
	enic->mc_count = 0;
}
680
681static int enic_set_mac_addr(struct net_device *netdev, char *addr)
682{
683 if (!is_valid_ether_addr(addr))
684 return -EADDRNOTAVAIL;
685
686 memcpy(netdev->dev_addr, addr, netdev->addr_len);
687
688 return 0;
689}
690
/* Sync the hardware packet filter and perfect-match multicast list
 * with netdev state.
 *
 * At most ENIC_MULTICAST_PERFECT_FILTERS addresses are programmed as
 * perfect-match filters; beyond that (or with IFF_ALLMULTI) allmulti
 * mode is enabled.  To minimize devcmd round trips, the list from the
 * previous call is kept in enic->mc_addr and only the differences are
 * pushed to the device.
 *
 * netif_tx_lock held, BHs disabled */
static void enic_set_multicast_list(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct dev_mc_list *list = netdev->mc_list;
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
	    (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int mc_count = netdev->mc_count;
	unsigned int i, j;

	/* Only the first ENIC_MULTICAST_PERFECT_FILTERS addrs get
	 * perfect filters; allmulti (set above) covers the rest.
	 */
	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;

	spin_lock(&enic->devcmd_lock);

	vnic_dev_packet_filter(enic->vdev, directed,
		multicast, broadcast, promisc, allmulti);

	/* Is there an easier way? Trying to minimize to
	 * calls to add/del multicast addrs. We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	/* Snapshot the current netdev list into a local array */
	for (i = 0; list && i < mc_count; i++) {
		memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN);
		list = list->next;
	}

	/* Delete addrs that were programmed last time but are gone now */
	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (compare_ether_addr(enic->mc_addr[i],
				mc_addr[j]) == 0)
				break;
		if (j == mc_count)
			enic_del_multicast_addr(enic, enic->mc_addr[i]);
	}

	/* Add addrs that are new since last time */
	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (compare_ether_addr(mc_addr[i],
				enic->mc_addr[j]) == 0)
				break;
		if (j == enic->mc_count)
			enic_add_multicast_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;

	spin_unlock(&enic->devcmd_lock);
}
753
/* Record the VLAN group registered by the 8021q layer; a non-NULL
 * group enables VLAN tag handling on the receive path.
 *
 * rtnl lock is held */
static void enic_vlan_rx_register(struct net_device *netdev,
	struct vlan_group *vlan_group)
{
	struct enic *enic = netdev_priv(netdev);
	enic->vlan_group = vlan_group;
}
761
/* Program a VLAN id into the hardware filter (devcmd serialized by
 * devcmd_lock).
 *
 * rtnl lock is held */
static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_add_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}
771
/* Remove a VLAN id from the hardware filter (devcmd serialized by
 * devcmd_lock).
 *
 * rtnl lock is held */
static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_del_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}
781
/* Transmit watchdog: recover a hung TX path by scheduling the same
 * reset worker used for WQ/RQ errors.
 *
 * netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}
788
789static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
790{
791 struct enic *enic = vnic_dev_priv(rq->vdev);
792
793 if (!buf->os_buf)
794 return;
795
796 pci_unmap_single(enic->pdev, buf->dma_addr,
797 buf->len, PCI_DMA_FROMDEVICE);
798 dev_kfree_skb_any(buf->os_buf);
799}
800
801static inline struct sk_buff *enic_rq_alloc_skb(unsigned int size)
802{
803 struct sk_buff *skb;
804
805 skb = dev_alloc_skb(size + NET_IP_ALIGN);
806
807 if (skb)
808 skb_reserve(skb, NET_IP_ALIGN);
809
810 return skb;
811}
812
813static int enic_rq_alloc_buf(struct vnic_rq *rq)
814{
815 struct enic *enic = vnic_dev_priv(rq->vdev);
816 struct sk_buff *skb;
817 unsigned int len = enic->netdev->mtu + ETH_HLEN;
818 unsigned int os_buf_index = 0;
819 dma_addr_t dma_addr;
820
821 skb = enic_rq_alloc_skb(len);
822 if (!skb)
823 return -ENOMEM;
824
825 dma_addr = pci_map_single(enic->pdev, skb->data,
826 len, PCI_DMA_FROMDEVICE);
827
828 enic_queue_rq_desc(rq, skb, os_buf_index,
829 dma_addr, len);
830
831 return 0;
832}
833
/* inet_lro get_skb_header callback: locate the IP and TCP headers of
 * a received frame so LRO can aggregate it.
 *
 * priv is the completion descriptor for this frame (passed by
 * enic_rq_indicate_buf via lro_*_receive_skb).  Returns 0 and fills
 * *iphdr, *tcph and *hdr_flags for non-fragmented IPv4 TCP frames
 * whose headers are fully present; returns -1 otherwise, in which
 * case the frame bypasses aggregation.
 */
static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
	void **tcph, u64 *hdr_flags, void *priv)
{
	struct cq_enet_rq_desc *cq_desc = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	/* Decode the hardware's parse results for this frame */
	cq_enet_rq_desc_dec(cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	/* Only unfragmented IPv4 TCP is eligible for LRO */
	if (!(ipv4 && tcp && !ipv4_fragment))
		return -1;

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*tcph = tcp_hdr(skb);
	*iphdr = iph;

	return 0;
}
878
/* Hand a completed receive buffer to the stack.
 *
 * Called per completed descriptor by vnic_rq_service().  Unmaps the
 * buffer, decodes the completion descriptor, and either drops the
 * frame (packet error / overflow) or passes it up — through LRO and/or
 * VLAN acceleration when enabled.
 */
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	/* Decode the hardware's parse results for this frame */
	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		/* Only log FCS errors on frames that carried data */
		if (bytes_written > 0 && !fcs_ok) {
			if (net_ratelimit())
				printk(KERN_ERR PFX
					"%s: packet error: bad FCS\n",
					enic->netdev->name);
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, enic->netdev);

		if (enic->csum_rx_enabled && !csum_not_calc) {
			/* NOTE(review): the 16-bit hw checksum is stored
			 * via htons() for CHECKSUM_COMPLETE — confirm this
			 * matches the __wsum byte order the stack expects.
			 */
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = enic->netdev;
		enic->netdev->last_rx = jiffies;

		if (enic->vlan_group && vlan_stripped) {

			/* HW stripped the tag; re-inject via vlan accel,
			 * through LRO when enabled */
			if (ENIC_SETTING(enic, LRO))
				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
					skb, enic->vlan_group,
					vlan, cq_desc);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan);

		} else {

			if (ENIC_SETTING(enic, LRO))
				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
			else
				netif_receive_skb(skb);

		}

	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}
969
/* Completion-queue callback for the receive path: forwards each
 * completion to vnic_rq_service(), which walks the posted buffers up
 * to completed_index and indicates each via enic_rq_indicate_buf().
 * Always returns 0 (no early stop).
 */
static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}
981
982static void enic_rq_drop_buf(struct vnic_rq *rq,
983 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
984 int skipped, void *opaque)
985{
986 struct enic *enic = vnic_dev_priv(rq->vdev);
987 struct sk_buff *skb = buf->os_buf;
988
989 if (skipped)
990 return;
991
992 pci_unmap_single(enic->pdev, buf->dma_addr,
993 buf->len, PCI_DMA_FROMDEVICE);
994
995 dev_kfree_skb_any(skb);
996}
997
/* Completion-queue callback used during shutdown: same as
 * enic_rq_service() but drops every completed frame via
 * enic_rq_drop_buf() instead of indicating it to the stack.
 */
static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_drop_buf, opaque);

	return 0;
}
1009
1010static int enic_poll(struct napi_struct *napi, int budget)
1011{
1012 struct enic *enic = container_of(napi, struct enic, napi);
1013 struct net_device *netdev = enic->netdev;
1014 unsigned int rq_work_to_do = budget;
1015 unsigned int wq_work_to_do = -1; /* no limit */
1016 unsigned int work_done, rq_work_done, wq_work_done;
1017
1018 /* Service RQ (first) and WQ
1019 */
1020
1021 rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
1022 rq_work_to_do, enic_rq_service, NULL);
1023
1024 wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
1025 wq_work_to_do, enic_wq_service, NULL);
1026
1027 /* Accumulate intr event credits for this polling
1028 * cycle. An intr event is the completion of a
1029 * a WQ or RQ packet.
1030 */
1031
1032 work_done = rq_work_done + wq_work_done;
1033
1034 if (work_done > 0)
1035 vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
1036 work_done,
1037 0 /* don't unmask intr */,
1038 0 /* don't reset intr timer */);
1039
1040 if (rq_work_done > 0) {
1041
1042 /* Replenish RQ
1043 */
1044
1045 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1046
1047 } else {
1048
1049 /* If no work done, flush all LROs and exit polling
1050 */
1051
1052 if (ENIC_SETTING(enic, LRO))
1053 lro_flush_all(&enic->lro_mgr);
1054
1055 netif_rx_complete(netdev, napi);
1056 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
1057 }
1058
1059 return rq_work_done;
1060}
1061
/* NAPI poll routine for MSI-X mode (RQ vector only; the WQ has its
 * own vector serviced in enic_isr_msix_wq).
 *
 * Services the RQ bounded by budget.  When work was done: replenish
 * receive buffers and bank the interrupt-event credits (leaving the
 * vector masked so polling continues).  When idle: flush LRO, complete
 * polling, and re-arm the RQ vector.  Returns the work done.
 */
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int work_to_do = budget;
	unsigned int work_done;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		work_to_do, enic_rq_service, NULL);

	if (work_done > 0) {

		/* Replenish RQ
		 */

		vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

		/* Accumulate intr event credits for this polling
		 * cycle. An intr event is the completion of a
		 * a WQ or RQ packet.
		 */

		vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);
	} else {

		/* If no work done, flush all LROs and exit polling
		 */

		if (ENIC_SETTING(enic, LRO))
			lro_flush_all(&enic->lro_mgr);

		netif_rx_complete(netdev, napi);
		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
	}

	return work_done;
}
1105
1106static void enic_notify_timer(unsigned long data)
1107{
1108 struct enic *enic = (struct enic *)data;
1109
1110 enic_notify_check(enic);
1111
1112 mod_timer(&enic->notify_timer, round_jiffies(ENIC_NOTIFY_TIMER_PERIOD));
1113}
1114
1115static void enic_free_intr(struct enic *enic)
1116{
1117 struct net_device *netdev = enic->netdev;
1118 unsigned int i;
1119
1120 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1121 case VNIC_DEV_INTR_MODE_INTX:
1122 case VNIC_DEV_INTR_MODE_MSI:
1123 free_irq(enic->pdev->irq, netdev);
1124 break;
1125 case VNIC_DEV_INTR_MODE_MSIX:
1126 for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1127 if (enic->msix[i].requested)
1128 free_irq(enic->msix_entry[i].vector,
1129 enic->msix[i].devid);
1130 break;
1131 default:
1132 break;
1133 }
1134}
1135
/* Acquire irq(s) for the interrupt mode in effect.
 *
 * INTx: one shared irq with the netdev as dev_id.  MSI: one exclusive
 * irq with the enic as dev_id.  MSI-X: one vector each for RQ, WQ,
 * error and notify; names are derived from netdev->name.  On MSI-X
 * partial failure, already-acquired vectors are released via
 * enic_free_intr() (which honors the per-vector requested flag).
 * Returns 0 or the request_irq() error.
 */
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		/* %.11s keeps the full name within the devname buffer */
		sprintf(enic->msix[ENIC_MSIX_RQ].devname,
			"%.11s-rx", netdev->name);
		enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq;
		enic->msix[ENIC_MSIX_RQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_WQ].devname,
			"%.11s-tx", netdev->name);
		enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq;
		enic->msix[ENIC_MSIX_WQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_ERR].devname,
			"%.11s-err", netdev->name);
		enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err;
		enic->msix[ENIC_MSIX_ERR].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname,
			"%.11s-notify", netdev->name);
		enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify;
		enic->msix[ENIC_MSIX_NOTIFY].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}
1198
1199static int enic_notify_set(struct enic *enic)
1200{
1201 int err;
1202
1203 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1204 case VNIC_DEV_INTR_MODE_INTX:
1205 err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY);
1206 break;
1207 case VNIC_DEV_INTR_MODE_MSIX:
1208 err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY);
1209 break;
1210 default:
1211 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
1212 break;
1213 }
1214
1215 return err;
1216}
1217
1218static void enic_notify_timer_start(struct enic *enic)
1219{
1220 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1221 case VNIC_DEV_INTR_MODE_MSI:
1222 mod_timer(&enic->notify_timer, jiffies);
1223 break;
1224 default:
1225 /* Using intr for notification for INTx/MSI-X */
1226 break;
1227 };
1228}
1229
1230/* rtnl lock is held, process context */
1231static int enic_open(struct net_device *netdev)
1232{
1233 struct enic *enic = netdev_priv(netdev);
1234 unsigned int i;
1235 int err;
1236
1237 for (i = 0; i < enic->rq_count; i++) {
1238 err = vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
1239 if (err) {
1240 printk(KERN_ERR PFX
1241 "%s: Unable to alloc receive buffers.\n",
1242 netdev->name);
1243 return err;
1244 }
1245 }
1246
1247 for (i = 0; i < enic->wq_count; i++)
1248 vnic_wq_enable(&enic->wq[i]);
1249 for (i = 0; i < enic->rq_count; i++)
1250 vnic_rq_enable(&enic->rq[i]);
1251
1252 enic_add_station_addr(enic);
1253 enic_set_multicast_list(netdev);
1254
1255 netif_wake_queue(netdev);
1256 napi_enable(&enic->napi);
1257 vnic_dev_enable(enic->vdev);
1258
1259 for (i = 0; i < enic->intr_count; i++)
1260 vnic_intr_unmask(&enic->intr[i]);
1261
1262 enic_notify_timer_start(enic);
1263
1264 return 0;
1265}
1266
/* Bring the interface down, unwinding enic_open() in reverse order:
 * stop the notify timer, disable the device and NAPI, stop the TX
 * queue, mask interrupts, disable WQs/RQs, drain both completion
 * queues (dropping any in-flight RX frames), then clean all rings.
 *
 * rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	del_timer_sync(&enic->notify_timer);

	vnic_dev_disable(enic->vdev);
	napi_disable(&enic->napi);
	netif_stop_queue(netdev);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_mask(&enic->intr[i]);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)	/* NOTE(review): early return leaves later
				 * queues enabled and skips ring cleanup —
				 * confirm callers tolerate this */
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	/* Drain remaining completions; RX frames are dropped, not
	 * indicated, since the stack is no longer expecting them */
	(void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		-1, enic_rq_service_drop, NULL);
	(void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		-1, enic_wq_service, NULL);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}
1310
1311static int enic_change_mtu(struct net_device *netdev, int new_mtu)
1312{
1313 struct enic *enic = netdev_priv(netdev);
1314 int running = netif_running(netdev);
1315
1316 if (running)
1317 enic_stop(netdev);
1318
1319 if (new_mtu < ENIC_MIN_MTU)
1320 new_mtu = ENIC_MIN_MTU;
1321 if (new_mtu > ENIC_MAX_MTU)
1322 new_mtu = ENIC_MAX_MTU;
1323
1324 netdev->mtu = new_mtu;
1325
1326 if (netdev->mtu > enic->port_mtu)
1327 printk(KERN_WARNING PFX
1328 "%s: interface MTU (%d) set higher "
1329 "than port MTU (%d)\n",
1330 netdev->name, netdev->mtu, enic->port_mtu);
1331
1332 if (running)
1333 enic_open(netdev);
1334
1335 return 0;
1336}
1337
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the appropriate interrupt handler(s) directly,
 * with interrupts disabled (used by netconsole/kgdboe).
 *
 * NOTE(review): in MSI-X mode the handlers are invoked with
 * enic->pdev->irq rather than the per-vector irq; the handlers ignore
 * their irq argument, so this appears harmless — confirm.
 */
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		enic_isr_msix_rq(enic->pdev->irq, enic);
		enic_isr_msix_wq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif
1360
1361static int enic_dev_wait(struct vnic_dev *vdev,
1362 int (*start)(struct vnic_dev *, int),
1363 int (*finished)(struct vnic_dev *, int *),
1364 int arg)
1365{
1366 unsigned long time;
1367 int done;
1368 int err;
1369
1370 BUG_ON(in_interrupt());
1371
1372 err = start(vdev, arg);
1373 if (err)
1374 return err;
1375
1376 /* Wait for func to complete...2 seconds max
1377 */
1378
1379 time = jiffies + (HZ * 2);
1380 do {
1381
1382 err = finished(vdev, &done);
1383 if (err)
1384 return err;
1385
1386 if (done)
1387 return 0;
1388
1389 schedule_timeout_uninterruptible(HZ / 10);
1390
1391 } while (time_after(time, jiffies));
1392
1393 return -ETIMEDOUT;
1394}
1395
1396static int enic_dev_open(struct enic *enic)
1397{
1398 int err;
1399
1400 err = enic_dev_wait(enic->vdev, vnic_dev_open,
1401 vnic_dev_open_done, 0);
1402 if (err)
1403 printk(KERN_ERR PFX
1404 "vNIC device open failed, err %d.\n", err);
1405
1406 return err;
1407}
1408
1409static int enic_dev_soft_reset(struct enic *enic)
1410{
1411 int err;
1412
1413 err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
1414 vnic_dev_soft_reset_done, 0);
1415 if (err)
1416 printk(KERN_ERR PFX
1417 "vNIC soft reset failed, err %d.\n", err);
1418
1419 return err;
1420}
1421
/* Reset worker (scheduled from the error ISR and the TX watchdog):
 * notify the firmware of the hang, then stop the interface, soft-reset
 * the device, clear cached multicast state, re-init vNIC resources and
 * bring the interface back up.  Runs under rtnl to exclude concurrent
 * open/stop.
 */
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	/* Nothing to recover if the interface is down */
	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	spin_lock(&enic->devcmd_lock);
	vnic_dev_hang_notify(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	enic_stop(enic->netdev);
	enic_dev_soft_reset(enic);
	enic_reset_mcaddrs(enic);	/* hw filters are gone after reset */
	enic_init_vnic_resources(enic);
	enic_open(enic->netdev);

	rtnl_unlock();
}
1443
/* Pick the interrupt mode (MSI-X, then MSI, then INTx) based on what
 * the device advertises and the PCI layer can grant, and trim the
 * resource counts (rq/wq/cq/intr) to what the chosen mode uses.
 * enic->config.intr_mode caps the search (0 = allow MSI-X,
 * 1 = at most MSI, 2 = at most INTx).  Returns 0 on success or
 * -EINVAL if no mode fits.
 */
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = ARRAY_SIZE(enic->rq);
	unsigned int m = ARRAY_SIZE(enic->wq);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2 &&
	    !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

		enic->rq_count = n;
		enic->wq_count = m;
		enic->cq_count = n + m;
		enic->intr_count = n + m + 2;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);

		return 0;
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	/* No usable mode found */
	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}
1531
1532static void enic_clear_intr_mode(struct enic *enic)
1533{
1534 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1535 case VNIC_DEV_INTR_MODE_MSIX:
1536 pci_disable_msix(enic->pdev);
1537 break;
1538 case VNIC_DEV_INTR_MODE_MSI:
1539 pci_disable_msi(enic->pdev);
1540 break;
1541 default:
1542 break;
1543 }
1544
1545 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
1546}
1547
/* Unmap BAR0 if it was mapped.  (Guarded because, unlike free(),
 * iounmap(NULL) is not guaranteed safe everywhere.)
 */
static void enic_iounmap(struct enic *enic)
{
	if (enic->bar0.vaddr)
		iounmap(enic->bar0.vaddr);
}
1553
1554static int __devinit enic_probe(struct pci_dev *pdev,
1555 const struct pci_device_id *ent)
1556{
1557 struct net_device *netdev;
1558 struct enic *enic;
1559 int using_dac = 0;
1560 unsigned int i;
1561 int err;
1562
1563 const u8 rss_default_cpu = 0;
1564 const u8 rss_hash_type = 0;
1565 const u8 rss_hash_bits = 0;
1566 const u8 rss_base_cpu = 0;
1567 const u8 rss_enable = 0;
1568 const u8 tso_ipid_split_en = 0;
1569 const u8 ig_vlan_strip_en = 1;
1570
1571 /* Allocate net device structure and initialize. Private
1572 * instance data is initialized to zero.
1573 */
1574
1575 netdev = alloc_etherdev(sizeof(struct enic));
1576 if (!netdev) {
1577 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
1578 return -ENOMEM;
1579 }
1580
1581 /* Set the netdev name early so intr vectors are properly
1582 * named and any error msgs can include netdev->name
1583 */
1584
1585 rtnl_lock();
1586 err = dev_alloc_name(netdev, netdev->name);
1587 rtnl_unlock();
1588 if (err < 0) {
1589 printk(KERN_ERR PFX "Unable to allocate netdev name.\n");
1590 goto err_out_free_netdev;
1591 }
1592
1593 pci_set_drvdata(pdev, netdev);
1594
1595 SET_NETDEV_DEV(netdev, &pdev->dev);
1596
1597 enic = netdev_priv(netdev);
1598 enic->netdev = netdev;
1599 enic->pdev = pdev;
1600
1601 /* Setup PCI resources
1602 */
1603
1604 err = pci_enable_device(pdev);
1605 if (err) {
1606 printk(KERN_ERR PFX
1607 "%s: Cannot enable PCI device, aborting.\n",
1608 netdev->name);
1609 goto err_out_free_netdev;
1610 }
1611
1612 err = pci_request_regions(pdev, DRV_NAME);
1613 if (err) {
1614 printk(KERN_ERR PFX
1615 "%s: Cannot request PCI regions, aborting.\n",
1616 netdev->name);
1617 goto err_out_disable_device;
1618 }
1619
1620 pci_set_master(pdev);
1621
1622 /* Query PCI controller on system for DMA addressing
1623 * limitation for the device. Try 40-bit first, and
1624 * fail to 32-bit.
1625 */
1626
1627 err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
1628 if (err) {
1629 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1630 if (err) {
1631 printk(KERN_ERR PFX
1632 "%s: No usable DMA configuration, aborting.\n",
1633 netdev->name);
1634 goto err_out_release_regions;
1635 }
1636 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1637 if (err) {
1638 printk(KERN_ERR PFX
1639 "%s: Unable to obtain 32-bit DMA "
1640 "for consistent allocations, aborting.\n",
1641 netdev->name);
1642 goto err_out_release_regions;
1643 }
1644 } else {
1645 err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
1646 if (err) {
1647 printk(KERN_ERR PFX
1648 "%s: Unable to obtain 40-bit DMA "
1649 "for consistent allocations, aborting.\n",
1650 netdev->name);
1651 goto err_out_release_regions;
1652 }
1653 using_dac = 1;
1654 }
1655
1656 /* Map vNIC resources from BAR0
1657 */
1658
1659 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1660 printk(KERN_ERR PFX
1661 "%s: BAR0 not memory-map'able, aborting.\n",
1662 netdev->name);
1663 err = -ENODEV;
1664 goto err_out_release_regions;
1665 }
1666
1667 enic->bar0.vaddr = pci_iomap(pdev, 0, enic->bar0.len);
1668 enic->bar0.bus_addr = pci_resource_start(pdev, 0);
1669 enic->bar0.len = pci_resource_len(pdev, 0);
1670
1671 if (!enic->bar0.vaddr) {
1672 printk(KERN_ERR PFX
1673 "%s: Cannot memory-map BAR0 res hdr, aborting.\n",
1674 netdev->name);
1675 err = -ENODEV;
1676 goto err_out_release_regions;
1677 }
1678
1679 /* Register vNIC device
1680 */
1681
1682 enic->vdev = vnic_dev_register(NULL, enic, pdev, &enic->bar0);
1683 if (!enic->vdev) {
1684 printk(KERN_ERR PFX
1685 "%s: vNIC registration failed, aborting.\n",
1686 netdev->name);
1687 err = -ENODEV;
1688 goto err_out_iounmap;
1689 }
1690
1691 /* Issue device open to get device in known state
1692 */
1693
1694 err = enic_dev_open(enic);
1695 if (err) {
1696 printk(KERN_ERR PFX
1697 "%s: vNIC dev open failed, aborting.\n",
1698 netdev->name);
1699 goto err_out_vnic_unregister;
1700 }
1701
1702 /* Issue device init to initialize the vnic-to-switch link.
1703 * We'll start with carrier off and wait for link UP
1704 * notification later to turn on carrier. We don't need
1705 * to wait here for the vnic-to-switch link initialization
1706 * to complete; link UP notification is the indication that
1707 * the process is complete.
1708 */
1709
1710 netif_carrier_off(netdev);
1711
1712 err = vnic_dev_init(enic->vdev, 0);
1713 if (err) {
1714 printk(KERN_ERR PFX
1715 "%s: vNIC dev init failed, aborting.\n",
1716 netdev->name);
1717 goto err_out_dev_close;
1718 }
1719
1720 /* Get vNIC configuration
1721 */
1722
1723 err = enic_get_vnic_config(enic);
1724 if (err) {
1725 printk(KERN_ERR PFX
1726 "%s: Get vNIC configuration failed, aborting.\n",
1727 netdev->name);
1728 goto err_out_dev_close;
1729 }
1730
1731 /* Get available resource counts
1732 */
1733
1734 enic_get_res_counts(enic);
1735
1736 /* Set interrupt mode based on resource counts and system
1737 * capabilities
1738 */
1739
1740 err = enic_set_intr_mode(enic);
1741 if (err) {
1742 printk(KERN_ERR PFX
1743 "%s: Failed to set intr mode, aborting.\n",
1744 netdev->name);
1745 goto err_out_dev_close;
1746 }
1747
1748 /* Request interrupt vector(s)
1749 */
1750
1751 err = enic_request_intr(enic);
1752 if (err) {
1753 printk(KERN_ERR PFX "%s: Unable to request irq.\n",
1754 netdev->name);
1755 goto err_out_dev_close;
1756 }
1757
1758 /* Allocate and configure vNIC resources
1759 */
1760
1761 err = enic_alloc_vnic_resources(enic);
1762 if (err) {
1763 printk(KERN_ERR PFX
1764 "%s: Failed to alloc vNIC resources, aborting.\n",
1765 netdev->name);
1766 goto err_out_free_vnic_resources;
1767 }
1768
1769 enic_init_vnic_resources(enic);
1770
1771 /* Enable VLAN tag stripping. RSS not enabled (yet).
1772 */
1773
1774 err = enic_set_nic_cfg(enic,
1775 rss_default_cpu, rss_hash_type,
1776 rss_hash_bits, rss_base_cpu,
1777 rss_enable, tso_ipid_split_en,
1778 ig_vlan_strip_en);
1779 if (err) {
1780 printk(KERN_ERR PFX
1781 "%s: Failed to config nic, aborting.\n",
1782 netdev->name);
1783 goto err_out_free_vnic_resources;
1784 }
1785
1786 /* Setup notification buffer area
1787 */
1788
1789 err = enic_notify_set(enic);
1790 if (err) {
1791 printk(KERN_ERR PFX
1792 "%s: Failed to alloc notify buffer, aborting.\n",
1793 netdev->name);
1794 goto err_out_free_vnic_resources;
1795 }
1796
1797 /* Setup notification timer, HW reset task, and locks
1798 */
1799
1800 init_timer(&enic->notify_timer);
1801 enic->notify_timer.function = enic_notify_timer;
1802 enic->notify_timer.data = (unsigned long)enic;
1803
1804 INIT_WORK(&enic->reset, enic_reset);
1805
1806 for (i = 0; i < enic->wq_count; i++)
1807 spin_lock_init(&enic->wq_lock[i]);
1808
1809 spin_lock_init(&enic->devcmd_lock);
1810
1811 /* Register net device
1812 */
1813
1814 enic->port_mtu = enic->config.mtu;
1815 (void)enic_change_mtu(netdev, enic->port_mtu);
1816
1817 err = enic_set_mac_addr(netdev, enic->mac_addr);
1818 if (err) {
1819 printk(KERN_ERR PFX
1820 "%s: Invalid MAC address, aborting.\n",
1821 netdev->name);
1822 goto err_out_notify_unset;
1823 }
1824
1825 netdev->open = enic_open;
1826 netdev->stop = enic_stop;
1827 netdev->hard_start_xmit = enic_hard_start_xmit;
1828 netdev->get_stats = enic_get_stats;
1829 netdev->set_multicast_list = enic_set_multicast_list;
1830 netdev->change_mtu = enic_change_mtu;
1831 netdev->vlan_rx_register = enic_vlan_rx_register;
1832 netdev->vlan_rx_add_vid = enic_vlan_rx_add_vid;
1833 netdev->vlan_rx_kill_vid = enic_vlan_rx_kill_vid;
1834 netdev->tx_timeout = enic_tx_timeout;
1835 netdev->watchdog_timeo = 2 * HZ;
1836 netdev->ethtool_ops = &enic_ethtool_ops;
1837#ifdef CONFIG_NET_POLL_CONTROLLER
1838 netdev->poll_controller = enic_poll_controller;
1839#endif
1840
1841 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1842 default:
1843 netif_napi_add(netdev, &enic->napi, enic_poll, 64);
1844 break;
1845 case VNIC_DEV_INTR_MODE_MSIX:
1846 netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64);
1847 break;
1848 }
1849
1850 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1851 if (ENIC_SETTING(enic, TXCSUM))
1852 netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
1853 if (ENIC_SETTING(enic, TSO))
1854 netdev->features |= NETIF_F_TSO |
1855 NETIF_F_TSO6 | NETIF_F_TSO_ECN;
1856 if (using_dac)
1857 netdev->features |= NETIF_F_HIGHDMA;
1858
1859
1860 enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);
1861
1862 if (ENIC_SETTING(enic, LRO)) {
1863 enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
1864 enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
1865 enic->lro_mgr.lro_arr = enic->lro_desc;
1866 enic->lro_mgr.get_skb_header = enic_get_skb_header;
1867 enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
1868 enic->lro_mgr.dev = netdev;
1869 enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
1870 enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1871 }
1872
1873 err = register_netdev(netdev);
1874 if (err) {
1875 printk(KERN_ERR PFX
1876 "%s: Cannot register net device, aborting.\n",
1877 netdev->name);
1878 goto err_out_notify_unset;
1879 }
1880
1881 return 0;
1882
1883err_out_notify_unset:
1884 vnic_dev_notify_unset(enic->vdev);
1885err_out_free_vnic_resources:
1886 enic_free_vnic_resources(enic);
1887 enic_free_intr(enic);
1888err_out_dev_close:
1889 vnic_dev_close(enic->vdev);
1890err_out_vnic_unregister:
1891 enic_clear_intr_mode(enic);
1892 vnic_dev_unregister(enic->vdev);
1893err_out_iounmap:
1894 enic_iounmap(enic);
1895err_out_release_regions:
1896 pci_release_regions(pdev);
1897err_out_disable_device:
1898 pci_disable_device(pdev);
1899err_out_free_netdev:
1900 pci_set_drvdata(pdev, NULL);
1901 free_netdev(netdev);
1902
1903 return err;
1904}
1905
/* PCI remove handler: tear down the vNIC in the reverse order of
 * enic_probe()'s setup.  The ordering is significant — e.g. the netdev
 * must be unregistered before vNIC resources are freed, and the vdev
 * must be closed before it is unregistered.
 */
static void __devexit enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		/* Drain any queued enic_reset work before the device and
		 * its resources go away.
		 */
		flush_scheduled_work();
		unregister_netdev(netdev);
		vnic_dev_notify_unset(enic->vdev);
		enic_free_vnic_resources(enic);
		enic_free_intr(enic);
		vnic_dev_close(enic->vdev);
		enic_clear_intr_mode(enic);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}
1928
/* PCI driver glue: binds enic_probe/enic_remove to the device IDs
 * listed in enic_id_table.
 */
static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = __devexit_p(enic_remove),
};
1935
/* Module entry point: print a banner and register with the PCI core.
 * Probing of actual devices happens via enic_probe().
 */
static int __init enic_init_module(void)
{
	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}
1942
/* Module exit point: unregistering the PCI driver triggers
 * enic_remove() for every bound device.
 */
static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}
1947
/* Hook the init/exit functions into the kernel module loader. */
module_init(enic_init_module);
module_exit(enic_cleanup_module);
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
new file mode 100644
index 000000000000..95184b9108ef
--- /dev/null
+++ b/drivers/net/enic/enic_res.c
@@ -0,0 +1,370 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/netdevice.h>
25
26#include "wq_enet_desc.h"
27#include "rq_enet_desc.h"
28#include "cq_enet_desc.h"
29#include "vnic_resource.h"
30#include "vnic_enet.h"
31#include "vnic_dev.h"
32#include "vnic_wq.h"
33#include "vnic_rq.h"
34#include "vnic_cq.h"
35#include "vnic_intr.h"
36#include "vnic_stats.h"
37#include "vnic_nic.h"
38#include "vnic_rss.h"
39#include "enic_res.h"
40#include "enic.h"
41
/* Read the vNIC's provisioned configuration (MAC address plus the
 * vnic_enet_config fields) from firmware via devcmds, then clamp each
 * value to the limits this driver supports.  Returns 0 on success or
 * the first devcmd error encountered.
 */
int enic_get_vnic_config(struct enic *enic)
{
	struct vnic_enet_config *c = &enic->config;
	int err;

	err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr);
	if (err) {
		printk(KERN_ERR PFX "Error getting MAC addr, %d\n", err);
		return err;
	}

	/* Fetch one vnic_enet_config member from the device; bail out of
	 * the enclosing function on the first failure.
	 */
#define GET_CONFIG(m) \
	do { \
		err = vnic_dev_spec(enic->vdev, \
			offsetof(struct vnic_enet_config, m), \
			sizeof(c->m), &c->m); \
		if (err) { \
			printk(KERN_ERR PFX \
				"Error getting %s, %d\n", #m, err); \
			return err; \
		} \
	} while (0)

	GET_CONFIG(flags);
	GET_CONFIG(wq_desc_count);
	GET_CONFIG(rq_desc_count);
	GET_CONFIG(mtu);
	GET_CONFIG(intr_timer);
	GET_CONFIG(intr_timer_type);
	GET_CONFIG(intr_mode);

	/* Clamp descriptor counts into [ENIC_MIN_*_DESCS, ENIC_MAX_*_DESCS] */
	c->wq_desc_count =
		min_t(u32, ENIC_MAX_WQ_DESCS,
		max_t(u32, ENIC_MIN_WQ_DESCS,
		c->wq_desc_count));
	c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */

	c->rq_desc_count =
		min_t(u32, ENIC_MAX_RQ_DESCS,
		max_t(u32, ENIC_MIN_RQ_DESCS,
		c->rq_desc_count));
	c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */

	/* Fall back to the standard Ethernet MTU if firmware reports none */
	if (c->mtu == 0)
		c->mtu = 1500;
	c->mtu = min_t(u16, ENIC_MAX_MTU,
		max_t(u16, ENIC_MIN_MTU,
		c->mtu));

	c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);

	printk(KERN_INFO PFX "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
		"wq/rq %d/%d\n",
		enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
		enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
		c->wq_desc_count, c->rq_desc_count);
	printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
		"intr timer %d\n",
		c->mtu, ENIC_SETTING(enic, TXCSUM),
		ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
		ENIC_SETTING(enic, LRO), c->intr_timer);

	return 0;
}
106
/* Program the vNIC's own (station) MAC address as a unicast filter. */
void enic_add_station_addr(struct enic *enic)
{
	vnic_dev_add_addr(enic->vdev, enic->mac_addr);
}
111
/* Add a multicast MAC address filter to the vNIC. */
void enic_add_multicast_addr(struct enic *enic, u8 *addr)
{
	vnic_dev_add_addr(enic->vdev, addr);
}
116
/* Remove a multicast MAC address filter from the vNIC. */
void enic_del_multicast_addr(struct enic *enic, u8 *addr)
{
	vnic_dev_del_addr(enic->vdev, addr);
}
121
122void enic_add_vlan(struct enic *enic, u16 vlanid)
123{
124 u64 a0 = vlanid, a1 = 0;
125 int wait = 1000;
126 int err;
127
128 err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
129 if (err)
130 printk(KERN_ERR PFX "Can't add vlan id, %d\n", err);
131}
132
133void enic_del_vlan(struct enic *enic, u16 vlanid)
134{
135 u64 a0 = vlanid, a1 = 0;
136 int wait = 1000;
137 int err;
138
139 err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
140 if (err)
141 printk(KERN_ERR PFX "Can't delete vlan id, %d\n", err);
142}
143
144int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
145 u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
146 u8 ig_vlan_strip_en)
147{
148 u64 a0, a1;
149 u32 nic_cfg;
150 int wait = 1000;
151
152 vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
153 rss_hash_type, rss_hash_bits, rss_base_cpu,
154 rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
155
156 a0 = nic_cfg;
157 a1 = 0;
158
159 return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
160}
161
162void enic_free_vnic_resources(struct enic *enic)
163{
164 unsigned int i;
165
166 for (i = 0; i < enic->wq_count; i++)
167 vnic_wq_free(&enic->wq[i]);
168 for (i = 0; i < enic->rq_count; i++)
169 vnic_rq_free(&enic->rq[i]);
170 for (i = 0; i < enic->cq_count; i++)
171 vnic_cq_free(&enic->cq[i]);
172 for (i = 0; i < enic->intr_count; i++)
173 vnic_intr_free(&enic->intr[i]);
174}
175
176void enic_get_res_counts(struct enic *enic)
177{
178 enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
179 enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
180 enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
181 enic->intr_count = vnic_dev_get_res_count(enic->vdev,
182 RES_TYPE_INTR_CTRL);
183
184 printk(KERN_INFO PFX "vNIC resources avail: "
185 "wq %d rq %d cq %d intr %d\n",
186 enic->wq_count, enic->rq_count,
187 enic->cq_count, enic->intr_count);
188}
189
/* Initialize all previously-allocated vNIC resources (RQs, WQs, CQs,
 * interrupts) according to the current interrupt mode, then clear the
 * device stats.  The CQ index layout (RQs first, then WQs) must match
 * what enic_alloc_vnic_resources() allocated.
 */
void enic_init_vnic_resources(struct enic *enic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int mask_on_assertion;
	unsigned int interrupt_offset;
	unsigned int error_interrupt_enable;
	unsigned int error_interrupt_offset;
	unsigned int cq_index;
	unsigned int i;

	intr_mode = vnic_dev_get_intr_mode(enic->vdev);

	/* Init RQ/WQ resources.
	 *
	 * RQ[0 - n-1] point to CQ[0 - n-1]
	 * WQ[0 - m-1] point to CQ[n - n+m-1]
	 *
	 * Error interrupt is not enabled for MSI.
	 */

	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSIX:
		/* Second-to-last interrupt is the error interrupt;
		 * presumably the last is reserved for notify — confirm
		 * against the interrupt setup in enic_main.c.
		 */
		error_interrupt_enable = 1;
		error_interrupt_offset = enic->intr_count - 2;
		break;
	default:
		error_interrupt_enable = 0;
		error_interrupt_offset = 0;
		break;
	}

	for (i = 0; i < enic->rq_count; i++) {
		cq_index = i;
		vnic_rq_init(&enic->rq[i],
			cq_index,
			error_interrupt_enable,
			error_interrupt_offset);
	}

	for (i = 0; i < enic->wq_count; i++) {
		cq_index = enic->rq_count + i;
		vnic_wq_init(&enic->wq[i],
			cq_index,
			error_interrupt_enable,
			error_interrupt_offset);
	}

	/* Init CQ resources
	 *
	 * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
	 * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
	 */

	for (i = 0; i < enic->cq_count; i++) {

		switch (intr_mode) {
		case VNIC_DEV_INTR_MODE_MSIX:
			interrupt_offset = i;
			break;
		default:
			interrupt_offset = 0;
			break;
		}

		vnic_cq_init(&enic->cq[i],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			1 /* interrupt_enable */,
			1 /* cq_entry_enable */,
			0 /* cq_message_enable */,
			interrupt_offset,
			0 /* cq_message_addr */);
	}

	/* Init INTR resources
	 *
	 * mask_on_assertion is not used for INTx due to the level-
	 * triggered nature of INTx
	 */

	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_MSI:
	case VNIC_DEV_INTR_MODE_MSIX:
		mask_on_assertion = 1;
		break;
	default:
		mask_on_assertion = 0;
		break;
	}

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_init(&enic->intr[i],
			enic->config.intr_timer,
			enic->config.intr_timer_type,
			mask_on_assertion);
	}

	/* Clear LIF stats
	 */

	vnic_dev_stats_clear(enic->vdev);
}
296
/* Allocate all vNIC queue resources (WQs, RQs, CQs, interrupts) and
 * hook the legacy interrupt PBA.  On any failure, everything allocated
 * so far is released via the shared cleanup path.  Returns 0 or a
 * negative error.
 */
int enic_alloc_vnic_resources(struct enic *enic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int i;
	int err;

	intr_mode = vnic_dev_get_intr_mode(enic->vdev);

	printk(KERN_INFO PFX "vNIC resources used:  "
		"wq %d rq %d cq %d intr %d intr mode %s\n",
		enic->wq_count, enic->rq_count,
		enic->cq_count, enic->intr_count,
		intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
		intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
		intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
		"unknown"
		);

	/* Allocate queue resources
	 */

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
			enic->config.wq_desc_count,
			sizeof(struct wq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
			enic->config.rq_desc_count,
			sizeof(struct rq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* CQs 0..rq_count-1 serve RQs, the rest serve WQs — ring sizes
	 * must therefore match the corresponding queue's desc count.
	 */
	for (i = 0; i < enic->cq_count; i++) {
		if (i < enic->rq_count)
			err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
				enic->config.rq_desc_count,
				sizeof(struct cq_enet_rq_desc));
		else
			err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
				enic->config.wq_desc_count,
				sizeof(struct cq_enet_wq_desc));
		if (err)
			goto err_out_cleanup;
	}

	for (i = 0; i < enic->intr_count; i++) {
		err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
		if (err)
			goto err_out_cleanup;
	}

	/* Hook remaining resource
	 */

	/* The legacy PBA is only required when running with INTx */
	enic->legacy_pba = vnic_dev_get_res(enic->vdev,
		RES_TYPE_INTR_PBA_LEGACY, 0);
	if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
		printk(KERN_ERR PFX "Failed to hook legacy pba resource\n");
		err = -ENODEV;
		goto err_out_cleanup;
	}

	return 0;

err_out_cleanup:
	enic_free_vnic_resources(enic);

	return err;
}
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
new file mode 100644
index 000000000000..68534a29b7ac
--- /dev/null
+++ b/drivers/net/enic/enic_res.h
@@ -0,0 +1,151 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _ENIC_RES_H_
21#define _ENIC_RES_H_
22
23#include "wq_enet_desc.h"
24#include "rq_enet_desc.h"
25#include "vnic_wq.h"
26#include "vnic_rq.h"
27
/* Descriptor ring size limits enforced by enic_get_vnic_config() */
#define ENIC_MIN_WQ_DESCS		64
#define ENIC_MAX_WQ_DESCS		4096
#define ENIC_MIN_RQ_DESCS		64
#define ENIC_MAX_RQ_DESCS		4096

#define ENIC_MIN_MTU			576  /* minimum for IPv4 */
#define ENIC_MAX_MTU			9000

#define ENIC_MULTICAST_PERFECT_FILTERS	32

#define ENIC_NON_TSO_MAX_DESC		16

/* Test a VENETF_* flag in the firmware-provided config flags word */
#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
41
/* Encode one work-queue (transmit) descriptor at the ring's next slot
 * and post it.  The wmb() ensures the descriptor contents are visible
 * in memory before vnic_wq_post() advances the posted index.  Caller
 * must ensure the ring has room.
 */
static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr, unsigned int len,
	unsigned int mss_or_csum_offset, unsigned int hdr_len,
	int vlan_tag_insert, unsigned int vlan_tag,
	int offload_mode, int cq_entry, int sop, int eop)
{
	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);

	wq_enet_desc_enc(desc,
		(u64)dma_addr | VNIC_PADDR_TARGET,
		(u16)len,
		(u16)mss_or_csum_offset,
		(u16)hdr_len, (u8)offload_mode,
		(u8)eop, (u8)cq_entry,
		0, /* fcoe_encap */
		(u8)vlan_tag_insert,
		(u16)vlan_tag,
		0 /* loopback */);

	wmb();

	vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
}
65
/* Queue a continuation (non-SOP) descriptor for a multi-fragment
 * packet; a completion entry is requested only on the final fragment.
 */
static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr, unsigned int len, int eop)
{
	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
		0, 0, 0, 0, 0,
		eop, 0 /* !SOP */, eop);
}
73
/* Queue a start-of-packet descriptor with no checksum offload
 * (mss_or_csum_offset = 0 in CSUM mode means no L4 csum requested).
 */
static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
	dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
	unsigned int vlan_tag, int eop)
{
	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
		0, 0, vlan_tag_insert, vlan_tag,
		WQ_ENET_OFFLOAD_MODE_CSUM,
		eop, 1 /* SOP */, eop);
}
83
/* Queue a start-of-packet descriptor requesting IP and/or TCP/UDP
 * checksum offload.  In CSUM mode, bit 0 of the mss_or_csum_offset
 * field selects IP csum and bit 1 selects TCP/UDP csum.
 */
static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr, unsigned int len,
	int ip_csum, int tcpudp_csum, int vlan_tag_insert,
	unsigned int vlan_tag, int eop)
{
	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
		(ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
		0, vlan_tag_insert, vlan_tag,
		WQ_ENET_OFFLOAD_MODE_CSUM,
		eop, 1 /* SOP */, eop);
}
95
/* Queue a start-of-packet descriptor in CSUM_L4 mode: the device
 * inserts an L4 checksum at csum_offset past the hdr_len header.
 */
static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr, unsigned int len,
	unsigned int csum_offset, unsigned int hdr_len,
	int vlan_tag_insert, unsigned int vlan_tag, int eop)
{
	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
		csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
		WQ_ENET_OFFLOAD_MODE_CSUM_L4,
		eop, 1 /* SOP */, eop);
}
106
/* Queue a start-of-packet descriptor in TSO mode: the device segments
 * the payload into mss-sized frames, replicating the hdr_len header.
 */
static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr, unsigned int len,
	unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
	unsigned int vlan_tag, int eop)
{
	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
		mss, hdr_len, vlan_tag_insert, vlan_tag,
		WQ_ENET_OFFLOAD_MODE_TSO,
		eop, 1 /* SOP */, eop);
}
117
/* Encode and post one receive descriptor.  A non-zero os_buf_index
 * marks a continuation buffer (NOT_SOP); index 0 is a single-buffer
 * start-of-packet.  wmb() orders the descriptor write before the post.
 */
static inline void enic_queue_rq_desc(struct vnic_rq *rq,
	void *os_buf, unsigned int os_buf_index,
	dma_addr_t dma_addr, unsigned int len)
{
	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
	u8 type = os_buf_index ?
		RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;

	rq_enet_desc_enc(desc,
		(u64)dma_addr | VNIC_PADDR_TARGET,
		type, (u16)len);

	wmb();

	vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
}
134
135struct enic;
136
137int enic_get_vnic_config(struct enic *);
138void enic_add_station_addr(struct enic *enic);
139void enic_add_multicast_addr(struct enic *enic, u8 *addr);
140void enic_del_multicast_addr(struct enic *enic, u8 *addr);
141void enic_add_vlan(struct enic *enic, u16 vlanid);
142void enic_del_vlan(struct enic *enic, u16 vlanid);
143int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
144 u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
145 u8 ig_vlan_strip_en);
146void enic_get_res_counts(struct enic *enic);
147void enic_init_vnic_resources(struct enic *enic);
148int enic_alloc_vnic_resources(struct enic *);
149void enic_free_vnic_resources(struct enic *);
150
151#endif /* _ENIC_RES_H_ */
diff --git a/drivers/net/enic/rq_enet_desc.h b/drivers/net/enic/rq_enet_desc.h
new file mode 100644
index 000000000000..a06e649010ce
--- /dev/null
+++ b/drivers/net/enic/rq_enet_desc.h
@@ -0,0 +1,60 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _RQ_ENET_DESC_H_
21#define _RQ_ENET_DESC_H_
22
/* Ethernet receive queue descriptor: 16B, little-endian on the wire.
 * length_type packs a 14-bit buffer length (low bits) and a 2-bit
 * type (high bits) — see the _BITS/_MASK constants below.
 */
struct rq_enet_desc {
	__le64 address;
	__le16 length_type;
	u8 reserved[6];
};

/* Values for the 2-bit type field of length_type */
enum rq_enet_type_types {
	RQ_ENET_TYPE_ONLY_SOP = 0,
	RQ_ENET_TYPE_NOT_SOP = 1,
	RQ_ENET_TYPE_RESV2 = 2,
	RQ_ENET_TYPE_RESV3 = 3,
};

#define RQ_ENET_ADDR_BITS		64
#define RQ_ENET_LEN_BITS		14
#define RQ_ENET_LEN_MASK		((1 << RQ_ENET_LEN_BITS) - 1)
#define RQ_ENET_TYPE_BITS		2
#define RQ_ENET_TYPE_MASK		((1 << RQ_ENET_TYPE_BITS) - 1)
42
43static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
44 u64 address, u8 type, u16 length)
45{
46 desc->address = cpu_to_le64(address);
47 desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
48 ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
49}
50
51static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
52 u64 *address, u8 *type, u16 *length)
53{
54 *address = le64_to_cpu(desc->address);
55 *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
56 *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
57 RQ_ENET_TYPE_MASK);
58}
59
60#endif /* _RQ_ENET_DESC_H_ */
diff --git a/drivers/net/enic/vnic_cq.c b/drivers/net/enic/vnic_cq.c
new file mode 100644
index 000000000000..020ae6c3f3d9
--- /dev/null
+++ b/drivers/net/enic/vnic_cq.c
@@ -0,0 +1,89 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24
25#include "vnic_dev.h"
26#include "vnic_cq.h"
27
/* Free the CQ's descriptor ring and drop the hooked control-register
 * pointer.  Safe if the ring was never allocated (the ring free routine
 * is expected to handle that — confirm in vnic_dev.c).
 */
void vnic_cq_free(struct vnic_cq *cq)
{
	vnic_dev_free_desc_ring(cq->vdev, &cq->ring);

	cq->ctrl = NULL;
}
34
35int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
36 unsigned int desc_count, unsigned int desc_size)
37{
38 int err;
39
40 cq->index = index;
41 cq->vdev = vdev;
42
43 cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
44 if (!cq->ctrl) {
45 printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
46 return -EINVAL;
47 }
48
49 err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
50 if (err)
51 return err;
52
53 return 0;
54}
55
/* Program the CQ control registers from the allocated ring and the
 * caller's parameters.  The ring base is OR'd with VNIC_PADDR_TARGET
 * so the adapter interprets it as a host physical address.  Caller
 * must have successfully run vnic_cq_alloc() first (ctrl and ring
 * must be valid).
 */
void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
	unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
	unsigned int cq_tail_color, unsigned int interrupt_enable,
	unsigned int cq_entry_enable, unsigned int cq_message_enable,
	unsigned int interrupt_offset, u64 cq_message_addr)
{
	u64 paddr;

	paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &cq->ctrl->ring_base);
	iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
	iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
	iowrite32(color_enable, &cq->ctrl->color_enable);
	iowrite32(cq_head, &cq->ctrl->cq_head);
	iowrite32(cq_tail, &cq->ctrl->cq_tail);
	iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
	iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
	iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
	iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
	iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
	writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
}
78
/* Reset the CQ to its initial state: driver-side cursor and color back
 * to their post-init values, hardware head/tail/color registers reset,
 * and the descriptor ring zeroed.
 */
void vnic_cq_clean(struct vnic_cq *cq)
{
	cq->to_clean = 0;
	cq->last_color = 0;

	iowrite32(0, &cq->ctrl->cq_head);
	iowrite32(0, &cq->ctrl->cq_tail);
	iowrite32(1, &cq->ctrl->cq_tail_color);

	vnic_dev_clear_desc_ring(&cq->ring);
}
diff --git a/drivers/net/enic/vnic_cq.h b/drivers/net/enic/vnic_cq.h
new file mode 100644
index 000000000000..114763cbc2f8
--- /dev/null
+++ b/drivers/net/enic/vnic_cq.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_CQ_H_
21#define _VNIC_CQ_H_
22
23#include "cq_desc.h"
24#include "vnic_dev.h"
25
/* Completion queue control — memory-mapped device register layout.
 * Each 32-bit register sits on an 8-byte stride (hence the padN
 * members); the hex comments give the byte offset of each field.
 * Do not reorder or repack: this must match the hardware layout.
 */
struct vnic_cq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 flow_control_enable;	/* 0x10 */
	u32 pad1;
	u32 color_enable;		/* 0x18 */
	u32 pad2;
	u32 cq_head;			/* 0x20 */
	u32 pad3;
	u32 cq_tail;			/* 0x28 */
	u32 pad4;
	u32 cq_tail_color;		/* 0x30 */
	u32 pad5;
	u32 interrupt_enable;		/* 0x38 */
	u32 pad6;
	u32 cq_entry_enable;		/* 0x40 */
	u32 pad7;
	u32 cq_message_enable;		/* 0x48 */
	u32 pad8;
	u32 interrupt_offset;		/* 0x50 */
	u32 pad9;
	u64 cq_message_addr;		/* 0x58 */
	u32 pad10;
};
52
/* Driver-side state for one completion queue */
struct vnic_cq {
	unsigned int index;		/* CQ resource index on the device */
	struct vnic_dev *vdev;
	struct vnic_cq_ctrl __iomem *ctrl;		/* memory-mapped */
	struct vnic_dev_ring ring;	/* DMA descriptor ring */
	unsigned int to_clean;		/* next descriptor index to service */
	unsigned int last_color;	/* expected color of unserviced entries */
};
61
/* Service up to work_to_do completed entries on the CQ.
 *
 * Completion ownership is tracked with a color bit: hardware flips the
 * color it writes each time it wraps the ring, so an entry whose color
 * differs from cq->last_color is new.  For each new entry, q_service()
 * is called; a non-zero return from it stops processing early.
 * cq->last_color is toggled whenever to_clean wraps, keeping the
 * driver's expected color in sync with hardware's.
 *
 * Returns the number of entries handed to q_service().
 */
static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
	unsigned int work_to_do,
	int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque),
	void *opaque)
{
	struct cq_desc *cq_desc;
	unsigned int work_done = 0;
	u16 q_number, completed_index;
	u8 type, color;

	cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
		cq->ring.desc_size * cq->to_clean);
	cq_desc_dec(cq_desc, &type, &color,
		&q_number, &completed_index);

	while (color != cq->last_color) {

		if ((*q_service)(cq->vdev, cq_desc, type,
			q_number, completed_index, opaque))
			break;

		cq->to_clean++;
		if (cq->to_clean == cq->ring.desc_count) {
			cq->to_clean = 0;
			/* ring wrapped: hardware writes the opposite
			 * color from here on
			 */
			cq->last_color = cq->last_color ? 0 : 1;
		}

		cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
			cq->ring.desc_size * cq->to_clean);
		cq_desc_dec(cq_desc, &type, &color,
			&q_number, &completed_index);

		work_done++;
		if (work_done >= work_to_do)
			break;
	}

	return work_done;
}
102
103void vnic_cq_free(struct vnic_cq *cq);
104int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
105 unsigned int desc_count, unsigned int desc_size);
106void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
107 unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
108 unsigned int cq_tail_color, unsigned int interrupt_enable,
109 unsigned int cq_entry_enable, unsigned int message_enable,
110 unsigned int interrupt_offset, u64 message_addr);
111void vnic_cq_clean(struct vnic_cq *cq);
112
113#endif /* _VNIC_CQ_H_ */
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
new file mode 100644
index 000000000000..4d104f5c30f9
--- /dev/null
+++ b/drivers/net/enic/vnic_dev.c
@@ -0,0 +1,674 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/delay.h>
25#include <linux/if_ether.h>
26
27#include "vnic_resource.h"
28#include "vnic_devcmd.h"
29#include "vnic_dev.h"
30#include "vnic_stats.h"
31
/* One BAR-mapped device resource: its kernel-mapped address and the
 * number of instances the device exposes (e.g. number of queues).
 */
struct vnic_res {
	void __iomem *vaddr;
	unsigned int count;
};
36
/* Per-device state for the vNIC abstraction layer: the discovered BAR0
 * resource map, the devcmd firmware mailbox, and the lazily-allocated
 * DMA-coherent buffers shared with firmware.
 */
struct vnic_dev {
	void *priv;			/* upper layer's context (vnic_dev_priv) */
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];	/* filled by vnic_dev_discover_res */
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;	/* fw command mailbox (BAR0) */
	struct vnic_devcmd_notify *notify;	/* fw notify area (DMA, lazy) */
	struct vnic_devcmd_notify notify_copy;	/* torn-read-safe snapshot */
	dma_addr_t notify_pa;
	u32 *linkstatus;	/* optional link-state word (DMA) */
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;	/* stats dump buffer (DMA, lazy) */
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;	/* cached fw info (DMA, lazy) */
	dma_addr_t fw_info_pa;
};
53
54#define VNIC_MAX_RES_HDR_SIZE \
55 (sizeof(struct vnic_resource_header) + \
56 sizeof(struct vnic_resource) * RES_TYPE_MAX)
57#define VNIC_RES_STRIDE 128
58
/* Return the private (upper-layer) context registered with this vnic. */
void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
63
/* Walk the resource directory at the start of BAR0 and record, per
 * resource type, the mapped address and instance count in vdev->res[].
 *
 * Returns 0 on success, -EINVAL if the header is absent, too short,
 * has a bad magic/version, or a resource lies outside the BAR.
 */
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* NOTE(review): %lx assumes VNIC_RES_MAGIC/VNIC_RES_VERSION are
	 * long-sized constants -- confirm against vnic_resource.h.
	 */
	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
		ioread32(&rh->version) != VNIC_RES_VERSION) {
		printk(KERN_ERR "vNIC BAR0 res magic/version error "
			"exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));
		return -EINVAL;
	}

	/* entries follow the header; the list is RES_TYPE_EOL-terminated */
	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num != 0)	/* only mapping in BAR0 resources */
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			/* NOTE(review): len and len + bar_offset are u32
			 * computations on device-supplied values and could
			 * wrap; hw is trusted here, but worth confirming.
			 */
			if (len + bar_offset > bar->len) {
				printk(KERN_ERR "vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar->len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			/* byte-sized resources */
			len = count;
			break;
		default:
			/* unknown/unused resource types are skipped */
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}

	return 0;
}
136
/* Number of instances of the given resource type discovered in BAR0
 * (0 if the type was not present).
 */
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}
142
143void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
144 unsigned int index)
145{
146 if (!vdev->res[type].vaddr)
147 return NULL;
148
149 switch (type) {
150 case RES_TYPE_WQ:
151 case RES_TYPE_RQ:
152 case RES_TYPE_CQ:
153 case RES_TYPE_INTR_CTRL:
154 return (char __iomem *)vdev->res[type].vaddr +
155 index * VNIC_RES_STRIDE;
156 default:
157 return (char __iomem *)vdev->res[type].vaddr;
158 }
159}
160
161unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
162 unsigned int desc_count, unsigned int desc_size)
163{
164 /* The base address of the desc rings must be 512 byte aligned.
165 * Descriptor count is aligned to groups of 32 descriptors. A
166 * count of 0 means the maximum 4096 descriptors. Descriptor
167 * size is aligned to 16 bytes.
168 */
169
170 unsigned int count_align = 32;
171 unsigned int desc_align = 16;
172
173 ring->base_align = 512;
174
175 if (desc_count == 0)
176 desc_count = 4096;
177
178 ring->desc_count = ALIGN(desc_count, count_align);
179
180 ring->desc_size = ALIGN(desc_size, desc_align);
181
182 ring->size = ring->desc_count * ring->desc_size;
183 ring->size_unaligned = ring->size + ring->base_align;
184
185 return ring->size_unaligned;
186}
187
/* Zero the (already allocated) descriptor ring memory. */
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}
192
/* Allocate a DMA-coherent descriptor ring.
 *
 * Over-allocates by base_align bytes so the ring base address can be
 * rounded up to the required alignment; both the raw and aligned
 * addresses/pointers are kept in *ring so the buffer can be freed
 * later.  The ring is zeroed on success, and desc_avail is set to
 * desc_count - 1 (one descriptor is held back -- presumably so a full
 * ring can be told apart from an empty one; confirm against queue
 * post/fetch logic).
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);

	if (!ring->descs_unaligned) {
		printk(KERN_ERR
			"Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}

	/* align the bus address up, and offset the cpu pointer to match */
	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}
220
/* Free a ring allocated by vnic_dev_alloc_desc_ring; the unaligned
 * pointer/address pair is what was actually allocated.  Idempotent:
 * descs is NULLed so a second call is a no-op.
 */
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
231
232int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
233 u64 *a0, u64 *a1, int wait)
234{
235 struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
236 int delay;
237 u32 status;
238 int dev_cmd_err[] = {
239 /* convert from fw's version of error.h to host's version */
240 0, /* ERR_SUCCESS */
241 EINVAL, /* ERR_EINVAL */
242 EFAULT, /* ERR_EFAULT */
243 EPERM, /* ERR_EPERM */
244 EBUSY, /* ERR_EBUSY */
245 };
246 int err;
247
248 status = ioread32(&devcmd->status);
249 if (status & STAT_BUSY) {
250 printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
251 return -EBUSY;
252 }
253
254 if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
255 writeq(*a0, &devcmd->args[0]);
256 writeq(*a1, &devcmd->args[1]);
257 wmb();
258 }
259
260 iowrite32(cmd, &devcmd->cmd);
261
262 if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
263 return 0;
264
265 for (delay = 0; delay < wait; delay++) {
266
267 udelay(100);
268
269 status = ioread32(&devcmd->status);
270 if (!(status & STAT_BUSY)) {
271
272 if (status & STAT_ERROR) {
273 err = dev_cmd_err[(int)readq(&devcmd->args[0])];
274 printk(KERN_ERR "Error %d devcmd %d\n",
275 err, _CMD_N(cmd));
276 return -err;
277 }
278
279 if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
280 rmb();
281 *a0 = readq(&devcmd->args[0]);
282 *a1 = readq(&devcmd->args[1]);
283 }
284
285 return 0;
286 }
287 }
288
289 printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
290 return -ETIMEDOUT;
291}
292
/* Return (via *fw_info) the firmware version block.
 *
 * The block is DMA'd from fw on the first call and cached in coherent
 * memory for subsequent calls; the buffer is owned by vdev and freed
 * in vnic_dev_unregister.
 *
 * Returns 0 on success or cache hit, -ENOMEM on allocation failure,
 * or the devcmd error (in which case *fw_info still points at the
 * allocated buffer).
 */
int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;

		/* only get fw_info once and cache it */
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}
317
/* Read a member of the device-specific config block.
 *
 * offset/size select the member; the result is stored through *value,
 * which must point to storage of exactly `size` (1, 2, 4 or 8) bytes.
 * Any other size is a driver bug (BUG()).
 *
 * Returns the devcmd status; note *value is written from a0 even when
 * the devcmd failed (a0 then still holds the offset sent in).
 */
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1: *(u8 *)value = (u8)a0; break;
	case 2: *(u16 *)value = (u16)a0; break;
	case 4: *(u32 *)value = (u32)a0; break;
	case 8: *(u64 *)value = a0; break;
	default: BUG(); break;
	}

	return err;
}
340
341int vnic_dev_stats_clear(struct vnic_dev *vdev)
342{
343 u64 a0 = 0, a1 = 0;
344 int wait = 1000;
345 return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
346}
347
/* Ask fw to DMA the statistics block into a (lazily allocated)
 * coherent buffer and hand the caller a pointer to it via *stats.
 * The buffer is owned by vdev and freed in vnic_dev_unregister.
 *
 * Returns 0 on success, -ENOMEM if the buffer can't be allocated, or
 * the devcmd error (*stats is set before the devcmd runs).
 */
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
366
367int vnic_dev_close(struct vnic_dev *vdev)
368{
369 u64 a0 = 0, a1 = 0;
370 int wait = 1000;
371 return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
372}
373
374int vnic_dev_enable(struct vnic_dev *vdev)
375{
376 u64 a0 = 0, a1 = 0;
377 int wait = 1000;
378 return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
379}
380
381int vnic_dev_disable(struct vnic_dev *vdev)
382{
383 u64 a0 = 0, a1 = 0;
384 int wait = 1000;
385 return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
386}
387
388int vnic_dev_open(struct vnic_dev *vdev, int arg)
389{
390 u64 a0 = (u32)arg, a1 = 0;
391 int wait = 1000;
392 return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
393}
394
395int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
396{
397 u64 a0 = 0, a1 = 0;
398 int wait = 1000;
399 int err;
400
401 *done = 0;
402
403 err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
404 if (err)
405 return err;
406
407 *done = (a0 == 0);
408
409 return 0;
410}
411
412int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
413{
414 u64 a0 = (u32)arg, a1 = 0;
415 int wait = 1000;
416 return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
417}
418
419int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
420{
421 u64 a0 = 0, a1 = 0;
422 int wait = 1000;
423 int err;
424
425 *done = 0;
426
427 err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
428 if (err)
429 return err;
430
431 *done = (a0 == 0);
432
433 return 0;
434}
435
436int vnic_dev_hang_notify(struct vnic_dev *vdev)
437{
438 u64 a0, a1;
439 int wait = 1000;
440 return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
441}
442
443int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
444{
445 u64 a0, a1;
446 int wait = 1000;
447 int err, i;
448
449 for (i = 0; i < ETH_ALEN; i++)
450 mac_addr[i] = 0;
451
452 err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
453 if (err)
454 return err;
455
456 for (i = 0; i < ETH_ALEN; i++)
457 mac_addr[i] = ((u8 *)&a0)[i];
458
459 return 0;
460}
461
462void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
463 int broadcast, int promisc, int allmulti)
464{
465 u64 a0, a1 = 0;
466 int wait = 1000;
467 int err;
468
469 a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
470 (multicast ? CMD_PFILTER_MULTICAST : 0) |
471 (broadcast ? CMD_PFILTER_BROADCAST : 0) |
472 (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
473 (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
474
475 err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
476 if (err)
477 printk(KERN_ERR "Can't set packet filter\n");
478}
479
480void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
481{
482 u64 a0 = 0, a1 = 0;
483 int wait = 1000;
484 int err;
485 int i;
486
487 for (i = 0; i < ETH_ALEN; i++)
488 ((u8 *)&a0)[i] = addr[i];
489
490 err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
491 if (err)
492 printk(KERN_ERR
493 "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
494 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
495 err);
496}
497
498void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
499{
500 u64 a0 = 0, a1 = 0;
501 int wait = 1000;
502 int err;
503 int i;
504
505 for (i = 0; i < ETH_ALEN; i++)
506 ((u8 *)&a0)[i] = addr[i];
507
508 err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
509 if (err)
510 printk(KERN_ERR
511 "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
512 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
513 err);
514}
515
/* Register a notify area with fw: allocates (once) a DMA-coherent
 * vnic_devcmd_notify buffer and passes its bus address, its size (low
 * half of a1) and the interrupt number (bits 32..47 of a1) to fw via
 * CMD_NOTIFY.  The buffer is freed in vnic_dev_unregister.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the devcmd
 * error.
 */
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->notify) {
		vdev->notify = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa);
		if (!vdev->notify)
			return -ENOMEM;
	}

	a0 = vdev->notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
535
/* Unregister the notify area: paddr 0 and intr num -1 tell fw to stop
 * writing notifications and to drop the interrupt registration.  The
 * DMA buffer itself is freed later in vnic_dev_unregister.
 */
void vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;

	a0 = 0;	/* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
547
/* Snapshot the fw-written notify area into vdev->notify_copy.
 *
 * Fw maintains a checksum of words [1..n) in word 0 (csum), so the
 * copy is retried until the recomputed sum matches -- this guarantees
 * the snapshot is never a torn (mid-update) read.
 *
 * Returns 1 when notify_copy is valid, 0 if no notify area has been
 * registered.
 */
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
			sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
569
570int vnic_dev_init(struct vnic_dev *vdev, int arg)
571{
572 u64 a0 = (u32)arg, a1 = 0;
573 int wait = 1000;
574 return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
575}
576
577int vnic_dev_link_status(struct vnic_dev *vdev)
578{
579 if (vdev->linkstatus)
580 return *vdev->linkstatus;
581
582 if (!vnic_dev_notify_ready(vdev))
583 return 0;
584
585 return vdev->notify_copy.link_state;
586}
587
588u32 vnic_dev_port_speed(struct vnic_dev *vdev)
589{
590 if (!vnic_dev_notify_ready(vdev))
591 return 0;
592
593 return vdev->notify_copy.port_speed;
594}
595
596u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
597{
598 if (!vnic_dev_notify_ready(vdev))
599 return 0;
600
601 return vdev->notify_copy.msglvl;
602}
603
604u32 vnic_dev_mtu(struct vnic_dev *vdev)
605{
606 if (!vnic_dev_notify_ready(vdev))
607 return 0;
608
609 return vdev->notify_copy.mtu;
610}
611
/* Record which interrupt mode (INTx/MSI/MSI-X) the driver selected. */
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}
617
/* Return the interrupt mode recorded by vnic_dev_set_intr_mode(). */
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}
623
624void vnic_dev_unregister(struct vnic_dev *vdev)
625{
626 if (vdev) {
627 if (vdev->notify)
628 pci_free_consistent(vdev->pdev,
629 sizeof(struct vnic_devcmd_notify),
630 vdev->notify,
631 vdev->notify_pa);
632 if (vdev->linkstatus)
633 pci_free_consistent(vdev->pdev,
634 sizeof(u32),
635 vdev->linkstatus,
636 vdev->linkstatus_pa);
637 if (vdev->stats)
638 pci_free_consistent(vdev->pdev,
639 sizeof(struct vnic_dev),
640 vdev->stats, vdev->stats_pa);
641 if (vdev->fw_info)
642 pci_free_consistent(vdev->pdev,
643 sizeof(struct vnic_devcmd_fw_info),
644 vdev->fw_info, vdev->fw_info_pa);
645 kfree(vdev);
646 }
647}
648
/* Create (if vdev is NULL) and initialize a vnic_dev for the given
 * pci_dev/BAR0: discovers the resource map and hooks the devcmd
 * mailbox region.
 *
 * Returns the vnic_dev on success, NULL on failure (the vdev -- even
 * a caller-supplied one -- is freed via vnic_dev_unregister on the
 * error path).
 */
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
{
	if (!vdev) {
		/* GFP_ATOMIC: may be called from atomic context */
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}
674
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
new file mode 100644
index 000000000000..2dcffd3a24bd
--- /dev/null
+++ b/drivers/net/enic/vnic_dev.h
@@ -0,0 +1,106 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_DEV_H_
21#define _VNIC_DEV_H_
22
23#include "vnic_resource.h"
24#include "vnic_devcmd.h"
25
26#ifndef VNIC_PADDR_TARGET
27#define VNIC_PADDR_TARGET 0x0000000000000000ULL
28#endif
29
/* Interrupt delivery mode chosen by the driver at probe time. */
enum vnic_dev_intr_mode {
	VNIC_DEV_INTR_MODE_UNKNOWN,
	VNIC_DEV_INTR_MODE_INTX,
	VNIC_DEV_INTR_MODE_MSI,
	VNIC_DEV_INTR_MODE_MSIX,
};
36
/* A mapped PCI BAR: kernel virtual address, bus address and length. */
struct vnic_dev_bar {
	void __iomem *vaddr;
	dma_addr_t bus_addr;
	unsigned long len;
};
42
/* A DMA-coherent descriptor ring.  The *_unaligned members record the
 * raw allocation (needed to free it); descs/base_addr are the aligned
 * addresses actually handed to hardware.
 */
struct vnic_dev_ring {
	void *descs;		/* aligned cpu pointer */
	size_t size;		/* desc_count * desc_size */
	dma_addr_t base_addr;	/* aligned bus address */
	size_t base_align;	/* required base alignment (bytes) */
	void *descs_unaligned;	/* raw allocation, for freeing */
	size_t size_unaligned;	/* raw allocation size */
	dma_addr_t base_addr_unaligned;
	unsigned int desc_size;	/* bytes per descriptor, aligned */
	unsigned int desc_count;	/* descriptors in ring, aligned */
	unsigned int desc_avail;	/* descriptors free for posting */
};
55
56struct vnic_dev;
57struct vnic_stats;
58
59void *vnic_dev_priv(struct vnic_dev *vdev);
60unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
61 enum vnic_res_type type);
62void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
63 unsigned int index);
64unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
65 unsigned int desc_count, unsigned int desc_size);
66void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
67int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
68 unsigned int desc_count, unsigned int desc_size);
69void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
70 struct vnic_dev_ring *ring);
71int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
72 u64 *a0, u64 *a1, int wait);
73int vnic_dev_fw_info(struct vnic_dev *vdev,
74 struct vnic_devcmd_fw_info **fw_info);
75int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
76 void *value);
77int vnic_dev_stats_clear(struct vnic_dev *vdev);
78int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
79int vnic_dev_hang_notify(struct vnic_dev *vdev);
80void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
81 int broadcast, int promisc, int allmulti);
82void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
83void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
84int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
85int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
86void vnic_dev_notify_unset(struct vnic_dev *vdev);
87int vnic_dev_link_status(struct vnic_dev *vdev);
88u32 vnic_dev_port_speed(struct vnic_dev *vdev);
89u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
90u32 vnic_dev_mtu(struct vnic_dev *vdev);
91int vnic_dev_close(struct vnic_dev *vdev);
92int vnic_dev_enable(struct vnic_dev *vdev);
93int vnic_dev_disable(struct vnic_dev *vdev);
94int vnic_dev_open(struct vnic_dev *vdev, int arg);
95int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
96int vnic_dev_init(struct vnic_dev *vdev, int arg);
97int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
98int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
99void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
100 enum vnic_dev_intr_mode intr_mode);
101enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
102void vnic_dev_unregister(struct vnic_dev *vdev);
103struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
104 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar);
105
106#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
new file mode 100644
index 000000000000..d8617a3373b1
--- /dev/null
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -0,0 +1,282 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_DEVCMD_H_
21#define _VNIC_DEVCMD_H_
22
23#define _CMD_NBITS 14
24#define _CMD_VTYPEBITS 10
25#define _CMD_FLAGSBITS 6
26#define _CMD_DIRBITS 2
27
28#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
29#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
30#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
31#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
32
33#define _CMD_NSHIFT 0
34#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
35#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
36#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
37
38/*
39 * Direction bits (from host perspective).
40 */
41#define _CMD_DIR_NONE 0U
42#define _CMD_DIR_WRITE 1U
43#define _CMD_DIR_READ 2U
44#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
45
46/*
47 * Flag bits.
48 */
49#define _CMD_FLAGS_NONE 0U
50#define _CMD_FLAGS_NOWAIT 1U
51
52/*
53 * vNIC type bits.
54 */
55#define _CMD_VTYPE_NONE 0U
56#define _CMD_VTYPE_ENET 1U
57#define _CMD_VTYPE_FC 2U
58#define _CMD_VTYPE_SCSI 4U
59#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
60
61/*
62 * Used to create cmds..
63*/
64#define _CMDCF(dir, flags, vtype, nr) \
65 (((dir) << _CMD_DIRSHIFT) | \
66 ((flags) << _CMD_FLAGSSHIFT) | \
67 ((vtype) << _CMD_VTYPESHIFT) | \
68 ((nr) << _CMD_NSHIFT))
69#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
70#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
71
72/*
73 * Used to decode cmds..
74*/
75#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
76#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
77#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
78#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
79
/* Device command opcodes.
 *
 * Each enumerator packs the transfer direction (host perspective),
 * flags (NOWAIT commands are posted without polling for completion),
 * the vNIC type(s) the command applies to, and a unique command
 * number, using the _CMDC/_CMDCNW macros above.
 */
enum vnic_devcmd_cmd {
	CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),

	/* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
	CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),

	/* dev-specific block member:
	 * in: (u16)a0=offset,(u8)a1=size
	 * out: a0=value */
	CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),

	/* stats clear */
	CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),

	/* stats dump in mem: (u64)a0=paddr to stats area,
	 * (u16)a1=sizeof stats area */
	CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),

	/* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
	CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),

	/* hang detection notification */
	CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),

	/* MAC address in (u48)a0 */
	CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
		_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),

	/* disable/enable promisc mode: (u8)a0=0/1 */
/***** XXX DEPRECATED *****/
	CMD_PROMISC_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10),

	/* disable/enable all-multi mode: (u8)a0=0/1 */
/***** XXX DEPRECATED *****/
	CMD_ALLMULTI_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11),

	/* add addr from (u48)a0 */
	CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
		_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),

	/* del addr from (u48)a0 */
	CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
		_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),

	/* add VLAN id in (u16)a0 */
	CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),

	/* del VLAN id in (u16)a0 */
	CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),

	/* nic_cfg in (u32)a0 */
	CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),

	/* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
	CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),

	/* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
	CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),

	/* initiate softreset */
	CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),

	/* softreset status:
	 * out: a0=0 reset complete, a0=1 reset in progress */
	CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),

	/* set struct vnic_devcmd_notify buffer in mem:
	 * in:
	 *   (u64)a0=paddr to notify (set paddr=0 to unset)
	 *   (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
	 *   (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
	 * out:
	 *   (u32)a1 = effective size
	 */
	CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),

	/* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
	 * (u8)a1=PXENV_UNDI_xxx */
	CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),

	/* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
	CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),

	/* open status:
	 * out: a0=0 open complete, a0=1 open in progress */
	CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),

	/* close vnic */
	CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),

	/* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
	CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),

	/* variant of CMD_INIT, with provisioning info
	 * (u64)a0=paddr of vnic_devcmd_provinfo
	 * (u32)a1=sizeof provision info */
	CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),

	/* enable virtual link */
	CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),

	/* disable virtual link */
	CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),

	/* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
	CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),

	/* init status:
	 * out: a0=0 init complete, a0=1 init in progress
	 * if a0=0, a1=errno */
	CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),

	/* INT13 API: (u64)a0=paddr to vnic_int13_params struct
	 * (u8)a1=INT13_CMD_xxx */
	CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),

	/* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
	CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),

	/* undo initialize of virtual link */
	CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
};
202
203/* flags for CMD_OPEN */
204#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
205
206/* flags for CMD_INIT */
207#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
208
209/* flags for CMD_PACKET_FILTER */
210#define CMD_PFILTER_DIRECTED 0x01
211#define CMD_PFILTER_MULTICAST 0x02
212#define CMD_PFILTER_BROADCAST 0x04
213#define CMD_PFILTER_PROMISCUOUS 0x08
214#define CMD_PFILTER_ALL_MULTICAST 0x10
215
/* Bits of the read-only devcmd status register. */
enum vnic_devcmd_status {
	STAT_NONE = 0,
	STAT_BUSY = 1 << 0,	/* cmd in progress */
	STAT_ERROR = 1 << 1,	/* last cmd caused error (code in a0) */
};
221
/* Error codes returned by fw in args[0] when STAT_ERROR is set.
 * Hosts translating these to errnos must handle the full range.
 */
enum vnic_devcmd_error {
	ERR_SUCCESS = 0,
	ERR_EINVAL = 1,
	ERR_EFAULT = 2,
	ERR_EPERM = 3,
	ERR_EBUSY = 4,
	ERR_ECMDUNKNOWN = 5,
	ERR_EBADSTATE = 6,
	ERR_ENOMEM = 7,
	ERR_ETIMEDOUT = 8,
	ERR_ELINKDOWN = 9,
};
234
/* Firmware/hardware version block DMA'd from fw by CMD_MCPU_FW_INFO;
 * fields are NUL-terminated strings.
 */
struct vnic_devcmd_fw_info {
	char fw_version[32];
	char fw_build[32];
	char hw_version[32];
	char hw_serial_number[32];
};
241
/* Notify area written asynchronously by fw (registered via CMD_NOTIFY).
 * csum is the sum of the following u32 words, letting the host detect
 * and retry torn reads.
 */
struct vnic_devcmd_notify {
	u32 csum;		/* checksum over following words */

	u32 link_state;		/* link up == 1 */
	u32 port_speed;		/* effective port speed (rate limit) */
	u32 mtu;		/* MTU */
	u32 msglvl;		/* requested driver msg lvl */
	u32 uif;		/* uplink interface */
	u32 status;		/* status bits (see VNIC_STF_*) */
	u32 error;		/* error code (see ERR_*) for first ERR */
};
253#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
254
255struct vnic_devcmd_provinfo {
256 u8 oui[3];
257 u8 type;
258 u8 data[0];
259};
260
261/*
262 * Writing cmd register causes STAT_BUSY to get set in status register.
263 * When cmd completes, STAT_BUSY will be cleared.
264 *
265 * If cmd completed successfully STAT_ERROR will be clear
266 * and args registers contain cmd-specific results.
267 *
268 * If cmd error, STAT_ERROR will be set and args[0] contains error code.
269 *
270 * status register is read-only. While STAT_BUSY is set,
271 * all other register contents are read-only.
272 */
273
/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
#define VNIC_DEVCMD_NARGS 15
/* The devcmd mailbox register layout as mapped in the BAR. */
struct vnic_devcmd {
	u32 status;	/* RO */
	u32 cmd;	/* RW */
	u64 args[VNIC_DEVCMD_NARGS];	/* RW cmd args (little-endian) */
};
281
282#endif /* _VNIC_DEVCMD_H_ */
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
new file mode 100644
index 000000000000..6332ac9391b8
--- /dev/null
+++ b/drivers/net/enic/vnic_enet.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_ENIC_H_
21#define _VNIC_ENIC_H_
22
23/* Device-specific region: enet configuration */
/* Device-specific region: enet configuration (read via CMD_DEV_SPEC). */
struct vnic_enet_config {
	u32 flags;		/* VENETF_* feature bits */
	u32 wq_desc_count;	/* work (tx) queue descriptors */
	u32 rq_desc_count;	/* receive queue descriptors */
	u16 mtu;
	u16 intr_timer;		/* interrupt coalescing timer */
	u8 intr_timer_type;
	u8 intr_mode;
	char devname[16];
};
34
35#define VENETF_TSO 0x1 /* TSO enabled */
36#define VENETF_LRO 0x2 /* LRO enabled */
37#define VENETF_RXCSUM 0x4 /* RX csum enabled */
38#define VENETF_TXCSUM 0x8 /* TX csum enabled */
39#define VENETF_RSS 0x10 /* RSS enabled */
40#define VENETF_RSSHASH_IPV4 0x20 /* Hash on IPv4 fields */
41#define VENETF_RSSHASH_TCPIPV4 0x40 /* Hash on TCP + IPv4 fields */
42#define VENETF_RSSHASH_IPV6 0x80 /* Hash on IPv6 fields */
43#define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */
44#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */
45#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */
46
47#endif /* _VNIC_ENIC_H_ */
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
new file mode 100644
index 000000000000..ddc38f8f4656
--- /dev/null
+++ b/drivers/net/enic/vnic_intr.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/delay.h>
25
26#include "vnic_dev.h"
27#include "vnic_intr.h"
28
/* Release this interrupt instance.  Only drops the pointer to the
 * memory-mapped control registers; the underlying BAR mapping is owned
 * and unmapped by the vnic_dev layer.
 */
void vnic_intr_free(struct vnic_intr *intr)
{
	intr->ctrl = NULL;
}
33
34int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
35 unsigned int index)
36{
37 intr->index = index;
38 intr->vdev = vdev;
39
40 intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
41 if (!intr->ctrl) {
42 printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
43 index);
44 return -EINVAL;
45 }
46
47 return 0;
48}
49
/* Program the interrupt's coalescing policy and masking behavior into
 * the hardware control registers, and clear any accumulated credits.
 */
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
	unsigned int coalescing_type, unsigned int mask_on_assertion)
{
	iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
	iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
	iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
	/* start from a clean slate: no outstanding credits */
	iowrite32(0, &intr->ctrl->int_credits);
}
58
/* Reset the interrupt's credit counter (e.g. when quiescing the device). */
void vnic_intr_clean(struct vnic_intr *intr)
{
	iowrite32(0, &intr->ctrl->int_credits);
}
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
new file mode 100644
index 000000000000..ccc408116af8
--- /dev/null
+++ b/drivers/net/enic/vnic_intr.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_INTR_H_
21#define _VNIC_INTR_H_
22
23#include <linux/pci.h>
24
25#include "vnic_dev.h"
26
27#define VNIC_INTR_TIMER_MAX 0xffff
28
29#define VNIC_INTR_TIMER_TYPE_ABS 0
30#define VNIC_INTR_TIMER_TYPE_QUIET 1
31
32/* Interrupt control */
33struct vnic_intr_ctrl {
34 u32 coalescing_timer; /* 0x00 */
35 u32 pad0;
36 u32 coalescing_value; /* 0x08 */
37 u32 pad1;
38 u32 coalescing_type; /* 0x10 */
39 u32 pad2;
40 u32 mask_on_assertion; /* 0x18 */
41 u32 pad3;
42 u32 mask; /* 0x20 */
43 u32 pad4;
44 u32 int_credits; /* 0x28 */
45 u32 pad5;
46 u32 int_credit_return; /* 0x30 */
47 u32 pad6;
48};
49
50struct vnic_intr {
51 unsigned int index;
52 struct vnic_dev *vdev;
53 struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
54};
55
/* Unmask the interrupt: write 0 to the mask register so the device may
 * assert it again. */
static inline void vnic_intr_unmask(struct vnic_intr *intr)
{
	iowrite32(0, &intr->ctrl->mask);
}
60
/* Mask the interrupt: write 1 to the mask register to suppress assertion. */
static inline void vnic_intr_mask(struct vnic_intr *intr)
{
	iowrite32(1, &intr->ctrl->mask);
}
65
/* Return processed-work credits to the device in a single register write.
 *
 * The int_credit_return register packs three fields:
 *   bits  0-15: number of credits being returned
 *   bit     16: unmask the interrupt in the same operation
 *   bit     17: reset the coalescing timer in the same operation
 */
static inline void vnic_intr_return_credits(struct vnic_intr *intr,
	unsigned int credits, int unmask, int reset_timer)
{
#define VNIC_INTR_UNMASK_SHIFT 16
#define VNIC_INTR_RESET_TIMER_SHIFT 17

	u32 int_credit_return = (credits & 0xffff) |
		(unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
		(reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);

	iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
}
78
/* Read (and thereby acknowledge) the legacy INTx pending-bit array. */
static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
{
	/* get and ack interrupt in one read (clear-and-ack-on-read) */
	return ioread32(legacy_pba);
}
84
85void vnic_intr_free(struct vnic_intr *intr);
86int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
87 unsigned int index);
88void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
89 unsigned int coalescing_type, unsigned int mask_on_assertion);
90void vnic_intr_clean(struct vnic_intr *intr);
91
92#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h
new file mode 100644
index 000000000000..dadf26fae69a
--- /dev/null
+++ b/drivers/net/enic/vnic_nic.h
@@ -0,0 +1,65 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_NIC_H_
21#define _VNIC_NIC_H_
22
23#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL
24#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0
25#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8)
26#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL
27#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8
28#define NIC_CFG_RSS_HASH_BITS (7UL << 16)
29#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL
30#define NIC_CFG_RSS_HASH_BITS_SHIFT 16
31#define NIC_CFG_RSS_BASE_CPU (7UL << 19)
32#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL
33#define NIC_CFG_RSS_BASE_CPU_SHIFT 19
34#define NIC_CFG_RSS_ENABLE (1UL << 22)
35#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL
36#define NIC_CFG_RSS_ENABLE_SHIFT 22
37#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23)
38#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL
39#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23
40#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24)
41#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
42#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
43
44static inline void vnic_set_nic_cfg(u32 *nic_cfg,
45 u8 rss_default_cpu, u8 rss_hash_type,
46 u8 rss_hash_bits, u8 rss_base_cpu,
47 u8 rss_enable, u8 tso_ipid_split_en,
48 u8 ig_vlan_strip_en)
49{
50 *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
51 ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
52 << NIC_CFG_RSS_HASH_TYPE_SHIFT) |
53 ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
54 << NIC_CFG_RSS_HASH_BITS_SHIFT) |
55 ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
56 << NIC_CFG_RSS_BASE_CPU_SHIFT) |
57 ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
58 << NIC_CFG_RSS_ENABLE_SHIFT) |
59 ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
60 << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
61 ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
62 << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
63}
64
65#endif /* _VNIC_NIC_H_ */
diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/enic/vnic_resource.h
new file mode 100644
index 000000000000..144d2812f081
--- /dev/null
+++ b/drivers/net/enic/vnic_resource.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_RESOURCE_H_
21#define _VNIC_RESOURCE_H_
22
23#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
24#define VNIC_RES_VERSION 0x00000000L
25
26/* vNIC resource types */
27enum vnic_res_type {
28 RES_TYPE_EOL, /* End-of-list */
29 RES_TYPE_WQ, /* Work queues */
30 RES_TYPE_RQ, /* Receive queues */
31 RES_TYPE_CQ, /* Completion queues */
32 RES_TYPE_RSVD1,
33 RES_TYPE_NIC_CFG, /* Enet NIC config registers */
34 RES_TYPE_RSVD2,
35 RES_TYPE_RSVD3,
36 RES_TYPE_RSVD4,
37 RES_TYPE_RSVD5,
38 RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
39 RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
40 RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
41 RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status, r2c */
42 RES_TYPE_RSVD6,
43 RES_TYPE_RSVD7,
44 RES_TYPE_DEVCMD, /* Device command region */
45 RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
46
47 RES_TYPE_MAX, /* Count of resource types */
48};
49
/* Header at the start of the device's resource map; validated against
 * VNIC_RES_MAGIC / VNIC_RES_VERSION above. */
struct vnic_resource_header {
	u32 magic;
	u32 version;
};
54
/* One entry in the device resource map. */
struct vnic_resource {
	u8 type;		/* enum vnic_res_type */
	u8 bar;			/* PCI BAR holding this resource */
	u8 pad[2];
	u32 bar_offset;		/* byte offset of resource within the BAR */
	u32 count;		/* number of instances of this resource */
};
62
63#endif /* _VNIC_RESOURCE_H_ */
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
new file mode 100644
index 000000000000..9365e63e821a
--- /dev/null
+++ b/drivers/net/enic/vnic_rq.c
@@ -0,0 +1,199 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/delay.h>
25
26#include "vnic_dev.h"
27#include "vnic_rq.h"
28
/* Allocate the driver-side vnic_rq_buf bookkeeping entries, one per ring
 * descriptor, in blocks of VNIC_RQ_BUF_BLK_ENTRIES, and link them into a
 * single circular list that mirrors the hardware descriptor ring.
 *
 * On allocation failure, already-allocated blocks are left in rq->bufs[]
 * for the caller (vnic_rq_alloc) to release via vnic_rq_free().
 * Returns 0 on success or -ENOMEM.
 */
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
	struct vnic_rq_buf *buf;
	struct vnic_dev *vdev;
	unsigned int i, j, count = rq->ring.desc_count;
	unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);

	vdev = rq->vdev;

	for (i = 0; i < blks; i++) {
		/* GFP_ATOMIC: presumably callable from atomic context —
		 * TODO confirm; callers visible here are process context */
		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
		if (!rq->bufs[i]) {
			printk(KERN_ERR "Failed to alloc rq_bufs\n");
			return -ENOMEM;
		}
	}

	/* Thread every entry to its successor; the last in-use entry
	 * wraps back to the first, closing the circle. */
	for (i = 0; i < blks; i++) {
		buf = rq->bufs[i];
		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
			buf->desc = (u8 *)rq->ring.descs +
				rq->ring.desc_size * buf->index;
			if (buf->index + 1 == count) {
				buf->next = rq->bufs[0];
				break;
			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
				/* last entry of this block -> next block */
				buf->next = rq->bufs[i + 1];
			} else {
				buf->next = buf + 1;
				buf++;
			}
		}
	}

	rq->to_use = rq->to_clean = rq->bufs[0];
	rq->buf_index = 0;

	return 0;
}
69
70void vnic_rq_free(struct vnic_rq *rq)
71{
72 struct vnic_dev *vdev;
73 unsigned int i;
74
75 vdev = rq->vdev;
76
77 vnic_dev_free_desc_ring(vdev, &rq->ring);
78
79 for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
80 kfree(rq->bufs[i]);
81 rq->bufs[i] = NULL;
82 }
83
84 rq->ctrl = NULL;
85}
86
/* Allocate and wire up receive queue 'index': hook its control
 * registers, allocate the DMA descriptor ring, and build the
 * driver-side buffer list.
 *
 * Returns 0 on success, -EINVAL if the RQ resource is missing, or a
 * negative errno from ring/buffer allocation.
 */
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size)
{
	int err;

	rq->index = index;
	rq->vdev = vdev;

	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
	if (!rq->ctrl) {
		printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
		return -EINVAL;
	}

	/* make sure the queue is quiesced before touching its ring */
	vnic_rq_disable(rq);

	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
	if (err)
		return err;

	err = vnic_rq_alloc_bufs(rq);
	if (err) {
		/* releases the ring and any partially-allocated buf blocks */
		vnic_rq_free(rq);
		return err;
	}

	return 0;
}
115
/* Program the RQ control registers: ring base/size, completion queue
 * binding, and error-interrupt settings, then align the software
 * ring pointers with the hardware's current fetch_index.
 */
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;
	u32 fetch_index;

	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &rq->ctrl->ring_base);
	iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
	iowrite32(cq_index, &rq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
	iowrite32(0, &rq->ctrl->dropped_packet_count);
	iowrite32(0, &rq->ctrl->error_status);

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
	/* posted == fetch: queue starts empty from hw's point of view */
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	rq->buf_index = 0;
}
141
/* Read the RQ's hardware error-status register (non-zero means error). */
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
	return ioread32(&rq->ctrl->error_status);
}
146
/* Enable the receive queue in hardware. */
void vnic_rq_enable(struct vnic_rq *rq)
{
	iowrite32(1, &rq->ctrl->enable);
}
151
/* Disable the receive queue and poll until hardware confirms it has
 * stopped (running bit clears).  Polls for roughly 100us total —
 * NOTE(review): confirm this is long enough for worst-case hardware.
 *
 * Returns 0 on success, -ETIMEDOUT if the queue never stopped.
 */
int vnic_rq_disable(struct vnic_rq *rq)
{
	unsigned int wait;

	iowrite32(0, &rq->ctrl->enable);

	/* Wait for HW to ACK disable request */
	for (wait = 0; wait < 100; wait++) {
		if (!(ioread32(&rq->ctrl->running)))
			return 0;
		udelay(1);
	}

	printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);

	return -ETIMEDOUT;
}
169
/* Reclaim all posted-but-unprocessed buffers from a *disabled* RQ,
 * invoking buf_clean on each so the caller can release its OS buffer,
 * then resynchronize the software ring pointers with the hardware
 * fetch_index and zero the descriptor ring.
 *
 * Must only be called with the queue disabled (asserted below).
 */
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
	struct vnic_rq_buf *buf;
	u32 fetch_index;

	BUG_ON(ioread32(&rq->ctrl->enable));

	buf = rq->to_clean;

	while (vnic_rq_desc_used(rq) > 0) {

		(*buf_clean)(rq, buf);

		buf = rq->to_clean = buf->next;
		rq->ring.desc_avail++;
	}

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	rq->buf_index = 0;

	vnic_dev_clear_desc_ring(&rq->ring);
}
199
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
new file mode 100644
index 000000000000..82bfca67cc4d
--- /dev/null
+++ b/drivers/net/enic/vnic_rq.h
@@ -0,0 +1,204 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_RQ_H_
21#define _VNIC_RQ_H_
22
23#include <linux/pci.h>
24
25#include "vnic_dev.h"
26#include "vnic_cq.h"
27
28/* Receive queue control */
29struct vnic_rq_ctrl {
30 u64 ring_base; /* 0x00 */
31 u32 ring_size; /* 0x08 */
32 u32 pad0;
33 u32 posted_index; /* 0x10 */
34 u32 pad1;
35 u32 cq_index; /* 0x18 */
36 u32 pad2;
37 u32 enable; /* 0x20 */
38 u32 pad3;
39 u32 running; /* 0x28 */
40 u32 pad4;
41 u32 fetch_index; /* 0x30 */
42 u32 pad5;
43 u32 error_interrupt_enable; /* 0x38 */
44 u32 pad6;
45 u32 error_interrupt_offset; /* 0x40 */
46 u32 pad7;
47 u32 error_status; /* 0x48 */
48 u32 pad8;
49 u32 dropped_packet_count; /* 0x50 */
50 u32 pad9;
51 u32 dropped_packet_count_rc; /* 0x58 */
52 u32 pad10;
53};
54
55/* Break the vnic_rq_buf allocations into blocks of 64 entries */
56#define VNIC_RQ_BUF_BLK_ENTRIES 64
57#define VNIC_RQ_BUF_BLK_SZ \
58 (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
59#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
60 DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
61#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
62
/* Driver-side bookkeeping for one RQ descriptor. */
struct vnic_rq_buf {
	struct vnic_rq_buf *next;	/* circular list through the ring */
	dma_addr_t dma_addr;		/* DMA address of the posted buffer */
	void *os_buf;			/* opaque OS buffer backing this desc */
	unsigned int os_buf_index;	/* caller-supplied buffer index */
	unsigned int len;		/* posted buffer length */
	unsigned int index;		/* descriptor index within the ring */
	void *desc;			/* pointer into the descriptor ring */
};
72
/* One receive queue instance. */
struct vnic_rq {
	unsigned int index;		/* RQ index on the device */
	struct vnic_dev *vdev;
	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;	/* DMA descriptor ring */
	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];	/* buf blocks */
	struct vnic_rq_buf *to_use;	/* next descriptor to post */
	struct vnic_rq_buf *to_clean;	/* next descriptor to reclaim */
	void *os_buf_head;
	unsigned int buf_index;		/* monotonically increasing post index */
	unsigned int pkts_outstanding;
};
85
/* Number of descriptors currently available for posting. */
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
	/* how many does SW own? */
	return rq->ring.desc_avail;
}
91
/* Number of descriptors currently posted to hardware.  One descriptor
 * is always held in reserve (hence the -1), presumably so posted_index
 * never catches up to fetch_index — TODO confirm. */
static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
	/* how many does HW own? */
	return rq->ring.desc_count - rq->ring.desc_avail - 1;
}
97
/* Pointer to the next descriptor to be filled (does not advance). */
static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
	return rq->to_use->desc;
}
102
/* Ring index of the next descriptor to be filled (does not advance). */
static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
	return rq->to_use->index;
}
107
/* Return the current post counter and advance it (post-increment). */
static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
{
	return rq->buf_index++;
}
112
/* Record a receive buffer in the next free descriptor slot and advance
 * the software ring pointer.  posted_index is only written to hardware
 * every VNIC_RQ_RETURN_RATE+1 descriptors to amortize MMIO cost; the
 * caller must post enough buffers for the index to eventually be
 * written.
 */
static inline void vnic_rq_post(struct vnic_rq *rq,
	void *os_buf, unsigned int os_buf_index,
	dma_addr_t dma_addr, unsigned int len)
{
	struct vnic_rq_buf *buf = rq->to_use;

	buf->os_buf = os_buf;
	buf->os_buf_index = os_buf_index;
	buf->dma_addr = dma_addr;
	buf->len = len;

	buf = buf->next;
	rq->to_use = buf;
	rq->ring.desc_avail--;

	/* Move the posted_index every nth descriptor
	 */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE 0xf	/* keep 2^n - 1 */
#endif

	/* note: buf has already advanced, so the index written is that of
	 * the first not-yet-filled descriptor */
	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0)
		iowrite32(buf->index, &rq->ctrl->posted_index);
}
138
/* Return 'count' descriptors to the available pool (used with
 * VNIC_RQ_DEFER_RETURN_DESC batching). */
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
	rq->ring.desc_avail += count;
}
143
/* How vnic_rq_service should account reclaimed descriptors. */
enum desc_return_options {
	VNIC_RQ_RETURN_DESC,		/* return each desc immediately */
	VNIC_RQ_DEFER_RETURN_DESC,	/* caller batches via vnic_rq_return_descs */
};
148
/* Walk to_clean up to and including 'completed_index', invoking
 * buf_service on every descriptor.  Descriptors before the completed
 * one are flagged 'skipped'.  Depending on desc_return, descriptors
 * are either returned to the pool here or left for the caller to batch
 * with vnic_rq_return_descs.
 */
static inline void vnic_rq_service(struct vnic_rq *rq,
	struct cq_desc *cq_desc, u16 completed_index,
	int desc_return, void (*buf_service)(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque), void *opaque)
{
	struct vnic_rq_buf *buf;
	int skipped;

	buf = rq->to_clean;
	while (1) {

		/* everything before the completing descriptor was skipped */
		skipped = (buf->index != completed_index);

		(*buf_service)(rq, cq_desc, buf, skipped, opaque);

		if (desc_return == VNIC_RQ_RETURN_DESC)
			rq->ring.desc_avail++;

		rq->to_clean = buf->next;

		/* stop once the completing descriptor has been serviced */
		if (!skipped)
			break;

		buf = rq->to_clean;
	}
}
176
/* Replenish the RQ by repeatedly invoking buf_fill (which is expected
 * to post one buffer per call) until only the reserved descriptor
 * remains free.  Stops and returns buf_fill's error on failure,
 * otherwise returns 0.
 */
static inline int vnic_rq_fill(struct vnic_rq *rq,
	int (*buf_fill)(struct vnic_rq *rq))
{
	while (vnic_rq_desc_avail(rq) > 1) {
		int err = (*buf_fill)(rq);

		if (err)
			return err;
	}

	return 0;
}
191
192void vnic_rq_free(struct vnic_rq *rq);
193int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
194 unsigned int desc_count, unsigned int desc_size);
195void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
196 unsigned int error_interrupt_enable,
197 unsigned int error_interrupt_offset);
198unsigned int vnic_rq_error_status(struct vnic_rq *rq);
199void vnic_rq_enable(struct vnic_rq *rq);
200int vnic_rq_disable(struct vnic_rq *rq);
201void vnic_rq_clean(struct vnic_rq *rq,
202 void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
203
204#endif /* _VNIC_RQ_H_ */
diff --git a/drivers/net/enic/vnic_rss.h b/drivers/net/enic/vnic_rss.h
new file mode 100644
index 000000000000..e325d65d7c34
--- /dev/null
+++ b/drivers/net/enic/vnic_rss.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 */
5
6#ifndef _VNIC_RSS_H_
7#define _VNIC_RSS_H_
8
9/* RSS key array */
/* RSS key array: four 10-byte key segments, each padded to 16 bytes;
 * raw[] overlays the same 64 bytes as u64 words. */
union vnic_rss_key {
	struct {
		u8 b[10];
		u8 b_pad[6];
	} key[4];
	u64 raw[8];
};
17
18/* RSS cpu array */
/* RSS cpu array: 32 entries of 4 bytes each, padded to 8 bytes;
 * raw[] overlays the same 256 bytes as u64 words. */
union vnic_rss_cpu {
	struct {
		u8 b[4] ;
		u8 b_pad[4];
	} cpu[32];
	u64 raw[32];
};
26
27void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key);
28void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
29void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key);
30void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
31
32#endif /* _VNIC_RSS_H_ */
diff --git a/drivers/net/enic/vnic_stats.h b/drivers/net/enic/vnic_stats.h
new file mode 100644
index 000000000000..9ff9614d89b1
--- /dev/null
+++ b/drivers/net/enic/vnic_stats.h
@@ -0,0 +1,70 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_STATS_H_
21#define _VNIC_STATS_H_
22
23/* Tx statistics */
24struct vnic_tx_stats {
25 u64 tx_frames_ok;
26 u64 tx_unicast_frames_ok;
27 u64 tx_multicast_frames_ok;
28 u64 tx_broadcast_frames_ok;
29 u64 tx_bytes_ok;
30 u64 tx_unicast_bytes_ok;
31 u64 tx_multicast_bytes_ok;
32 u64 tx_broadcast_bytes_ok;
33 u64 tx_drops;
34 u64 tx_errors;
35 u64 tx_tso;
36 u64 rsvd[16];
37};
38
39/* Rx statistics */
40struct vnic_rx_stats {
41 u64 rx_frames_ok;
42 u64 rx_frames_total;
43 u64 rx_unicast_frames_ok;
44 u64 rx_multicast_frames_ok;
45 u64 rx_broadcast_frames_ok;
46 u64 rx_bytes_ok;
47 u64 rx_unicast_bytes_ok;
48 u64 rx_multicast_bytes_ok;
49 u64 rx_broadcast_bytes_ok;
50 u64 rx_drop;
51 u64 rx_no_bufs;
52 u64 rx_errors;
53 u64 rx_rss;
54 u64 rx_crc_errors;
55 u64 rx_frames_64;
56 u64 rx_frames_127;
57 u64 rx_frames_255;
58 u64 rx_frames_511;
59 u64 rx_frames_1023;
60 u64 rx_frames_1518;
61 u64 rx_frames_to_max;
62 u64 rsvd[16];
63};
64
65struct vnic_stats {
66 struct vnic_tx_stats tx;
67 struct vnic_rx_stats rx;
68};
69
70#endif /* _VNIC_STATS_H_ */
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
new file mode 100644
index 000000000000..a576d04708ef
--- /dev/null
+++ b/drivers/net/enic/vnic_wq.c
@@ -0,0 +1,184 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/delay.h>
25
26#include "vnic_dev.h"
27#include "vnic_wq.h"
28
/* Allocate the driver-side vnic_wq_buf bookkeeping entries, one per
 * ring descriptor, in blocks of VNIC_WQ_BUF_BLK_ENTRIES, and link them
 * into a circular list mirroring the hardware descriptor ring.
 *
 * On allocation failure, already-allocated blocks are left in wq->bufs[]
 * for the caller (vnic_wq_alloc) to release via vnic_wq_free().
 * Returns 0 on success or -ENOMEM.
 */
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
	struct vnic_wq_buf *buf;
	struct vnic_dev *vdev;
	unsigned int i, j, count = wq->ring.desc_count;
	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);

	vdev = wq->vdev;

	for (i = 0; i < blks; i++) {
		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
		if (!wq->bufs[i]) {
			printk(KERN_ERR "Failed to alloc wq_bufs\n");
			return -ENOMEM;
		}
	}

	/* Thread every entry to its successor; the last in-use entry
	 * wraps back to the first, closing the circle. */
	for (i = 0; i < blks; i++) {
		buf = wq->bufs[i];
		for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
			buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
			buf->desc = (u8 *)wq->ring.descs +
				wq->ring.desc_size * buf->index;
			if (buf->index + 1 == count) {
				buf->next = wq->bufs[0];
				break;
			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
				/* last entry of this block -> next block */
				buf->next = wq->bufs[i + 1];
			} else {
				buf->next = buf + 1;
				buf++;
			}
		}
	}

	wq->to_use = wq->to_clean = wq->bufs[0];

	return 0;
}
68
69void vnic_wq_free(struct vnic_wq *wq)
70{
71 struct vnic_dev *vdev;
72 unsigned int i;
73
74 vdev = wq->vdev;
75
76 vnic_dev_free_desc_ring(vdev, &wq->ring);
77
78 for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
79 kfree(wq->bufs[i]);
80 wq->bufs[i] = NULL;
81 }
82
83 wq->ctrl = NULL;
84}
85
/* Allocate and wire up work queue 'index': hook its control registers,
 * allocate the DMA descriptor ring, and build the driver-side buffer
 * list.
 *
 * Returns 0 on success, -EINVAL if the WQ resource is missing, or a
 * negative errno from ring/buffer allocation.
 */
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size)
{
	int err;

	wq->index = index;
	wq->vdev = vdev;

	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
	if (!wq->ctrl) {
		printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
		return -EINVAL;
	}

	/* make sure the queue is quiesced before touching its ring */
	vnic_wq_disable(wq);

	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
	if (err)
		return err;

	err = vnic_wq_alloc_bufs(wq);
	if (err) {
		/* releases the ring and any partially-allocated buf blocks */
		vnic_wq_free(wq);
		return err;
	}

	return 0;
}
114
/* Program the WQ control registers: ring base/size, completion queue
 * binding and error-interrupt settings, and reset both hardware ring
 * indices to zero (unlike the RQ, the WQ always restarts from 0).
 */
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;

	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &wq->ctrl->ring_base);
	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(cq_index, &wq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
	iowrite32(0, &wq->ctrl->error_status);
}
131
/* Read the WQ's hardware error-status register (non-zero means error). */
unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
	return ioread32(&wq->ctrl->error_status);
}
136
/* Enable the work queue in hardware. */
void vnic_wq_enable(struct vnic_wq *wq)
{
	iowrite32(1, &wq->ctrl->enable);
}
141
/* Disable the work queue and poll until hardware confirms it has
 * stopped (running bit clears).  Polls for roughly 100us total —
 * NOTE(review): confirm this is long enough for worst-case hardware.
 *
 * Returns 0 on success, -ETIMEDOUT if the queue never stopped.
 */
int vnic_wq_disable(struct vnic_wq *wq)
{
	unsigned int wait;

	iowrite32(0, &wq->ctrl->enable);

	/* Wait for HW to ACK disable request */
	for (wait = 0; wait < 100; wait++) {
		if (!(ioread32(&wq->ctrl->running)))
			return 0;
		udelay(1);
	}

	printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);

	return -ETIMEDOUT;
}
159
/* Reclaim all posted-but-uncompleted buffers from a *disabled* WQ,
 * invoking buf_clean on each so the caller can release its OS buffer,
 * then reset the software pointers and hardware indices to the start
 * of the ring and zero the descriptor ring.
 *
 * Must only be called with the queue disabled (asserted below).
 */
void vnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
	struct vnic_wq_buf *buf;

	BUG_ON(ioread32(&wq->ctrl->enable));

	buf = wq->to_clean;

	while (vnic_wq_desc_used(wq) > 0) {

		(*buf_clean)(wq, buf);

		buf = wq->to_clean = buf->next;
		wq->ring.desc_avail++;
	}

	wq->to_use = wq->to_clean = wq->bufs[0];

	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(0, &wq->ctrl->error_status);

	vnic_dev_clear_desc_ring(&wq->ring);
}
diff --git a/drivers/net/enic/vnic_wq.h b/drivers/net/enic/vnic_wq.h
new file mode 100644
index 000000000000..7081828d8a42
--- /dev/null
+++ b/drivers/net/enic/vnic_wq.h
@@ -0,0 +1,154 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_WQ_H_
21#define _VNIC_WQ_H_
22
23#include <linux/pci.h>
24
25#include "vnic_dev.h"
26#include "vnic_cq.h"
27
28/* Work queue control */
29struct vnic_wq_ctrl {
30 u64 ring_base; /* 0x00 */
31 u32 ring_size; /* 0x08 */
32 u32 pad0;
33 u32 posted_index; /* 0x10 */
34 u32 pad1;
35 u32 cq_index; /* 0x18 */
36 u32 pad2;
37 u32 enable; /* 0x20 */
38 u32 pad3;
39 u32 running; /* 0x28 */
40 u32 pad4;
41 u32 fetch_index; /* 0x30 */
42 u32 pad5;
43 u32 dca_value; /* 0x38 */
44 u32 pad6;
45 u32 error_interrupt_enable; /* 0x40 */
46 u32 pad7;
47 u32 error_interrupt_offset; /* 0x48 */
48 u32 pad8;
49 u32 error_status; /* 0x50 */
50 u32 pad9;
51};
52
/* Driver-side bookkeeping for one WQ descriptor. */
struct vnic_wq_buf {
	struct vnic_wq_buf *next;	/* circular list through the ring */
	dma_addr_t dma_addr;		/* DMA address of this fragment */
	void *os_buf;			/* opaque OS buffer; set only on EOP */
	unsigned int len;		/* fragment length */
	unsigned int index;		/* descriptor index within the ring */
	int sop;			/* start-of-packet flag */
	void *desc;			/* pointer into the descriptor ring */
};
62
63/* Break the vnic_wq_buf allocations into blocks of 64 entries */
64#define VNIC_WQ_BUF_BLK_ENTRIES 64
65#define VNIC_WQ_BUF_BLK_SZ \
66 (VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
67#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
68 DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
69#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
70
71struct vnic_wq {
72 unsigned int index;
73 struct vnic_dev *vdev;
74 struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
75 struct vnic_dev_ring ring;
76 struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
77 struct vnic_wq_buf *to_use;
78 struct vnic_wq_buf *to_clean;
79 unsigned int pkts_outstanding;
80};
81
/* Number of descriptors currently available for posting. */
static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
{
	/* how many does SW own? */
	return wq->ring.desc_avail;
}
87
/* Number of descriptors currently posted to hardware.  One descriptor
 * is always held in reserve (hence the -1), presumably so posted_index
 * never catches up to fetch_index — TODO confirm. */
static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
{
	/* how many does HW own? */
	return wq->ring.desc_count - wq->ring.desc_avail - 1;
}
93
/* Pointer to the next descriptor to be filled (does not advance). */
static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
{
	return wq->to_use->desc;
}
98
/* Record one transmit fragment in the next free descriptor slot.
 * os_buf is only stored on the end-of-packet fragment, so cleanup can
 * release the OS buffer exactly once per packet.  posted_index is
 * written to hardware only on EOP; note buf has already advanced, so
 * the index posted is that of the first not-yet-filled descriptor.
 */
static inline void vnic_wq_post(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr,
	unsigned int len, int sop, int eop)
{
	struct vnic_wq_buf *buf = wq->to_use;

	buf->sop = sop;
	buf->os_buf = eop ? os_buf : NULL;
	buf->dma_addr = dma_addr;
	buf->len = len;

	buf = buf->next;
	if (eop)
		iowrite32(buf->index, &wq->ctrl->posted_index);
	wq->to_use = buf;

	wq->ring.desc_avail--;
}
117
/* Walk to_clean up to and including 'completed_index', invoking
 * buf_service on every reclaimed descriptor and returning each to the
 * available pool.  Assumes completed_index is reachable from to_clean;
 * the loop services at least one descriptor.
 */
static inline void vnic_wq_service(struct vnic_wq *wq,
	struct cq_desc *cq_desc, u16 completed_index,
	void (*buf_service)(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
	void *opaque)
{
	struct vnic_wq_buf *buf;

	buf = wq->to_clean;
	while (1) {

		(*buf_service)(wq, cq_desc, buf, opaque);

		wq->ring.desc_avail++;

		wq->to_clean = buf->next;

		/* stop once the completing descriptor has been serviced */
		if (buf->index == completed_index)
			break;

		buf = wq->to_clean;
	}
}
141
142void vnic_wq_free(struct vnic_wq *wq);
143int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
144 unsigned int desc_count, unsigned int desc_size);
145void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
146 unsigned int error_interrupt_enable,
147 unsigned int error_interrupt_offset);
148unsigned int vnic_wq_error_status(struct vnic_wq *wq);
149void vnic_wq_enable(struct vnic_wq *wq);
150int vnic_wq_disable(struct vnic_wq *wq);
151void vnic_wq_clean(struct vnic_wq *wq,
152 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
153
154#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/net/enic/wq_enet_desc.h b/drivers/net/enic/wq_enet_desc.h
new file mode 100644
index 000000000000..483596c2d8bf
--- /dev/null
+++ b/drivers/net/enic/wq_enet_desc.h
@@ -0,0 +1,98 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _WQ_ENET_DESC_H_
21#define _WQ_ENET_DESC_H_
22
/* Ethernet work queue descriptor: 16B
 *
 * Matches the hardware transmit descriptor layout; all fields are
 * little-endian.  Use wq_enet_desc_enc()/wq_enet_desc_dec() below
 * rather than touching the packed bit fields directly.
 */
struct wq_enet_desc {
	__le64 address;			/* buffer fragment DMA address */
	__le16 length;			/* fragment length (bits 13:0) */
	__le16 mss_loopback;		/* TSO MSS (15:2) | loopback (1) */
	__le16 header_length_flags;	/* hdr len (9:0) | OM (11:10) | flags (15:12) */
	__le16 vlan_tag;		/* tag inserted when VLAN_TAG_INSERT set */
};

/* Bit widths/masks/shifts for the packed fields above */
#define WQ_ENET_ADDR_BITS		64
#define WQ_ENET_LEN_BITS		14
#define WQ_ENET_LEN_MASK		((1 << WQ_ENET_LEN_BITS) - 1)
#define WQ_ENET_MSS_BITS		14
#define WQ_ENET_MSS_MASK		((1 << WQ_ENET_MSS_BITS) - 1)
#define WQ_ENET_MSS_SHIFT		2
#define WQ_ENET_LOOPBACK_SHIFT		1
#define WQ_ENET_HDRLEN_BITS		10
#define WQ_ENET_HDRLEN_MASK		((1 << WQ_ENET_HDRLEN_BITS) - 1)
#define WQ_ENET_FLAGS_OM_BITS		2
#define WQ_ENET_FLAGS_OM_MASK		((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
#define WQ_ENET_FLAGS_EOP_SHIFT		12	/* end of packet */
#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT	13	/* generate completion entry */
#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT	14
#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT	15

/* Offload mode (OM field) values */
#define WQ_ENET_OFFLOAD_MODE_CSUM	0
#define WQ_ENET_OFFLOAD_MODE_RESERVED	1
#define WQ_ENET_OFFLOAD_MODE_CSUM_L4	2
#define WQ_ENET_OFFLOAD_MODE_TSO	3
52
53static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
54 u64 address, u16 length, u16 mss, u16 header_length,
55 u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
56 u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
57{
58 desc->address = cpu_to_le64(address);
59 desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
60 desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
61 WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
62 desc->header_length_flags = cpu_to_le16(
63 (header_length & WQ_ENET_HDRLEN_MASK) |
64 (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
65 (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
66 (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
67 (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
68 (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
69 desc->vlan_tag = cpu_to_le16(vlan_tag);
70}
71
72static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
73 u64 *address, u16 *length, u16 *mss, u16 *header_length,
74 u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
75 u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
76{
77 *address = le64_to_cpu(desc->address);
78 *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
79 *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
80 WQ_ENET_MSS_MASK;
81 *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
82 WQ_ENET_LOOPBACK_SHIFT) & 1);
83 *header_length = le16_to_cpu(desc->header_length_flags) &
84 WQ_ENET_HDRLEN_MASK;
85 *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
86 WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
87 *eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
88 WQ_ENET_FLAGS_EOP_SHIFT) & 1);
89 *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
90 WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
91 *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
92 WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
93 *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
94 WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
95 *vlan_tag = le16_to_cpu(desc->vlan_tag);
96}
97
98#endif /* _WQ_ENET_DESC_H_ */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 90a132ab84a6..6f4276d461c0 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1411,6 +1411,8 @@
1411#define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013 1411#define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013
1412#define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014 1412#define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014
1413 1413
1414#define PCI_VENDOR_ID_CISCO 0x1137
1415
1414#define PCI_VENDOR_ID_ZIATECH 0x1138 1416#define PCI_VENDOR_ID_ZIATECH 0x1138
1415#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550 1417#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550
1416 1418