author		David S. Miller <davem@davemloft.net>	2008-09-19 18:51:35 -0400
committer	David S. Miller <davem@davemloft.net>	2008-09-19 18:51:35 -0400
commit		79b6f7ecdac7a37df72a5f354816c0dd0b6ac592 (patch)
tree		cb709af3ca7425768a596df97ccafbd6b8397d1d /drivers
parent		02a1416478b70cd49bd74827438c8ba797784728 (diff)
parent		c4e84bde1d595d857d3c74b49b9c45cc770df792 (diff)

Merge branch 'new-drivers' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

Diffstat (limited to 'drivers'): 38 files changed, 20329 insertions, 0 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4a11296a9514..69c81da48ebc 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1840,6 +1840,17 @@ config NE_H8300
 	  Say Y here if you want to use the NE2000 compatible
 	  controller on the Renesas H8/300 processor.
 
+config ATL2
+	tristate "Atheros L2 Fast Ethernet support"
+	depends on PCI
+	select CRC32
+	select MII
+	help
+	  This driver supports the Atheros L2 fast ethernet adapter.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called atl2.
+
 source "drivers/net/fs_enet/Kconfig"
 
 endif # NET_ETHERNET
@@ -2302,6 +2313,18 @@ config ATL1E
 	  To compile this driver as a module, choose M here. The module
 	  will be called atl1e.
 
+config JME
+	tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
+	depends on PCI
+	select CRC32
+	select MII
+	---help---
+	  This driver supports the PCI-Express gigabit ethernet adapters
+	  based on JMicron JMC250 chipset.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called jme.
+
 endif # NETDEV_1000
 
 #
@@ -2377,6 +2400,13 @@ config EHEA
 	  To compile the driver as a module, choose M here. The module
 	  will be called ehea.
 
+config ENIC
+	tristate "Cisco 10G Ethernet NIC support"
+	depends on PCI && INET
+	select INET_LRO
+	help
+	  This enables the support for the Cisco 10G Ethernet card.
+
 config IXGBE
 	tristate "Intel(R) 10GbE PCI Express adapters support"
 	depends on PCI && INET
@@ -2496,6 +2526,15 @@ config BNX2X
 	  To compile this driver as a module, choose M here: the module
 	  will be called bnx2x. This is recommended.
 
+config QLGE
+	tristate "QLogic QLGE 10Gb Ethernet Driver Support"
+	depends on PCI
+	help
+	  This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called qlge.
+
 source "drivers/net/sfc/Kconfig"
 
 endif # NETDEV_10000
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index f66b79bd3b89..fa2510b2e609 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -15,9 +15,12 @@ obj-$(CONFIG_EHEA) += ehea/
 obj-$(CONFIG_CAN) += can/
 obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_ATL1) += atlx/
+obj-$(CONFIG_ATL2) += atlx/
 obj-$(CONFIG_ATL1E) += atl1e/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 obj-$(CONFIG_TEHUTI) += tehuti.o
+obj-$(CONFIG_ENIC) += enic/
+obj-$(CONFIG_JME) += jme.o
 
 gianfar_driver-objs := gianfar.o \
 	gianfar_ethtool.o \
@@ -128,6 +131,7 @@ obj-$(CONFIG_AX88796) += ax88796.o
 obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
 obj-$(CONFIG_QLA3XXX) += qla3xxx.o
+obj-$(CONFIG_QLGE) += qlge/
 
 obj-$(CONFIG_PPP) += ppp_generic.o
 obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
diff --git a/drivers/net/atlx/Makefile b/drivers/net/atlx/Makefile
index ca45553a040d..e4f6022ca552 100644
--- a/drivers/net/atlx/Makefile
+++ b/drivers/net/atlx/Makefile
@@ -1 +1,3 @@
 obj-$(CONFIG_ATL1) += atl1.o
+obj-$(CONFIG_ATL2) += atl2.o
+
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
new file mode 100644
index 000000000000..d548a67da1e8
--- /dev/null
+++ b/drivers/net/atlx/atl2.c
@@ -0,0 +1,3127 @@
/*
 * Copyright(c) 2006 - 2007 Atheros Corporation. All rights reserved.
 * Copyright(c) 2007 - 2008 Chris Snook <csnook@redhat.com>
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <asm/atomic.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/hardirq.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/irqflags.h>
#include <linux/irqreturn.h>
#include <linux/mii.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/pm.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "atl2.h"

#define ATL2_DRV_VERSION "2.2.3"

static char atl2_driver_name[] = "atl2";
static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
static char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
static char atl2_driver_version[] = ATL2_DRV_VERSION;

MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ATL2_DRV_VERSION);

/*
 * atl2_pci_tbl - PCI Device ID Table
 */
static struct pci_device_id atl2_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
	/* required last entry */
	{0,}
};
MODULE_DEVICE_TABLE(pci, atl2_pci_tbl);

static void atl2_set_ethtool_ops(struct net_device *netdev);

static void atl2_check_options(struct atl2_adapter *adapter);

/*
 * atl2_sw_init - Initialize general software structures (struct atl2_adapter)
 * @adapter: board private structure to initialize
 *
 * atl2_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
{
	struct atl2_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	adapter->wol = 0;
	adapter->ict = 50000;		/* ~100ms */
	adapter->link_speed = SPEED_0;	/* hardware init */
	adapter->link_duplex = FULL_DUPLEX;

	hw->phy_configured = false;
	hw->preamble_len = 7;
	hw->ipgt = 0x60;
	hw->min_ifg = 0x50;
	hw->ipgr1 = 0x40;
	hw->ipgr2 = 0x60;
	hw->retry_buf = 2;
	hw->max_retry = 0xf;
	hw->lcol = 0x37;
	hw->jam_ipg = 7;
	hw->fc_rxd_hi = 0;
	hw->fc_rxd_lo = 0;
	hw->max_frame_size = adapter->netdev->mtu;

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tx_lock);

	set_bit(__ATL2_DOWN, &adapter->flags);

	return 0;
}

/*
 * atl2_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 */
static void atl2_set_multi(struct net_device *netdev)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct atl2_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	u32 rctl;
	u32 hash_value;

	/* Check for Promiscuous and All Multicast modes */
	rctl = ATL2_READ_REG(hw, REG_MAC_CTRL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= MAC_CTRL_PROMIS_EN;
	} else if (netdev->flags & IFF_ALLMULTI) {
		rctl |= MAC_CTRL_MC_ALL_EN;
		rctl &= ~MAC_CTRL_PROMIS_EN;
	} else
		rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);

	ATL2_WRITE_REG(hw, REG_MAC_CTRL, rctl);

	/* clear the old settings from the multicast hash table */
	ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
	ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);

	/* compute the mc addresses' hash values, and put them into the hash table */
	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
		hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr);
		atl2_hash_set(hw, hash_value);
	}
}
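
/*
 * Illustrative sketch (not part of this driver): atl2_hash_mc_addr() and
 * atl2_hash_set() above follow the usual NIC multicast filter scheme -- a
 * CRC32 of the 6-byte address is bit-reversed and its top 6 bits select one
 * of 64 bits spread across two 32-bit hash registers. The helper names and
 * the exact register/bit layout here are assumptions for illustration, not
 * atl2's actual hardware definition.
 */
static inline u32 example_hash_mc_addr(const u8 *mc_addr)
{
	u32 crc = ether_crc_le(ETH_ALEN, mc_addr);
	u32 value = 0;
	int i;

	/* bit-reverse the CRC so its most significant bits come first */
	for (i = 0; i < 32; i++)
		value |= ((crc >> i) & 1) << (31 - i);
	return value;
}

static inline void example_hash_set(u32 hash_table[2], u32 hash_value)
{
	u32 bit = (hash_value >> 26) & 0x1f;	/* bit within one register */
	u32 reg = hash_value >> 31;		/* which of the two registers */

	hash_table[reg] |= 1U << bit;
}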

static void init_ring_ptrs(struct atl2_adapter *adapter)
{
	/* Read / Write Ptr Initialize: */
	adapter->txd_write_ptr = 0;
	atomic_set(&adapter->txd_read_ptr, 0);

	adapter->rxd_read_ptr = 0;
	adapter->rxd_write_ptr = 0;

	atomic_set(&adapter->txs_write_ptr, 0);
	adapter->txs_next_clear = 0;
}

/*
 * atl2_configure - Configure the Transmit & Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx/Rx unit of the MAC after a reset.
 */
static int atl2_configure(struct atl2_adapter *adapter)
{
	struct atl2_hw *hw = &adapter->hw;
	u32 value;

	/* clear interrupt status */
	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0xffffffff);

	/* set MAC Address */
	value = (((u32)hw->mac_addr[2]) << 24) |
		(((u32)hw->mac_addr[3]) << 16) |
		(((u32)hw->mac_addr[4]) << 8) |
		(((u32)hw->mac_addr[5]));
	ATL2_WRITE_REG(hw, REG_MAC_STA_ADDR, value);
	value = (((u32)hw->mac_addr[0]) << 8) |
		(((u32)hw->mac_addr[1]));
	ATL2_WRITE_REG(hw, (REG_MAC_STA_ADDR+4), value);

	/* HI base address */
	ATL2_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
		(u32)((adapter->ring_dma & 0xffffffff00000000ULL) >> 32));

	/* LO base address */
	ATL2_WRITE_REG(hw, REG_TXD_BASE_ADDR_LO,
		(u32)(adapter->txd_dma & 0x00000000ffffffffULL));
	ATL2_WRITE_REG(hw, REG_TXS_BASE_ADDR_LO,
		(u32)(adapter->txs_dma & 0x00000000ffffffffULL));
	ATL2_WRITE_REG(hw, REG_RXD_BASE_ADDR_LO,
		(u32)(adapter->rxd_dma & 0x00000000ffffffffULL));

	/* element count */
	ATL2_WRITE_REGW(hw, REG_TXD_MEM_SIZE, (u16)(adapter->txd_ring_size/4));
	ATL2_WRITE_REGW(hw, REG_TXS_MEM_SIZE, (u16)adapter->txs_ring_size);
	ATL2_WRITE_REGW(hw, REG_RXD_BUF_NUM, (u16)adapter->rxd_ring_size);

	/* config Internal SRAM */
	/*
	ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_tx_end);
	ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_rx_end);
	*/

	/* config IPG/IFG */
	value = (((u32)hw->ipgt & MAC_IPG_IFG_IPGT_MASK) <<
		MAC_IPG_IFG_IPGT_SHIFT) |
		(((u32)hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) <<
		MAC_IPG_IFG_MIFG_SHIFT) |
		(((u32)hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) <<
		MAC_IPG_IFG_IPGR1_SHIFT) |
		(((u32)hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) <<
		MAC_IPG_IFG_IPGR2_SHIFT);
	ATL2_WRITE_REG(hw, REG_MAC_IPG_IFG, value);

	/* config Half-Duplex Control */
	value = ((u32)hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
		(((u32)hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) <<
		MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
		MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
		(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
		(((u32)hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) <<
		MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
	ATL2_WRITE_REG(hw, REG_MAC_HALF_DUPLX_CTRL, value);

	/* set Interrupt Moderator Timer */
	ATL2_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, adapter->imt);
	ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_ITIMER_EN);

	/* set Interrupt Clear Timer */
	ATL2_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, adapter->ict);

	/* set MTU */
	ATL2_WRITE_REG(hw, REG_MTU, adapter->netdev->mtu +
		ENET_HEADER_SIZE + VLAN_SIZE + ETHERNET_FCS_SIZE);

	/* 1590 */
	ATL2_WRITE_REG(hw, REG_TX_CUT_THRESH, 0x177);

	/* flow control */
	ATL2_WRITE_REGW(hw, REG_PAUSE_ON_TH, hw->fc_rxd_hi);
	ATL2_WRITE_REGW(hw, REG_PAUSE_OFF_TH, hw->fc_rxd_lo);

	/* Init mailbox */
	ATL2_WRITE_REGW(hw, REG_MB_TXD_WR_IDX, (u16)adapter->txd_write_ptr);
	ATL2_WRITE_REGW(hw, REG_MB_RXD_RD_IDX, (u16)adapter->rxd_read_ptr);

	/* enable DMA read/write */
	ATL2_WRITE_REGB(hw, REG_DMAR, DMAR_EN);
	ATL2_WRITE_REGB(hw, REG_DMAW, DMAW_EN);

	value = ATL2_READ_REG(&adapter->hw, REG_ISR);
	if ((value & ISR_PHY_LINKDOWN) != 0)
		value = 1; /* config failed */
	else
		value = 0;

	/* clear all interrupt status */
	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0x3fffffff);
	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
	return value;
}

/*
 * atl2_setup_ring_resources - allocate Tx / RX descriptor resources
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;
	u8 offset = 0;

	/* real ring DMA buffer */
	adapter->ring_size = size =
		adapter->txd_ring_size * 1 + 7 +	/* dword align */
		adapter->txs_ring_size * 4 + 7 +	/* dword align */
		adapter->rxd_ring_size * 1536 + 127;	/* 128bytes align */

	adapter->ring_vir_addr = pci_alloc_consistent(pdev, size,
		&adapter->ring_dma);
	if (!adapter->ring_vir_addr)
		return -ENOMEM;
	memset(adapter->ring_vir_addr, 0, adapter->ring_size);

	/* Init TXD Ring */
	adapter->txd_dma = adapter->ring_dma;
	offset = (adapter->txd_dma & 0x7) ? (8 - (adapter->txd_dma & 0x7)) : 0;
	adapter->txd_dma += offset;
	adapter->txd_ring = (struct tx_pkt_header *) (adapter->ring_vir_addr +
		offset);

	/* Init TXS Ring */
	adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size;
	offset = (adapter->txs_dma & 0x7) ? (8 - (adapter->txs_dma & 0x7)) : 0;
	adapter->txs_dma += offset;
	adapter->txs_ring = (struct tx_pkt_status *)
		(((u8 *)adapter->txd_ring) + (adapter->txd_ring_size + offset));

	/* Init RXD Ring */
	adapter->rxd_dma = adapter->txs_dma + adapter->txs_ring_size * 4;
	offset = (adapter->rxd_dma & 127) ?
		(128 - (adapter->rxd_dma & 127)) : 0;
	if (offset > 7)
		offset -= 8;
	else
		offset += (128 - 8);

	adapter->rxd_dma += offset;
	adapter->rxd_ring = (struct rx_desc *) (((u8 *)adapter->txs_ring) +
		(adapter->txs_ring_size * 4 + offset));

	/*
	 * Read / Write Ptr Initialize:
	 * init_ring_ptrs(adapter);
	 */
	return 0;
}
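
/*
 * Illustrative sketch (not part of this driver): each "offset" computed in
 * atl2_setup_ring_resources() is the standard "pad a DMA address up to the
 * next power-of-two boundary" calculation, which a generic helper expresses
 * as below (the RXD ring then applies its own extra +/-8 adjustment on top
 * of the 128-byte pad). The helper name is made up for the example.
 */
static inline u32 example_align_pad(u64 dma_addr, u32 align)
{
	/* bytes to add so that (dma_addr + pad) % align == 0;
	 * align must be a power of two */
	return (u32)(-dma_addr & (align - 1));
}

/* e.g. the TXD init above amounts to: txd_dma += example_align_pad(txd_dma, 8); */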

/*
 * atl2_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */
static inline void atl2_irq_enable(struct atl2_adapter *adapter)
{
	ATL2_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
	ATL2_WRITE_FLUSH(&adapter->hw);
}

/*
 * atl2_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 */
static inline void atl2_irq_disable(struct atl2_adapter *adapter)
{
	ATL2_WRITE_REG(&adapter->hw, REG_IMR, 0);
	ATL2_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}

#ifdef NETIF_F_HW_VLAN_TX
static void atl2_vlan_rx_register(struct net_device *netdev,
	struct vlan_group *grp)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	u32 ctrl;

	atl2_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
		ctrl |= MAC_CTRL_RMV_VLAN;
		ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
		ctrl &= ~MAC_CTRL_RMV_VLAN;
		ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
	}

	atl2_irq_enable(adapter);
}

static void atl2_restore_vlan(struct atl2_adapter *adapter)
{
	atl2_vlan_rx_register(adapter->netdev, adapter->vlgrp);
}
#endif

static void atl2_intr_rx(struct atl2_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct rx_desc *rxd;
	struct sk_buff *skb;

	do {
		rxd = adapter->rxd_ring+adapter->rxd_write_ptr;
		if (!rxd->status.update)
			break; /* end of rx */

		/* clear this flag at once */
		rxd->status.update = 0;

		if (rxd->status.ok && rxd->status.pkt_size >= 60) {
			int rx_size = (int)(rxd->status.pkt_size - 4);
			/* alloc new buffer */
			skb = netdev_alloc_skb(netdev, rx_size + NET_IP_ALIGN);
			if (NULL == skb) {
				printk(KERN_WARNING
					"%s: Mem squeeze, deferring packet.\n",
					netdev->name);
				/*
				 * Check that some rx space is free. If not,
				 * free one and mark stats->rx_dropped++.
				 */
				adapter->net_stats.rx_dropped++;
				break;
			}
			skb_reserve(skb, NET_IP_ALIGN);
			skb->dev = netdev;
			memcpy(skb->data, rxd->packet, rx_size);
			skb_put(skb, rx_size);
			skb->protocol = eth_type_trans(skb, netdev);
#ifdef NETIF_F_HW_VLAN_TX
			if (adapter->vlgrp && (rxd->status.vlan)) {
				u16 vlan_tag = (rxd->status.vtag>>4) |
					((rxd->status.vtag&7) << 13) |
					((rxd->status.vtag&8) << 9);
				vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
			} else
#endif
			netif_rx(skb);
			adapter->net_stats.rx_bytes += rx_size;
			adapter->net_stats.rx_packets++;
			netdev->last_rx = jiffies;
		} else {
			adapter->net_stats.rx_errors++;

			if (rxd->status.ok && rxd->status.pkt_size <= 60)
				adapter->net_stats.rx_length_errors++;
			if (rxd->status.mcast)
				adapter->net_stats.multicast++;
			if (rxd->status.crc)
				adapter->net_stats.rx_crc_errors++;
			if (rxd->status.align)
				adapter->net_stats.rx_frame_errors++;
		}

		/* advance write ptr */
		if (++adapter->rxd_write_ptr == adapter->rxd_ring_size)
			adapter->rxd_write_ptr = 0;
	} while (1);

	/* update mailbox? */
	adapter->rxd_read_ptr = adapter->rxd_write_ptr;
	ATL2_WRITE_REGW(&adapter->hw, REG_MB_RXD_RD_IDX, adapter->rxd_read_ptr);
}
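
/*
 * Illustrative sketch (not part of this driver): the vtag bit-shuffle above
 * and its transmit-side counterpart in atl2_xmit_frame() are exact inverses.
 * With 802.1Q TCI bits [15:13] = priority, [12] = CFI and [11:0] = VID, the
 * hardware stores the tag with the VID first. The function names are made
 * up; only the shift/mask expressions are taken from the driver.
 */
static inline u16 example_tci_to_hw(u16 tci)
{
	return (tci << 4) | (tci >> 13) | ((tci >> 9) & 0x8);
}

static inline u16 example_hw_to_tci(u16 vtag)
{
	return (vtag >> 4) | ((vtag & 7) << 13) | ((vtag & 8) << 9);
}

/* for any 16-bit tci: example_hw_to_tci(example_tci_to_hw(tci)) == tci */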

static void atl2_intr_tx(struct atl2_adapter *adapter)
{
	u32 txd_read_ptr;
	u32 txs_write_ptr;
	struct tx_pkt_status *txs;
	struct tx_pkt_header *txph;
	int free_hole = 0;

	do {
		txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
		txs = adapter->txs_ring + txs_write_ptr;
		if (!txs->update)
			break; /* tx stop here */

		free_hole = 1;
		txs->update = 0;

		if (++txs_write_ptr == adapter->txs_ring_size)
			txs_write_ptr = 0;
		atomic_set(&adapter->txs_write_ptr, (int)txs_write_ptr);

		txd_read_ptr = (u32) atomic_read(&adapter->txd_read_ptr);
		txph = (struct tx_pkt_header *)
			(((u8 *)adapter->txd_ring) + txd_read_ptr);

		if (txph->pkt_size != txs->pkt_size) {
			struct tx_pkt_status *old_txs = txs;
			printk(KERN_WARNING
				"%s: txs packet size not consistent with txd"
				" txd_:0x%08x, txs_:0x%08x!\n",
				adapter->netdev->name,
				*(u32 *)txph, *(u32 *)txs);
			printk(KERN_WARNING
				"txd read ptr: 0x%x\n",
				txd_read_ptr);
			txs = adapter->txs_ring + txs_write_ptr;
			printk(KERN_WARNING
				"txs-behind:0x%08x\n",
				*(u32 *)txs);
			if (txs_write_ptr < 2) {
				txs = adapter->txs_ring +
					(adapter->txs_ring_size +
					txs_write_ptr - 2);
			} else {
				txs = adapter->txs_ring + (txs_write_ptr - 2);
			}
			printk(KERN_WARNING
				"txs-before:0x%08x\n",
				*(u32 *)txs);
			txs = old_txs;
		}

		/* 4 bytes for the TPH, then round the packet size up to a dword */
		txd_read_ptr += (((u32)(txph->pkt_size) + 7) & ~3);
		if (txd_read_ptr >= adapter->txd_ring_size)
			txd_read_ptr -= adapter->txd_ring_size;

		atomic_set(&adapter->txd_read_ptr, (int)txd_read_ptr);

		/* tx statistics: */
		if (txs->ok)
			adapter->net_stats.tx_packets++;
		else
			adapter->net_stats.tx_errors++;

		if (txs->defer)
			adapter->net_stats.collisions++;
		if (txs->abort_col)
			adapter->net_stats.tx_aborted_errors++;
		if (txs->late_col)
			adapter->net_stats.tx_window_errors++;
		if (txs->underun)
			adapter->net_stats.tx_fifo_errors++;
	} while (1);

	if (free_hole) {
		if (netif_queue_stopped(adapter->netdev) &&
			netif_carrier_ok(adapter->netdev))
			netif_wake_queue(adapter->netdev);
	}
}

static void atl2_check_for_link(struct atl2_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 phy_data = 0;

	spin_lock(&adapter->stats_lock);
	atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
	atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
	spin_unlock(&adapter->stats_lock);

	/* notify upper layer link down ASAP */
	if (!(phy_data & BMSR_LSTATUS)) {	/* Link Down */
		if (netif_carrier_ok(netdev)) {	/* old link state: Up */
			printk(KERN_INFO "%s: %s NIC Link is Down\n",
				atl2_driver_name, netdev->name);
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}
	schedule_work(&adapter->link_chg_task);
}

static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
{
	u16 phy_data;
	spin_lock(&adapter->stats_lock);
	atl2_read_phy_reg(&adapter->hw, 19, &phy_data);
	spin_unlock(&adapter->stats_lock);
}

/*
 * atl2_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to the network interface device structure
 */
static irqreturn_t atl2_intr(int irq, void *data)
{
	struct atl2_adapter *adapter = netdev_priv(data);
	struct atl2_hw *hw = &adapter->hw;
	u32 status;

	status = ATL2_READ_REG(hw, REG_ISR);
	if (0 == status)
		return IRQ_NONE;

	/* link event */
	if (status & ISR_PHY)
		atl2_clear_phy_int(adapter);

	/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
	ATL2_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);

	/* check if PCIE PHY Link down */
	if (status & ISR_PHY_LINKDOWN) {
		if (netif_running(adapter->netdev)) { /* reset MAC */
			ATL2_WRITE_REG(hw, REG_ISR, 0);
			ATL2_WRITE_REG(hw, REG_IMR, 0);
			ATL2_WRITE_FLUSH(hw);
			schedule_work(&adapter->reset_task);
			return IRQ_HANDLED;
		}
	}

	/* check if DMA read/write error? */
	if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
		ATL2_WRITE_REG(hw, REG_ISR, 0);
		ATL2_WRITE_REG(hw, REG_IMR, 0);
		ATL2_WRITE_FLUSH(hw);
		schedule_work(&adapter->reset_task);
		return IRQ_HANDLED;
	}

	/* link event */
	if (status & (ISR_PHY | ISR_MANUAL)) {
		adapter->net_stats.tx_carrier_errors++;
		atl2_check_for_link(adapter);
	}

	/* transmit event */
	if (status & ISR_TX_EVENT)
		atl2_intr_tx(adapter);

	/* rx exception */
	if (status & ISR_RX_EVENT)
		atl2_intr_rx(adapter);

	/* re-enable Interrupt */
	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
	return IRQ_HANDLED;
}

static int atl2_request_irq(struct atl2_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int flags, err = 0;

	flags = IRQF_SHARED;
#ifdef CONFIG_PCI_MSI
	adapter->have_msi = true;
	err = pci_enable_msi(adapter->pdev);
	if (err)
		adapter->have_msi = false;

	if (adapter->have_msi)
		flags &= ~IRQF_SHARED;
#endif

	return request_irq(adapter->pdev->irq, &atl2_intr, flags, netdev->name,
		netdev);
}

/*
 * atl2_free_ring_resources - Free Tx / RX descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void atl2_free_ring_resources(struct atl2_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	pci_free_consistent(pdev, adapter->ring_size, adapter->ring_vir_addr,
		adapter->ring_dma);
}

/*
 * atl2_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int atl2_open(struct net_device *netdev)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	int err;
	u32 val;

	/* disallow open during test */
	if (test_bit(__ATL2_TESTING, &adapter->flags))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = atl2_setup_ring_resources(adapter);
	if (err)
		return err;

	err = atl2_init_hw(&adapter->hw);
	if (err) {
		err = -EIO;
		goto err_init_hw;
	}

	/* hardware has been reset, we need to reload some things */
	atl2_set_multi(netdev);
	init_ring_ptrs(adapter);

#ifdef NETIF_F_HW_VLAN_TX
	atl2_restore_vlan(adapter);
#endif

	if (atl2_configure(adapter)) {
		err = -EIO;
		goto err_config;
	}

	err = atl2_request_irq(adapter);
	if (err)
		goto err_req_irq;

	clear_bit(__ATL2_DOWN, &adapter->flags);

	mod_timer(&adapter->watchdog_timer, jiffies + 4*HZ);

	val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
	ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
		val | MASTER_CTRL_MANUAL_INT);

	atl2_irq_enable(adapter);

	return 0;

err_init_hw:
err_req_irq:
err_config:
	atl2_free_ring_resources(adapter);
	atl2_reset_hw(&adapter->hw);

	return err;
}

static void atl2_down(struct atl2_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__ATL2_DOWN, &adapter->flags);

#ifdef NETIF_F_LLTX
	netif_stop_queue(netdev);
#else
	netif_tx_disable(netdev);
#endif

	/* reset MAC to disable all RX/TX */
	atl2_reset_hw(&adapter->hw);
	msleep(1);

	atl2_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_config_timer);
	clear_bit(0, &adapter->cfg_phy);

	netif_carrier_off(netdev);
	adapter->link_speed = SPEED_0;
	adapter->link_duplex = -1;
}

static void atl2_free_irq(struct atl2_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);

#ifdef CONFIG_PCI_MSI
	if (adapter->have_msi)
		pci_disable_msi(adapter->pdev);
#endif
}

/*
 * atl2_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int atl2_close(struct net_device *netdev)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));

	atl2_down(adapter);
	atl2_free_irq(adapter);
	atl2_free_ring_resources(adapter);

	return 0;
}

static inline int TxsFreeUnit(struct atl2_adapter *adapter)
{
	u32 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);

	return (adapter->txs_next_clear >= txs_write_ptr) ?
		(int) (adapter->txs_ring_size - adapter->txs_next_clear +
		txs_write_ptr - 1) :
		(int) (txs_write_ptr - adapter->txs_next_clear - 1);
}

static inline int TxdFreeBytes(struct atl2_adapter *adapter)
{
	u32 txd_read_ptr = (u32)atomic_read(&adapter->txd_read_ptr);

	return (adapter->txd_write_ptr >= txd_read_ptr) ?
		(int) (adapter->txd_ring_size - adapter->txd_write_ptr +
		txd_read_ptr - 1) :
		(int) (txd_read_ptr - adapter->txd_write_ptr - 1);
}
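
/*
 * Illustrative sketch (not part of this driver): both helpers above are the
 * classic circular-buffer free-space formula, free = (read - write - 1) mod
 * size, which keeps one unit permanently unused so that a full ring can be
 * told apart from an empty one. An equivalent branch-free form:
 */
static inline u32 example_ring_free(u32 read, u32 write, u32 size)
{
	/* matches the two-branch versions whenever read, write < size */
	return (read + size - write - 1) % size;
}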

static int atl2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	struct tx_pkt_header *txph;
	u32 offset, copy_len;
	int txs_unused;
	int txbuf_unused;

	if (test_bit(__ATL2_DOWN, &adapter->flags)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

#ifdef NETIF_F_LLTX
	local_irq_save(flags);
	if (!spin_trylock(&adapter->tx_lock)) {
		/* Collision - tell upper layer to requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}
#else
	spin_lock_irqsave(&adapter->tx_lock, flags);
#endif
	txs_unused = TxsFreeUnit(adapter);
	txbuf_unused = TxdFreeBytes(adapter);

	if (skb->len + sizeof(struct tx_pkt_header) + 4 > txbuf_unused ||
		txs_unused < 1) {
		/* not enough resources */
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	offset = adapter->txd_write_ptr;

	txph = (struct tx_pkt_header *) (((u8 *)adapter->txd_ring) + offset);

	*(u32 *)txph = 0;
	txph->pkt_size = skb->len;

	offset += 4;
	if (offset >= adapter->txd_ring_size)
		offset -= adapter->txd_ring_size;
	copy_len = adapter->txd_ring_size - offset;
	if (copy_len >= skb->len) {
		memcpy(((u8 *)adapter->txd_ring) + offset, skb->data, skb->len);
		offset += ((u32)(skb->len + 3) & ~3);
	} else {
		memcpy(((u8 *)adapter->txd_ring)+offset, skb->data, copy_len);
		memcpy((u8 *)adapter->txd_ring, skb->data+copy_len,
			skb->len-copy_len);
		offset = ((u32)(skb->len-copy_len + 3) & ~3);
	}
#ifdef NETIF_F_HW_VLAN_TX
	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		vlan_tag = (vlan_tag << 4) |
			(vlan_tag >> 13) |
			((vlan_tag >> 9) & 0x8);
		txph->ins_vlan = 1;
		txph->vlan = vlan_tag;
	}
#endif
	if (offset >= adapter->txd_ring_size)
		offset -= adapter->txd_ring_size;
	adapter->txd_write_ptr = offset;

	/* clear txs before send */
	adapter->txs_ring[adapter->txs_next_clear].update = 0;
	if (++adapter->txs_next_clear == adapter->txs_ring_size)
		adapter->txs_next_clear = 0;

	ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX,
		(adapter->txd_write_ptr >> 2));

	spin_unlock_irqrestore(&adapter->tx_lock, flags);

	netdev->trans_start = jiffies;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
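
/*
 * Illustrative sketch (not part of this driver): atl2 transmits by copying
 * the whole frame into one contiguous DMA byte ring instead of DMA-mapping
 * the skb, so a frame that hits the end of the ring is written in two
 * pieces. The wrap-around copy in atl2_xmit_frame() boils down to this
 * helper (name made up for the example):
 */
static inline void example_ring_copy(u8 *ring, u32 ring_size, u32 offset,
				     const u8 *data, u32 len)
{
	u32 tail = ring_size - offset;	/* bytes left before the wrap point */

	if (len <= tail) {
		memcpy(ring + offset, data, len);
	} else {
		memcpy(ring + offset, data, tail);
		memcpy(ring, data + tail, len - tail);
	}
}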

/*
 * atl2_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 */
static struct net_device_stats *atl2_get_stats(struct net_device *netdev)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	return &adapter->net_stats;
}

/*
 * atl2_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct atl2_hw *hw = &adapter->hw;

	if ((new_mtu < 40) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE)))
		return -EINVAL;

	/* set MTU */
	if (hw->max_frame_size != new_mtu) {
		netdev->mtu = new_mtu;
		ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ENET_HEADER_SIZE +
			VLAN_SIZE + ETHERNET_FCS_SIZE);
	}

	return 0;
}

/*
 * atl2_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int atl2_set_mac(struct net_device *netdev, void *p)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (netif_running(netdev))
		return -EBUSY;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);

	atl2_set_mac_addr(&adapter->hw);

	return 0;
}
/*
 * atl2_mii_ioctl - handle the MII ioctls
 * @netdev: network interface device structure
 * @ifr: pointer to the user request data
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 */
static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long flags;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (atl2_read_phy_reg(&adapter->hw,
			data->reg_num & 0x1F, &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (atl2_write_phy_reg(&adapter->hw, data->reg_num,
			data->val_in)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

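/*
 * Illustrative sketch (not part of this driver): how the MII ioctls above
 * are reached from user space -- the same pattern mii-tool uses. Error
 * handling is omitted and "eth0" is an assumption.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr = {0};
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id (0 for atl2)
 *	mii->reg_num = MII_BMSR;	// basic mode status register
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// register value lands in mii->val_out
 */
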
/*
 * atl2_ioctl - handle a device ioctl
 * @netdev: network interface device structure
 * @ifr: pointer to the user request data
 * @cmd: ioctl command
 */
static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return atl2_mii_ioctl(netdev, ifr, cmd);
#ifdef ETHTOOL_OPS_COMPAT
	case SIOCETHTOOL:
		return ethtool_ioctl(ifr);
#endif
	default:
		return -EOPNOTSUPP;
	}
}

/*
 * atl2_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 */
static void atl2_tx_timeout(struct net_device *netdev)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

/*
 * atl2_watchdog - Timer Call-back
 * @data: pointer to the atl2_adapter cast into an unsigned long
 */
static void atl2_watchdog(unsigned long data)
{
	struct atl2_adapter *adapter = (struct atl2_adapter *) data;
	u32 drop_rxd, drop_rxs;
	unsigned long flags;

	if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
		spin_lock_irqsave(&adapter->stats_lock, flags);
		drop_rxd = ATL2_READ_REG(&adapter->hw, REG_STS_RXD_OV);
		drop_rxs = ATL2_READ_REG(&adapter->hw, REG_STS_RXS_OV);
		adapter->net_stats.rx_over_errors += (drop_rxd+drop_rxs);
		spin_unlock_irqrestore(&adapter->stats_lock, flags);

		/* Reset the timer */
		mod_timer(&adapter->watchdog_timer, jiffies + 4 * HZ);
	}
}

/*
 * atl2_phy_config - Timer Call-back
 * @data: pointer to the atl2_adapter cast into an unsigned long
 */
static void atl2_phy_config(unsigned long data)
{
	struct atl2_adapter *adapter = (struct atl2_adapter *) data;
	struct atl2_hw *hw = &adapter->hw;
	unsigned long flags;

	spin_lock_irqsave(&adapter->stats_lock, flags);
	atl2_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
	atl2_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN |
		MII_CR_RESTART_AUTO_NEG);
	spin_unlock_irqrestore(&adapter->stats_lock, flags);
	clear_bit(0, &adapter->cfg_phy);
}

static int atl2_up(struct atl2_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	u32 val;

	/* hardware has been reset, we need to reload some things */

	err = atl2_init_hw(&adapter->hw);
	if (err) {
		err = -EIO;
		return err;
	}

	atl2_set_multi(netdev);
	init_ring_ptrs(adapter);

#ifdef NETIF_F_HW_VLAN_TX
	atl2_restore_vlan(adapter);
#endif

	if (atl2_configure(adapter)) {
		err = -EIO;
		goto err_up;
	}

	clear_bit(__ATL2_DOWN, &adapter->flags);

	val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
	ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val |
		MASTER_CTRL_MANUAL_INT);

	atl2_irq_enable(adapter);

err_up:
	return err;
}

static void atl2_reinit_locked(struct atl2_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
		msleep(1);
	atl2_down(adapter);
	atl2_up(adapter);
	clear_bit(__ATL2_RESETTING, &adapter->flags);
}

static void atl2_reset_task(struct work_struct *work)
{
	struct atl2_adapter *adapter;
	adapter = container_of(work, struct atl2_adapter, reset_task);

	atl2_reinit_locked(adapter);
}

static void atl2_setup_mac_ctrl(struct atl2_adapter *adapter)
{
	u32 value;
	struct atl2_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* Config MAC CTRL Register */
	value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;

	/* duplex */
	if (FULL_DUPLEX == adapter->link_duplex)
		value |= MAC_CTRL_DUPLX;

	/* flow control */
	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);

	/* PAD & CRC */
	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);

	/* preamble length */
	value |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) <<
		MAC_CTRL_PRMLEN_SHIFT);

	/* vlan */
	if (adapter->vlgrp)
		value |= MAC_CTRL_RMV_VLAN;

	/* filter mode */
	value |= MAC_CTRL_BC_EN;
	if (netdev->flags & IFF_PROMISC)
		value |= MAC_CTRL_PROMIS_EN;
	else if (netdev->flags & IFF_ALLMULTI)
		value |= MAC_CTRL_MC_ALL_EN;

	/* half retry buffer */
	value |= (((u32)(adapter->hw.retry_buf &
		MAC_CTRL_HALF_LEFT_BUF_MASK)) << MAC_CTRL_HALF_LEFT_BUF_SHIFT);

	ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
}

static int atl2_check_link(struct atl2_adapter *adapter)
{
	struct atl2_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int ret_val;
	u16 speed, duplex, phy_data;
	int reconfig = 0;

	/* MII_BMSR must be read twice (the link status bit is latched) */
	atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
	if (!(phy_data&BMSR_LSTATUS)) {	/* link down */
		if (netif_carrier_ok(netdev)) {	/* old link state: Up */
			u32 value;
			/* disable rx */
			value = ATL2_READ_REG(hw, REG_MAC_CTRL);
			value &= ~MAC_CTRL_RX_EN;
			ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
		return 0;
	}

	/* Link Up */
	ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
	if (ret_val)
		return ret_val;
	switch (hw->MediaType) {
	case MEDIA_TYPE_100M_FULL:
		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_HALF:
		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_FULL:
		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_HALF:
		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	}
	/* link result is our setting */
	if (reconfig == 0) {
		if (adapter->link_speed != speed ||
			adapter->link_duplex != duplex) {
			adapter->link_speed = speed;
			adapter->link_duplex = duplex;
			atl2_setup_mac_ctrl(adapter);
			printk(KERN_INFO "%s: %s NIC Link is Up<%d Mbps %s>\n",
				atl2_driver_name, netdev->name,
				adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
					"Full Duplex" : "Half Duplex");
		}

		if (!netif_carrier_ok(netdev)) { /* Link down -> Up */
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
		return 0;
	}

	/* change original link status */
	if (netif_carrier_ok(netdev)) {
		u32 value;
		/* disable rx */
		value = ATL2_READ_REG(hw, REG_MAC_CTRL);
		value &= ~MAC_CTRL_RX_EN;
		ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);

		adapter->link_speed = SPEED_0;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}

	/* auto-neg: insert a timer to re-configure the PHY
	 * (if the interval is smaller than 5 seconds, something is strange) */
	if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
		if (!test_and_set_bit(0, &adapter->cfg_phy))
			mod_timer(&adapter->phy_config_timer, jiffies + 5 * HZ);
	}

	return 0;
}

/*
 * atl2_link_chg_task - deal with a link change event out of interrupt context
 * @work: work struct embedded in the adapter's private structure
 */
static void atl2_link_chg_task(struct work_struct *work)
{
	struct atl2_adapter *adapter;
	unsigned long flags;

	adapter = container_of(work, struct atl2_adapter, link_chg_task);

	spin_lock_irqsave(&adapter->stats_lock, flags);
	atl2_check_link(adapter);
	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}

static void atl2_setup_pcicmd(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);

	if (cmd & PCI_COMMAND_INTX_DISABLE)
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
	if (cmd & PCI_COMMAND_IO)
		cmd &= ~PCI_COMMAND_IO;
	if (0 == (cmd & PCI_COMMAND_MEMORY))
		cmd |= PCI_COMMAND_MEMORY;
	if (0 == (cmd & PCI_COMMAND_MASTER))
		cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(pdev, PCI_COMMAND, cmd);

	/*
	 * Some motherboard BIOS (PXE/EFI) drivers may leave the PME bit set
	 * when they hand control to the OS (Windows/Linux), so clear it here
	 * before the NIC can work normally.
	 */
	pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
}

/*
 * atl2_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in atl2_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * atl2_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int __devinit atl2_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct atl2_adapter *adapter;
	static int cards_found;
	unsigned long mmio_start;
	int mmio_len;
	int err;

	cards_found = 0;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/*
	 * atl2 is a shared-high-32-bit device, so we're stuck with 32-bit DMA
	 * until the kernel has the proper infrastructure to support 64-bit DMA
	 * on these devices.
	 */
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) &&
		pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
		goto err_dma;
	}

	/* Mark all PCI regions associated with PCI device
	 * pdev as being reserved by owner atl2_driver_name */
	err = pci_request_regions(pdev, atl2_driver_name);
	if (err)
		goto err_pci_reg;

	/* Enables bus-mastering on the device and calls
	 * pcibios_set_master to do the needed arch specific settings */
	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct atl2_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;

	mmio_start = pci_resource_start(pdev, 0x0);
	mmio_len = pci_resource_len(pdev, 0x0);

	adapter->hw.mem_rang = (u32)mmio_len;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	atl2_setup_pcicmd(pdev);

	netdev->open = &atl2_open;
	netdev->stop = &atl2_close;
	netdev->hard_start_xmit = &atl2_xmit_frame;
	netdev->get_stats = &atl2_get_stats;
	netdev->set_multicast_list = &atl2_set_multi;
	netdev->set_mac_address = &atl2_set_mac;
	netdev->change_mtu = &atl2_change_mtu;
	netdev->do_ioctl = &atl2_ioctl;
	atl2_set_ethtool_ops(netdev);

#ifdef HAVE_TX_TIMEOUT
	netdev->tx_timeout = &atl2_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
#endif
#ifdef NETIF_F_HW_VLAN_TX
	netdev->vlan_rx_register = atl2_vlan_rx_register;
#endif
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	adapter->bd_number = cards_found;
	adapter->pci_using_64 = false;

	/* setup the private structure */
	err = atl2_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;

#ifdef NETIF_F_HW_VLAN_TX
	netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
#endif

#ifdef NETIF_F_LLTX
	netdev->features |= NETIF_F_LLTX;
#endif

	/* Init PHY as early as possible due to power saving issue */
	atl2_phy_init(&adapter->hw);

	/* reset the controller to
	 * put the device in a known good starting state */

	if (atl2_reset_hw(&adapter->hw)) {
		err = -EIO;
		goto err_reset;
	}

	/* copy the MAC address out of the EEPROM */
	atl2_read_mac_addr(&adapter->hw);
	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
/* FIXME: do we still need this? */
#ifdef ETHTOOL_GPERMADDR
	memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
#else
	if (!is_valid_ether_addr(netdev->dev_addr)) {
#endif
		err = -EIO;
		goto err_eeprom;
	}

	atl2_check_options(adapter);

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &atl2_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_config_timer);
	adapter->phy_config_timer.function = &atl2_phy_config;
	adapter->phy_config_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, atl2_reset_task);
	INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);

	strcpy(netdev->name, "eth%d"); /* ?? */
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* assume we have no link for now */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
1490 | cards_found++; | ||
1491 | |||
1492 | return 0; | ||
1493 | |||
1494 | err_reset: | ||
1495 | err_register: | ||
1496 | err_sw_init: | ||
1497 | err_eeprom: | ||
1498 | iounmap(adapter->hw.hw_addr); | ||
1499 | err_ioremap: | ||
1500 | free_netdev(netdev); | ||
1501 | err_alloc_etherdev: | ||
1502 | pci_release_regions(pdev); | ||
1503 | err_pci_reg: | ||
1504 | err_dma: | ||
1505 | pci_disable_device(pdev); | ||
1506 | return err; | ||
1507 | } | ||
1508 | |||
1509 | /* | ||
1510 | * atl2_remove - Device Removal Routine | ||
1511 | * @pdev: PCI device information struct | ||
1512 | * | ||
1513 | * atl2_remove is called by the PCI subsystem to alert the driver | ||
1514 | * that it should release a PCI device. This could be caused by a | ||
1515 | * Hot-Plug event, or because the driver is going to be removed from | ||
1516 | * memory. | ||
1517 | */ | ||
1518 | /* FIXME: write the original MAC address back in case it was changed from a | ||
1519 | * BIOS-set value, as in atl1 -- CHS */ | ||
1520 | static void __devexit atl2_remove(struct pci_dev *pdev) | ||
1521 | { | ||
1522 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
1523 | struct atl2_adapter *adapter = netdev_priv(netdev); | ||
1524 | |||
1525 | /* flush_scheduled_work() may reschedule our watchdog task, so | ||
1526 | * explicitly disable watchdog tasks from being rescheduled */ | ||
1527 | set_bit(__ATL2_DOWN, &adapter->flags); | ||
1528 | |||
1529 | del_timer_sync(&adapter->watchdog_timer); | ||
1530 | del_timer_sync(&adapter->phy_config_timer); | ||
1531 | |||
1532 | flush_scheduled_work(); | ||
1533 | |||
1534 | unregister_netdev(netdev); | ||
1535 | |||
1536 | atl2_force_ps(&adapter->hw); | ||
1537 | |||
1538 | iounmap(adapter->hw.hw_addr); | ||
1539 | pci_release_regions(pdev); | ||
1540 | |||
1541 | free_netdev(netdev); | ||
1542 | |||
1543 | pci_disable_device(pdev); | ||
1544 | } | ||
1545 | |||
1546 | static int atl2_suspend(struct pci_dev *pdev, pm_message_t state) | ||
1547 | { | ||
1548 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
1549 | struct atl2_adapter *adapter = netdev_priv(netdev); | ||
1550 | struct atl2_hw *hw = &adapter->hw; | ||
1551 | u16 speed, duplex; | ||
1552 | u32 ctrl = 0; | ||
1553 | u32 wufc = adapter->wol; | ||
1554 | |||
1555 | #ifdef CONFIG_PM | ||
1556 | int retval = 0; | ||
1557 | #endif | ||
1558 | |||
1559 | netif_device_detach(netdev); | ||
1560 | |||
1561 | if (netif_running(netdev)) { | ||
1562 | WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags)); | ||
1563 | atl2_down(adapter); | ||
1564 | } | ||
1565 | |||
1566 | #ifdef CONFIG_PM | ||
1567 | retval = pci_save_state(pdev); | ||
1568 | if (retval) | ||
1569 | return retval; | ||
1570 | #endif | ||
1571 | |||
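| /* the BMSR link-status bit latches low, so read twice: the | ||
| * second read reflects the current link state */ | ||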
1572 | atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl); | ||
1573 | atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl); | ||
1574 | if (ctrl & BMSR_LSTATUS) | ||
1575 | wufc &= ~ATLX_WUFC_LNKC; | ||
1576 | |||
1577 | if (0 != (ctrl & BMSR_LSTATUS) && 0 != wufc) { | ||
1578 | u32 ret_val; | ||
1579 | /* get current link speed & duplex */ | ||
1580 | ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex); | ||
1581 | if (ret_val) { | ||
1582 | printk(KERN_DEBUG | ||
1583 | "%s: get speed&duplex error while suspend\n", | ||
1584 | atl2_driver_name); | ||
1585 | goto wol_dis; | ||
1586 | } | ||
1587 | |||
1588 | ctrl = 0; | ||
1589 | |||
1590 | /* turn on magic packet wol */ | ||
1591 | if (wufc & ATLX_WUFC_MAG) | ||
1592 | ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN); | ||
1593 | |||
1594 | /* ignore Link Chg event when Link is up */ | ||
1595 | ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl); | ||
1596 | |||
1597 | /* Config MAC CTRL Register */ | ||
1598 | ctrl = MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY; | ||
1599 | if (FULL_DUPLEX == adapter->link_duplex) | ||
1600 | ctrl |= MAC_CTRL_DUPLX; | ||
1601 | ctrl |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); | ||
1602 | ctrl |= (((u32)adapter->hw.preamble_len & | ||
1603 | MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); | ||
1604 | ctrl |= (((u32)(adapter->hw.retry_buf & | ||
1605 | MAC_CTRL_HALF_LEFT_BUF_MASK)) << | ||
1606 | MAC_CTRL_HALF_LEFT_BUF_SHIFT); | ||
1607 | if (wufc & ATLX_WUFC_MAG) { | ||
1608 | /* a magic packet may be broadcast, multicast or unicast */ | ||
1609 | ctrl |= MAC_CTRL_BC_EN; | ||
1610 | } | ||
1611 | |||
1612 | ATL2_WRITE_REG(hw, REG_MAC_CTRL, ctrl); | ||
1613 | |||
1614 | /* pcie patch */ | ||
1615 | ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC); | ||
1616 | ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; | ||
1617 | ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); | ||
1618 | ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1); | ||
1619 | ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK; | ||
1620 | ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl); | ||
1621 | |||
1622 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); | ||
1623 | goto suspend_exit; | ||
1624 | } | ||
1625 | |||
1626 | if (0 == (ctrl&BMSR_LSTATUS) && 0 != (wufc&ATLX_WUFC_LNKC)) { | ||
1627 | /* link is down, so only LINK CHG WOL event enable */ | ||
1628 | ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); | ||
1629 | ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl); | ||
1630 | ATL2_WRITE_REG(hw, REG_MAC_CTRL, 0); | ||
1631 | |||
1632 | /* pcie patch */ | ||
1633 | ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC); | ||
1634 | ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; | ||
1635 | ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); | ||
1636 | ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1); | ||
1637 | ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK; | ||
1638 | ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl); | ||
1639 | |||
1640 | hw->phy_configured = false; /* re-init PHY on resume */ | ||
1641 | |||
1642 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); | ||
1643 | |||
1644 | goto suspend_exit; | ||
1645 | } | ||
1646 | |||
1647 | wol_dis: | ||
1648 | /* WOL disabled */ | ||
1649 | ATL2_WRITE_REG(hw, REG_WOL_CTRL, 0); | ||
1650 | |||
1651 | /* pcie patch */ | ||
1652 | ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC); | ||
1653 | ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; | ||
1654 | ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); | ||
1655 | ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1); | ||
1656 | ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK; | ||
1657 | ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl); | ||
1658 | |||
1659 | atl2_force_ps(hw); | ||
1660 | hw->phy_configured = false; /* re-init PHY on resume */ | ||
1661 | |||
1662 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); | ||
1663 | |||
1664 | suspend_exit: | ||
1665 | if (netif_running(netdev)) | ||
1666 | atl2_free_irq(adapter); | ||
1667 | |||
1668 | pci_disable_device(pdev); | ||
1669 | |||
1670 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | ||
1671 | |||
1672 | return 0; | ||
1673 | } | ||
1674 | |||
1675 | #ifdef CONFIG_PM | ||
1676 | static int atl2_resume(struct pci_dev *pdev) | ||
1677 | { | ||
1678 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
1679 | struct atl2_adapter *adapter = netdev_priv(netdev); | ||
1680 | u32 err; | ||
1681 | |||
1682 | pci_set_power_state(pdev, PCI_D0); | ||
1683 | pci_restore_state(pdev); | ||
1684 | |||
1685 | err = pci_enable_device(pdev); | ||
1686 | if (err) { | ||
1687 | printk(KERN_ERR | ||
1688 | "atl2: Cannot enable PCI device from suspend\n"); | ||
1689 | return err; | ||
1690 | } | ||
1691 | |||
1692 | pci_set_master(pdev); | ||
1693 | |||
1694 | ATL2_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */ | ||
1695 | |||
1696 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
1697 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
1698 | |||
1699 | ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); | ||
1700 | |||
1701 | err = atl2_request_irq(adapter); | ||
1702 | if (netif_running(netdev) && err) | ||
1703 | return err; | ||
1704 | |||
1705 | atl2_reset_hw(&adapter->hw); | ||
1706 | |||
1707 | if (netif_running(netdev)) | ||
1708 | atl2_up(adapter); | ||
1709 | |||
1710 | netif_device_attach(netdev); | ||
1711 | |||
1712 | return 0; | ||
1713 | } | ||
1714 | #endif | ||
1715 | |||
1716 | static void atl2_shutdown(struct pci_dev *pdev) | ||
1717 | { | ||
1718 | atl2_suspend(pdev, PMSG_SUSPEND); | ||
1719 | } | ||
1720 | |||
1721 | static struct pci_driver atl2_driver = { | ||
1722 | .name = atl2_driver_name, | ||
1723 | .id_table = atl2_pci_tbl, | ||
1724 | .probe = atl2_probe, | ||
1725 | .remove = __devexit_p(atl2_remove), | ||
1726 | /* Power Management Hooks */ | ||
1727 | .suspend = atl2_suspend, | ||
1728 | #ifdef CONFIG_PM | ||
1729 | .resume = atl2_resume, | ||
1730 | #endif | ||
1731 | .shutdown = atl2_shutdown, | ||
1732 | }; | ||
1733 | |||
1734 | /* | ||
1735 | * atl2_init_module - Driver Registration Routine | ||
1736 | * | ||
1737 | * atl2_init_module is the first routine called when the driver is | ||
1738 | * loaded. All it does is register with the PCI subsystem. | ||
1739 | */ | ||
1740 | static int __init atl2_init_module(void) | ||
1741 | { | ||
1742 | printk(KERN_INFO "%s - version %s\n", atl2_driver_string, | ||
1743 | atl2_driver_version); | ||
1744 | printk(KERN_INFO "%s\n", atl2_copyright); | ||
1745 | return pci_register_driver(&atl2_driver); | ||
1746 | } | ||
1747 | module_init(atl2_init_module); | ||
1748 | |||
1749 | /* | ||
1750 | * atl2_exit_module - Driver Exit Cleanup Routine | ||
1751 | * | ||
1752 | * atl2_exit_module is called just before the driver is removed | ||
1753 | * from memory. | ||
1754 | */ | ||
1755 | static void __exit atl2_exit_module(void) | ||
1756 | { | ||
1757 | pci_unregister_driver(&atl2_driver); | ||
1758 | } | ||
1759 | module_exit(atl2_exit_module); | ||
1760 | |||
1761 | static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value) | ||
1762 | { | ||
1763 | struct atl2_adapter *adapter = hw->back; | ||
1764 | pci_read_config_word(adapter->pdev, reg, value); | ||
1765 | } | ||
1766 | |||
1767 | static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value) | ||
1768 | { | ||
1769 | struct atl2_adapter *adapter = hw->back; | ||
1770 | pci_write_config_word(adapter->pdev, reg, *value); | ||
1771 | } | ||
1772 | |||
1773 | static int atl2_get_settings(struct net_device *netdev, | ||
1774 | struct ethtool_cmd *ecmd) | ||
1775 | { | ||
1776 | struct atl2_adapter *adapter = netdev_priv(netdev); | ||
1777 | struct atl2_hw *hw = &adapter->hw; | ||
1778 | |||
1779 | ecmd->supported = (SUPPORTED_10baseT_Half | | ||
1780 | SUPPORTED_10baseT_Full | | ||
1781 | SUPPORTED_100baseT_Half | | ||
1782 | SUPPORTED_100baseT_Full | | ||
1783 | SUPPORTED_Autoneg | | ||
1784 | SUPPORTED_TP); | ||
1785 | ecmd->advertising = ADVERTISED_TP; | ||
1786 | |||
1787 | ecmd->advertising |= ADVERTISED_Autoneg; | ||
1788 | ecmd->advertising |= hw->autoneg_advertised; | ||
1789 | |||
1790 | ecmd->port = PORT_TP; | ||
1791 | ecmd->phy_address = 0; | ||
1792 | ecmd->transceiver = XCVR_INTERNAL; | ||
1793 | |||
1794 | if (adapter->link_speed != SPEED_0) { | ||
1795 | ecmd->speed = adapter->link_speed; | ||
1796 | if (adapter->link_duplex == FULL_DUPLEX) | ||
1797 | ecmd->duplex = DUPLEX_FULL; | ||
1798 | else | ||
1799 | ecmd->duplex = DUPLEX_HALF; | ||
1800 | } else { | ||
1801 | ecmd->speed = -1; | ||
1802 | ecmd->duplex = -1; | ||
1803 | } | ||
1804 | |||
1805 | ecmd->autoneg = AUTONEG_ENABLE; | ||
1806 | return 0; | ||
1807 | } | ||
1808 | |||
1809 | static int atl2_set_settings(struct net_device *netdev, | ||
1810 | struct ethtool_cmd *ecmd) | ||
1811 | { | ||
1812 | struct atl2_adapter *adapter = netdev_priv(netdev); | ||
1813 | struct atl2_hw *hw = &adapter->hw; | ||
1814 | |||
1815 | while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags)) | ||
1816 | msleep(1); | ||
1817 | |||
1818 | if (ecmd->autoneg == AUTONEG_ENABLE) { | ||
1819 | #define MY_ADV_MASK (ADVERTISE_10_HALF | \ | ||
1820 | ADVERTISE_10_FULL | \ | ||
1821 | ADVERTISE_100_HALF| \ | ||
1822 | ADVERTISE_100_FULL) | ||
1823 | |||
1824 | if ((ecmd->advertising & MY_ADV_MASK) == MY_ADV_MASK) { | ||
1825 | hw->MediaType = MEDIA_TYPE_AUTO_SENSOR; | ||
1826 | hw->autoneg_advertised = MY_ADV_MASK; | ||
1827 | } else if ((ecmd->advertising & MY_ADV_MASK) == | ||
1828 | ADVERTISE_100_FULL) { | ||
1829 | hw->MediaType = MEDIA_TYPE_100M_FULL; | ||
1830 | hw->autoneg_advertised = ADVERTISE_100_FULL; | ||
1831 | } else if ((ecmd->advertising & MY_ADV_MASK) == | ||
1832 | ADVERTISE_100_HALF) { | ||
1833 | hw->MediaType = MEDIA_TYPE_100M_HALF; | ||
1834 | hw->autoneg_advertised = ADVERTISE_100_HALF; | ||
1835 | } else if ((ecmd->advertising & MY_ADV_MASK) == | ||
1836 | ADVERTISE_10_FULL) { | ||
1837 | hw->MediaType = MEDIA_TYPE_10M_FULL; | ||
1838 | hw->autoneg_advertised = ADVERTISE_10_FULL; | ||
1839 | } else if ((ecmd->advertising & MY_ADV_MASK) == | ||
1840 | ADVERTISE_10_HALF) { | ||
1841 | hw->MediaType = MEDIA_TYPE_10M_HALF; | ||
1842 | hw->autoneg_advertised = ADVERTISE_10_HALF; | ||
1843 | } else { | ||
1844 | clear_bit(__ATL2_RESETTING, &adapter->flags); | ||
1845 | return -EINVAL; | ||
1846 | } | ||
1847 | ecmd->advertising = hw->autoneg_advertised | | ||
1848 | ADVERTISED_TP | ADVERTISED_Autoneg; | ||
1849 | } else { | ||
1850 | clear_bit(__ATL2_RESETTING, &adapter->flags); | ||
1851 | return -EINVAL; | ||
1852 | } | ||
1853 | |||
1854 | /* reset the link */ | ||
1855 | if (netif_running(adapter->netdev)) { | ||
1856 | atl2_down(adapter); | ||
1857 | atl2_up(adapter); | ||
1858 | } else | ||
1859 | atl2_reset_hw(&adapter->hw); | ||
1860 | |||
1861 | clear_bit(__ATL2_RESETTING, &adapter->flags); | ||
1862 | return 0; | ||
1863 | } | ||
1864 | |||
1865 | static u32 atl2_get_tx_csum(struct net_device *netdev) | ||
1866 | { | ||
1867 | return (netdev->features & NETIF_F_HW_CSUM) != 0; | ||
1868 | } | ||
1869 | |||
1870 | static u32 atl2_get_msglevel(struct net_device *netdev) | ||
1871 | { | ||
1872 | return 0; | ||
1873 | } | ||
1874 | |||
1875 | /* | ||
1876 | * It's sane for this to be empty, but we might want to take advantage of this. | ||
1877 | */ | ||
1878 | static void atl2_set_msglevel(struct net_device *netdev, u32 data) | ||
1879 | { | ||
1880 | } | ||
1881 | |||
1882 | static int atl2_get_regs_len(struct net_device *netdev) | ||
1883 | { | ||
1884 | #define ATL2_REGS_LEN 42 | ||
1885 | return sizeof(u32) * ATL2_REGS_LEN; | ||
1886 | } | ||
1887 | |||
1888 | static void atl2_get_regs(struct net_device *netdev, | ||
1889 | struct ethtool_regs *regs, void *p) | ||
1890 | { | ||
1891 | struct atl2_adapter *adapter = netdev_priv(netdev); | ||
1892 | struct atl2_hw *hw = &adapter->hw; | ||
1893 | u32 *regs_buff = p; | ||
1894 | u16 phy_data; | ||
1895 | |||
1896 | memset(p, 0, sizeof(u32) * ATL2_REGS_LEN); | ||
1897 | |||
1898 | regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; | ||
1899 | |||
1900 | regs_buff[0] = ATL2_READ_REG(hw, REG_VPD_CAP); | ||
1901 | regs_buff[1] = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL); | ||
1902 | regs_buff[2] = ATL2_READ_REG(hw, REG_SPI_FLASH_CONFIG); | ||
1903 | regs_buff[3] = ATL2_READ_REG(hw, REG_TWSI_CTRL); | ||
1904 | regs_buff[4] = ATL2_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL); | ||
1905 | regs_buff[5] = ATL2_READ_REG(hw, REG_MASTER_CTRL); | ||
1906 | regs_buff[6] = ATL2_READ_REG(hw, REG_MANUAL_TIMER_INIT); | ||
1907 | regs_buff[7] = ATL2_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT); | ||
1908 | regs_buff[8] = ATL2_READ_REG(hw, REG_PHY_ENABLE); | ||
1909 | regs_buff[9] = ATL2_READ_REG(hw, REG_CMBDISDMA_TIMER); | ||
1910 | regs_buff[10] = ATL2_READ_REG(hw, REG_IDLE_STATUS); | ||
1911 | regs_buff[11] = ATL2_READ_REG(hw, REG_MDIO_CTRL); | ||
1912 | regs_buff[12] = ATL2_READ_REG(hw, REG_SERDES_LOCK); | ||
1913 | regs_buff[13] = ATL2_READ_REG(hw, REG_MAC_CTRL); | ||
1914 | regs_buff[14] = ATL2_READ_REG(hw, REG_MAC_IPG_IFG); | ||
1915 | regs_buff[15] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR); | ||
1916 | regs_buff[16] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR+4); | ||
1917 | regs_buff[17] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE); | ||
1918 | regs_buff[18] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE+4); | ||
1919 | regs_buff[19] = ATL2_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL); | ||
1920 | regs_buff[20] = ATL2_READ_REG(hw, REG_MTU); | ||
1921 | regs_buff[21] = ATL2_READ_REG(hw, REG_WOL_CTRL); | ||
1922 | regs_buff[22] = ATL2_READ_REG(hw, REG_SRAM_TXRAM_END); | ||
1923 | regs_buff[23] = ATL2_READ_REG(hw, REG_DESC_BASE_ADDR_HI); | ||
1924 | regs_buff[24] = ATL2_READ_REG(hw, REG_TXD_BASE_ADDR_LO); | ||
1925 | regs_buff[25] = ATL2_READ_REG(hw, REG_TXD_MEM_SIZE); | ||
1926 | regs_buff[26] = ATL2_READ_REG(hw, REG_TXS_BASE_ADDR_LO); | ||
1927 | regs_buff[27] = ATL2_READ_REG(hw, REG_TXS_MEM_SIZE); | ||
1928 | regs_buff[28] = ATL2_READ_REG(hw, REG_RXD_BASE_ADDR_LO); | ||
1929 | regs_buff[29] = ATL2_READ_REG(hw, REG_RXD_BUF_NUM); | ||
1930 | regs_buff[30] = ATL2_READ_REG(hw, REG_DMAR); | ||
1931 | regs_buff[31] = ATL2_READ_REG(hw, REG_TX_CUT_THRESH); | ||
1932 | regs_buff[32] = ATL2_READ_REG(hw, REG_DMAW); | ||
1933 | regs_buff[33] = ATL2_READ_REG(hw, REG_PAUSE_ON_TH); | ||
1934 | regs_buff[34] = ATL2_READ_REG(hw, REG_PAUSE_OFF_TH); | ||
1935 | regs_buff[35] = ATL2_READ_REG(hw, REG_MB_TXD_WR_IDX); | ||
1936 | regs_buff[36] = ATL2_READ_REG(hw, REG_MB_RXD_RD_IDX); | ||
1937 | regs_buff[38] = ATL2_READ_REG(hw, REG_ISR); | ||
1938 | regs_buff[39] = ATL2_READ_REG(hw, REG_IMR); | ||
1939 | |||
1940 | atl2_read_phy_reg(hw, MII_BMCR, &phy_data); | ||
1941 | regs_buff[40] = (u32)phy_data; | ||
1942 | atl2_read_phy_reg(hw, MII_BMSR, &phy_data); | ||
1943 | regs_buff[41] = (u32)phy_data; | ||
1944 | } | ||
1945 | |||
1946 | static int atl2_get_eeprom_len(struct net_device *netdev) | ||
1947 | { | ||
1948 | struct atl2_adapter *adapter = netdev_priv(netdev); | ||
1949 | |||
1950 | if (!atl2_check_eeprom_exist(&adapter->hw)) | ||
1951 | return 512; | ||
1952 | else | ||
1953 | return 0; | ||
1954 | } | ||
1955 | |||
1956 | static int atl2_get_eeprom(struct net_device *netdev, | ||
1957 | struct ethtool_eeprom *eeprom, u8 *bytes) | ||
1958 | { | ||
1959 | struct atl2_adapter *adapter = netdev_priv(netdev); | ||
1960 | struct atl2_hw *hw = &adapter->hw; | ||
1961 | u32 *eeprom_buff; | ||
1962 | int first_dword, last_dword; | ||
1963 | int ret_val = 0; | ||
1964 | int i; | ||
1965 | |||
1966 | if (eeprom->len == 0) | ||
1967 | return -EINVAL; | ||
1968 | |||
1969 | if (atl2_check_eeprom_exist(hw)) | ||
1970 | return -EINVAL; | ||
1971 | |||
1972 | eeprom->magic = hw->vendor_id | (hw->device_id << 16); | ||
1973 | |||
1974 | first_dword = eeprom->offset >> 2; | ||
1975 | last_dword = (eeprom->offset + eeprom->len - 1) >> 2; | ||
1976 | |||
1977 | eeprom_buff = kmalloc(sizeof(u32) * (last_dword - first_dword + 1), | ||
1978 | GFP_KERNEL); | ||
1979 | if (!eeprom_buff) | ||
1980 | return -ENOMEM; | ||
1981 | |||
1982 | for (i = first_dword; i <= last_dword; i++) { | ||
1983 | if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) { | ||
| kfree(eeprom_buff); | ||
1984 | return -EIO; | ||
| } | ||
1985 | } | ||
1986 | |||
1987 | memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), | ||
1988 | eeprom->len); | ||
1989 | kfree(eeprom_buff); | ||
1990 | |||
1991 | return ret_val; | ||
1992 | } | ||
1993 | |||
1994 | static int atl2_set_eeprom(struct net_device *netdev, | ||
1995 | struct ethtool_eeprom *eeprom, u8 *bytes) | ||
1996 | { | ||
1997 | struct atl2_adapter *adapter = netdev_priv(netdev); | ||
1998 | struct atl2_hw *hw = &adapter->hw; | ||
1999 | u32 *eeprom_buff; | ||
2000 | u32 *ptr; | ||
2001 | int max_len, first_dword, last_dword, ret_val = 0; | ||
2002 | int i; | ||
2003 | |||
2004 | if (eeprom->len == 0) | ||
2005 | return -EOPNOTSUPP; | ||
2006 | |||
2007 | if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) | ||
2008 | return -EFAULT; | ||
2009 | |||
2010 | max_len = 512; | ||
2011 | |||
2012 | first_dword = eeprom->offset >> 2; | ||
2013 | last_dword = (eeprom->offset + eeprom->len - 1) >> 2; | ||
2014 | eeprom_buff = kmalloc(max_len, GFP_KERNEL); | ||
2015 | if (!eeprom_buff) | ||
2016 | return -ENOMEM; | ||
2017 | |||
2018 | ptr = (u32 *)eeprom_buff; | ||
2019 | |||
2020 | if (eeprom->offset & 3) { | ||
2021 | /* need read/modify/write of first changed EEPROM word */ | ||
2022 | /* only the second byte of the word is being modified */ | ||
2023 | if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) { | ||
| kfree(eeprom_buff); | ||
2024 | return -EIO; | ||
| } | ||
2025 | ptr++; | ||
2026 | } | ||
2027 | if (((eeprom->offset + eeprom->len) & 3)) { | ||
2028 | /* | ||
2029 | * need read/modify/write of last changed EEPROM word | ||
2030 | * only the first byte of the word is being modified | ||
2031 | */ | ||
2032 | if (!atl2_read_eeprom(hw, last_dword * 4, | ||
2033 | &(eeprom_buff[last_dword - first_dword]))) { | ||
| kfree(eeprom_buff); | ||
2034 | return -EIO; | ||
| } | ||
2035 | } | ||
2036 | |||
2037 | /* Device's eeprom is always little-endian, word addressable */ | ||
2038 | memcpy(ptr, bytes, eeprom->len); | ||
2039 | |||
2040 | for (i = 0; i < last_dword - first_dword + 1; i++) { | ||
2041 | if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i])) { | ||
| kfree(eeprom_buff); | ||
2042 | return -EIO; | ||
| } | ||
2043 | } | ||
2044 | |||
2045 | kfree(eeprom_buff); | ||
2046 | return ret_val; | ||
2047 | } | ||
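
The partial-word handling above is easier to see with concrete numbers. A standalone sanity check of the dword arithmetic used by atl2_get_eeprom() and atl2_set_eeprom() (plain C with hypothetical offset/length values; this is not driver code):

	#include <assert.h>

	int main(void)
	{
		int offset = 6, len = 6;                  /* touch bytes 6..11 */
		int first_dword = offset >> 2;            /* = 1, bytes 4..7  */
		int last_dword = (offset + len - 1) >> 2; /* = 2, bytes 8..11 */

		assert(first_dword == 1 && last_dword == 2);
		/* offset & 3 == 2: the head dword needs read/modify/write;
		 * (offset + len) & 3 == 0: the write ends on a dword
		 * boundary, so no trailing merge is required. */
		return 0;
	}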
2048 | |||
2049 | static void atl2_get_drvinfo(struct net_device *netdev, | ||
2050 | struct ethtool_drvinfo *drvinfo) | ||
2051 | { | ||
2052 | struct atl2_adapter *adapter = netdev_priv(netdev); | ||
2053 | |||
2054 | strncpy(drvinfo->driver, atl2_driver_name, 32); | ||
2055 | strncpy(drvinfo->version, atl2_driver_version, 32); | ||
2056 | strncpy(drvinfo->fw_version, "L2", 32); | ||
2057 | strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); | ||
2058 | drvinfo->n_stats = 0; | ||
2059 | drvinfo->testinfo_len = 0; | ||
2060 | drvinfo->regdump_len = atl2_get_regs_len(netdev); | ||
2061 | drvinfo->eedump_len = atl2_get_eeprom_len(netdev); | ||
2062 | } | ||
2063 | |||
2064 | static void atl2_get_wol(struct net_device *netdev, | ||
2065 | struct ethtool_wolinfo *wol) | ||
2066 | { | ||
2067 | struct atl2_adapter *adapter = netdev_priv(netdev); | ||
2068 | |||
2069 | wol->supported = WAKE_MAGIC; | ||
2070 | wol->wolopts = 0; | ||
2071 | |||
2072 | if (adapter->wol & ATLX_WUFC_EX) | ||
2073 | wol->wolopts |= WAKE_UCAST; | ||
2074 | if (adapter->wol & ATLX_WUFC_MC) | ||
2075 | wol->wolopts |= WAKE_MCAST; | ||
2076 | if (adapter->wol & ATLX_WUFC_BC) | ||
2077 | wol->wolopts |= WAKE_BCAST; | ||
2078 | if (adapter->wol & ATLX_WUFC_MAG) | ||
2079 | wol->wolopts |= WAKE_MAGIC; | ||
2080 | if (adapter->wol & ATLX_WUFC_LNKC) | ||
2081 | wol->wolopts |= WAKE_PHY; | ||
2082 | } | ||
2083 | |||
2084 | static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | ||
2085 | { | ||
2086 | struct atl2_adapter *adapter = netdev_priv(netdev); | ||
2087 | |||
2088 | if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) | ||
2089 | return -EOPNOTSUPP; | ||
2090 | |||
2091 | if (wol->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) | ||
2092 | return -EOPNOTSUPP; | ||
2093 | |||
2094 | /* these settings will always override what we currently have */ | ||
2095 | adapter->wol = 0; | ||
2096 | |||
2097 | if (wol->wolopts & WAKE_MAGIC) | ||
2098 | adapter->wol |= ATLX_WUFC_MAG; | ||
2099 | if (wol->wolopts & WAKE_PHY) | ||
2100 | adapter->wol |= ATLX_WUFC_LNKC; | ||
2101 | |||
2102 | return 0; | ||
2103 | } | ||
2104 | |||
2105 | static int atl2_nway_reset(struct net_device *netdev) | ||
2106 | { | ||
2107 | struct atl2_adapter *adapter = netdev_priv(netdev); | ||
2108 | if (netif_running(netdev)) | ||
2109 | atl2_reinit_locked(adapter); | ||
2110 | return 0; | ||
2111 | } | ||
2112 | |||
2113 | static struct ethtool_ops atl2_ethtool_ops = { | ||
2114 | .get_settings = atl2_get_settings, | ||
2115 | .set_settings = atl2_set_settings, | ||
2116 | .get_drvinfo = atl2_get_drvinfo, | ||
2117 | .get_regs_len = atl2_get_regs_len, | ||
2118 | .get_regs = atl2_get_regs, | ||
2119 | .get_wol = atl2_get_wol, | ||
2120 | .set_wol = atl2_set_wol, | ||
2121 | .get_msglevel = atl2_get_msglevel, | ||
2122 | .set_msglevel = atl2_set_msglevel, | ||
2123 | .nway_reset = atl2_nway_reset, | ||
2124 | .get_link = ethtool_op_get_link, | ||
2125 | .get_eeprom_len = atl2_get_eeprom_len, | ||
2126 | .get_eeprom = atl2_get_eeprom, | ||
2127 | .set_eeprom = atl2_set_eeprom, | ||
2128 | .get_tx_csum = atl2_get_tx_csum, | ||
2129 | .get_sg = ethtool_op_get_sg, | ||
2130 | .set_sg = ethtool_op_set_sg, | ||
2131 | #ifdef NETIF_F_TSO | ||
2132 | .get_tso = ethtool_op_get_tso, | ||
2133 | #endif | ||
2134 | }; | ||
2135 | |||
2136 | static void atl2_set_ethtool_ops(struct net_device *netdev) | ||
2137 | { | ||
2138 | SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops); | ||
2139 | } | ||
2140 | |||
2141 | #define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \ | ||
2142 | (((a) & 0xff00ff00) >> 8)) | ||
2143 | #define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16)) | ||
2144 | #define SHORTSWAP(a) (((a) << 8) | ((a) >> 8)) | ||
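
These macros byte-reverse 16-bit and 32-bit values so the little-endian dwords stored in EEPROM/flash map onto the wire order of the station address. A standalone sanity check using the 00-0B-6A-F6-00-DC example documented in atl2_set_mac_addr() further down (plain C, not driver code; the macro definitions are copied so the snippet compiles on its own):

	#include <assert.h>
	#include <stdint.h>

	#define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | (((a) & 0xff00ff00) >> 8))
	#define LONGSWAP(a)  ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
	#define SHORTSWAP(a) (((a) << 8) | ((a) >> 8))

	int main(void)
	{
		uint32_t lo = 0x6AF600DC;  /* dword 0 as stored on the device */
		uint16_t hi = 0x000B;      /* dword 1 as stored on the device */

		assert(LONGSWAP(lo) == 0xDC00F66A);        /* full byte reversal */
		assert((uint16_t)SHORTSWAP(hi) == 0x0B00); /* bytes exchanged */
		return 0;
	}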
2145 | |||
2146 | /* | ||
2147 | * Reset the transmit and receive units; mask and clear all interrupts. | ||
2148 | * | ||
2149 | * hw - Struct containing variables accessed by shared code | ||
2150 | * return : 0 or idle status (if error) | ||
2151 | */ | ||
2152 | static s32 atl2_reset_hw(struct atl2_hw *hw) | ||
2153 | { | ||
2154 | u32 icr; | ||
2155 | u16 pci_cfg_cmd_word; | ||
2156 | int i; | ||
2157 | |||
2158 | /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */ | ||
2159 | atl2_read_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word); | ||
2160 | if ((pci_cfg_cmd_word & | ||
2161 | (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) != | ||
2162 | (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) { | ||
2163 | pci_cfg_cmd_word |= | ||
2164 | (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER); | ||
2165 | atl2_write_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word); | ||
2166 | } | ||
2167 | |||
2168 | /* Clear Interrupt mask to stop board from generating | ||
2169 | * interrupts & Clear any pending interrupt events | ||
2170 | */ | ||
2171 | /* FIXME */ | ||
2172 | /* ATL2_WRITE_REG(hw, REG_IMR, 0); */ | ||
2173 | /* ATL2_WRITE_REG(hw, REG_ISR, 0xffffffff); */ | ||
2174 | |||
2175 | /* Issue Soft Reset to the MAC. This will reset the chip's | ||
2176 | * transmit, receive and DMA units. It will not affect | ||
2177 | * the current PCI configuration. The global reset bit is self- | ||
2178 | * clearing, and should clear within a microsecond. | ||
2179 | */ | ||
2180 | ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST); | ||
2181 | wmb(); | ||
2182 | msleep(1); /* delay about 1ms */ | ||
2183 | |||
2184 | /* Wait up to 10 ms for all modules to become idle */ | ||
2185 | for (i = 0; i < 10; i++) { | ||
2186 | icr = ATL2_READ_REG(hw, REG_IDLE_STATUS); | ||
2187 | if (!icr) | ||
2188 | break; | ||
2189 | msleep(1); /* delay 1 ms */ | ||
2190 | cpu_relax(); | ||
2191 | } | ||
2192 | |||
2193 | if (icr) | ||
2194 | return icr; | ||
2195 | |||
2196 | return 0; | ||
2197 | } | ||
2198 | |||
2199 | #define CUSTOM_SPI_CS_SETUP 2 | ||
2200 | #define CUSTOM_SPI_CLK_HI 2 | ||
2201 | #define CUSTOM_SPI_CLK_LO 2 | ||
2202 | #define CUSTOM_SPI_CS_HOLD 2 | ||
2203 | #define CUSTOM_SPI_CS_HI 3 | ||
2204 | |||
2205 | static struct atl2_spi_flash_dev flash_table[] = | ||
2206 | { | ||
2207 | /* MFR WRSR READ PROGRAM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */ | ||
2208 | {"Atmel", 0x0, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62 }, | ||
2209 | {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60 }, | ||
2210 | {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7 }, | ||
2211 | }; | ||
2212 | |||
2213 | static bool atl2_spi_read(struct atl2_hw *hw, u32 addr, u32 *buf) | ||
2214 | { | ||
2215 | int i; | ||
2216 | u32 value; | ||
2217 | |||
2218 | ATL2_WRITE_REG(hw, REG_SPI_DATA, 0); | ||
2219 | ATL2_WRITE_REG(hw, REG_SPI_ADDR, addr); | ||
2220 | |||
2221 | value = SPI_FLASH_CTRL_WAIT_READY | | ||
2222 | (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) << | ||
2223 | SPI_FLASH_CTRL_CS_SETUP_SHIFT | | ||
2224 | (CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) << | ||
2225 | SPI_FLASH_CTRL_CLK_HI_SHIFT | | ||
2226 | (CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) << | ||
2227 | SPI_FLASH_CTRL_CLK_LO_SHIFT | | ||
2228 | (CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) << | ||
2229 | SPI_FLASH_CTRL_CS_HOLD_SHIFT | | ||
2230 | (CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) << | ||
2231 | SPI_FLASH_CTRL_CS_HI_SHIFT | | ||
2232 | (0x1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT; | ||
2233 | |||
2234 | ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); | ||
2235 | |||
2236 | value |= SPI_FLASH_CTRL_START; | ||
2237 | |||
2238 | ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); | ||
2239 | |||
2240 | for (i = 0; i < 10; i++) { | ||
2241 | msleep(1); | ||
2242 | value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL); | ||
2243 | if (!(value & SPI_FLASH_CTRL_START)) | ||
2244 | break; | ||
2245 | } | ||
2246 | |||
2247 | if (value & SPI_FLASH_CTRL_START) | ||
2248 | return false; | ||
2249 | |||
2250 | *buf = ATL2_READ_REG(hw, REG_SPI_DATA); | ||
2251 | |||
2252 | return true; | ||
2253 | } | ||
2254 | |||
2255 | /* | ||
2256 | * get_permanent_address | ||
2257 | * returns 0 if a valid MAC address was obtained | ||
2258 | */ | ||
2259 | static int get_permanent_address(struct atl2_hw *hw) | ||
2260 | { | ||
2261 | u32 Addr[2]; | ||
2262 | u32 i, Control; | ||
2263 | u16 Register; | ||
2264 | u8 EthAddr[NODE_ADDRESS_SIZE]; | ||
2265 | bool KeyValid; | ||
2266 | |||
2267 | if (is_valid_ether_addr(hw->perm_mac_addr)) | ||
2268 | return 0; | ||
2269 | |||
2270 | Addr[0] = 0; | ||
2271 | Addr[1] = 0; | ||
2272 | |||
2273 | if (!atl2_check_eeprom_exist(hw)) { /* eeprom exists */ | ||
2274 | Register = 0; | ||
2275 | KeyValid = false; | ||
2276 | |||
2277 | /* Read out all EEPROM content */ | ||
2278 | i = 0; | ||
2279 | while (1) { | ||
2280 | if (atl2_read_eeprom(hw, i + 0x100, &Control)) { | ||
2281 | if (KeyValid) { | ||
2282 | if (Register == REG_MAC_STA_ADDR) | ||
2283 | Addr[0] = Control; | ||
2284 | else if (Register == | ||
2285 | (REG_MAC_STA_ADDR + 4)) | ||
2286 | Addr[1] = Control; | ||
2287 | KeyValid = false; | ||
2288 | } else if ((Control & 0xff) == 0x5A) { | ||
2289 | KeyValid = true; | ||
2290 | Register = (u16) (Control >> 16); | ||
2291 | } else { | ||
2292 | /* assume end of data on an invalid keyword */ | ||
2293 | break; | ||
2294 | } | ||
2295 | } else { | ||
2296 | break; /* read error */ | ||
2297 | } | ||
2298 | i += 4; | ||
2299 | } | ||
2300 | |||
2301 | *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]); | ||
2302 | *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]); | ||
2303 | |||
2304 | if (is_valid_ether_addr(EthAddr)) { | ||
2305 | memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE); | ||
2306 | return 0; | ||
2307 | } | ||
2308 | return 1; | ||
2309 | } | ||
2310 | |||
2311 | /* see if a SPI flash exists */ | ||
2312 | Addr[0] = 0; | ||
2313 | Addr[1] = 0; | ||
2314 | Register = 0; | ||
2315 | KeyValid = false; | ||
2316 | i = 0; | ||
2317 | while (1) { | ||
2318 | if (atl2_spi_read(hw, i + 0x1f000, &Control)) { | ||
2319 | if (KeyValid) { | ||
2320 | if (Register == REG_MAC_STA_ADDR) | ||
2321 | Addr[0] = Control; | ||
2322 | else if (Register == (REG_MAC_STA_ADDR + 4)) | ||
2323 | Addr[1] = Control; | ||
2324 | KeyValid = false; | ||
2325 | } else if ((Control & 0xff) == 0x5A) { | ||
2326 | KeyValid = true; | ||
2327 | Register = (u16) (Control >> 16); | ||
2328 | } else { | ||
2329 | break; /* data end */ | ||
2330 | } | ||
2331 | } else { | ||
2332 | break; /* read error */ | ||
2333 | } | ||
2334 | i += 4; | ||
2335 | } | ||
2336 | |||
2337 | *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]); | ||
2338 | *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *)&Addr[1]); | ||
2339 | if (is_valid_ether_addr(EthAddr)) { | ||
2340 | memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE); | ||
2341 | return 0; | ||
2342 | } | ||
2343 | /* the MAC address may have been set by the BIOS */ | ||
2344 | Addr[0] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR); | ||
2345 | Addr[1] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR + 4); | ||
2346 | *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]); | ||
2347 | *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]); | ||
2348 | |||
2349 | if (is_valid_ether_addr(EthAddr)) { | ||
2350 | memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE); | ||
2351 | return 0; | ||
2352 | } | ||
2353 | |||
2354 | return 1; | ||
2355 | } | ||
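
Both scan loops above parse the same on-device table: a stream of 32-bit words in which a word whose low byte is 0x5A names a register (offset in the upper 16 bits) and the following word carries the value for that register; any other word ends the table. A sketch of that record format as a generic routine, assuming a fetch() callback standing in for atl2_read_eeprom()/atl2_spi_read(); scan_for_register() and KEY_SIGNATURE are illustrative names, not driver APIs:

	#include <stdbool.h>
	#include <stdint.h>

	#define KEY_SIGNATURE 0x5A  /* low byte marking a register-offset word */

	static bool scan_for_register(bool (*fetch)(uint32_t addr, uint32_t *word),
				      uint32_t base, uint16_t wanted_reg,
				      uint32_t *value)
	{
		uint32_t word, addr = base;
		uint16_t reg = 0;
		bool key_valid = false;

		while (fetch(addr, &word)) {
			if (key_valid) {          /* previous word named a register */
				if (reg == wanted_reg) {
					*value = word;
					return true;
				}
				key_valid = false;
			} else if ((word & 0xff) == KEY_SIGNATURE) {
				reg = (uint16_t)(word >> 16);
				key_valid = true;
			} else {
				break;            /* any other word ends the table */
			}
			addr += 4;
		}
		return false;
	}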
2356 | |||
2357 | /* | ||
2358 | * Reads the adapter's MAC address from the EEPROM | ||
2359 | * | ||
2360 | * hw - Struct containing variables accessed by shared code | ||
2361 | */ | ||
2362 | static s32 atl2_read_mac_addr(struct atl2_hw *hw) | ||
2363 | { | ||
2364 | u16 i; | ||
2365 | |||
2366 | if (get_permanent_address(hw)) { | ||
2367 | /* for test */ | ||
2368 | /* FIXME: shouldn't we use random_ether_addr() here? */ | ||
2369 | hw->perm_mac_addr[0] = 0x00; | ||
2370 | hw->perm_mac_addr[1] = 0x13; | ||
2371 | hw->perm_mac_addr[2] = 0x74; | ||
2372 | hw->perm_mac_addr[3] = 0x00; | ||
2373 | hw->perm_mac_addr[4] = 0x5c; | ||
2374 | hw->perm_mac_addr[5] = 0x38; | ||
2375 | } | ||
2376 | |||
2377 | for (i = 0; i < NODE_ADDRESS_SIZE; i++) | ||
2378 | hw->mac_addr[i] = hw->perm_mac_addr[i]; | ||
2379 | |||
2380 | return 0; | ||
2381 | } | ||
2382 | |||
2383 | /* | ||
2384 | * Hashes an address to determine its location in the multicast table | ||
2385 | * | ||
2386 | * hw - Struct containing variables accessed by shared code | ||
2387 | * mc_addr - the multicast address to hash | ||
2388 | * | ||
2389 | * atl2_hash_mc_addr | ||
2390 | * purpose | ||
2391 | * compute the hash value for a multicast address | ||
2392 | * hash calculation: | ||
2393 | * 1. compute the 32-bit CRC of the multicast address | ||
2394 | * 2. reverse the CRC, MSB to LSB | ||
2395 | */ | ||
2396 | static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr) | ||
2397 | { | ||
2398 | u32 crc32, value; | ||
2399 | int i; | ||
2400 | |||
2401 | value = 0; | ||
2402 | crc32 = ether_crc_le(6, mc_addr); | ||
2403 | |||
2404 | for (i = 0; i < 32; i++) | ||
2405 | value |= (((crc32 >> i) & 1) << (31 - i)); | ||
2406 | |||
2407 | return value; | ||
2408 | } | ||
2409 | |||
2410 | /* | ||
2411 | * Sets the bit in the multicast table corresponding to the hash value. | ||
2412 | * | ||
2413 | * hw - Struct containing variables accessed by shared code | ||
2414 | * hash_value - Multicast address hash value | ||
2415 | */ | ||
2416 | static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value) | ||
2417 | { | ||
2418 | u32 hash_bit, hash_reg; | ||
2419 | u32 mta; | ||
2420 | |||
2421 | /* The HASH Table is a register array of 2 32-bit registers. | ||
2422 | * It is treated like an array of 64 bits. We want to set | ||
2423 | * bit BitArray[hash_value]. So we figure out what register | ||
2424 | * the bit is in, read it, OR in the new bit, then write | ||
2425 | * back the new value. The register is selected by the | ||
2426 | * most significant bit of the hash value, and the bit within | ||
2427 | * that register by the next five bits (bits 30-26). | ||
2428 | */ | ||
2429 | hash_reg = (hash_value >> 31) & 0x1; | ||
2430 | hash_bit = (hash_value >> 26) & 0x1F; | ||
2431 | |||
2432 | mta = ATL2_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg); | ||
2433 | |||
2434 | mta |= (1 << hash_bit); | ||
2435 | |||
2436 | ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta); | ||
2437 | } | ||
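
atl2_hash_mc_addr() and atl2_hash_set() are intended to be used as a pair when the receive multicast filter is programmed. A hedged sketch of such a caller, loosely modeled on typical atlx multicast handling; example_load_mc_filter() is illustrative only and is not the driver's actual atl2_set_multi(), which also handles promiscuous/allmulti modes:

	static void example_load_mc_filter(struct atl2_hw *hw,
					   struct net_device *netdev)
	{
		struct dev_mc_list *mc_ptr;
		u32 hash_value;

		/* clear both 32-bit halves of the 64-bit hash table */
		ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 0, 0);
		ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);

		/* set one bit per address on the multicast list */
		for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr);
			atl2_hash_set(hw, hash_value);
		}
	}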
2438 | |||
2439 | /* | ||
2440 | * atl2_init_pcie - init PCIE module | ||
2441 | */ | ||
2442 | static void atl2_init_pcie(struct atl2_hw *hw) | ||
2443 | { | ||
2444 | u32 value; | ||
2445 | value = LTSSM_TEST_MODE_DEF; | ||
2446 | ATL2_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value); | ||
2447 | |||
2448 | value = PCIE_DLL_TX_CTRL1_DEF; | ||
2449 | ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, value); | ||
2450 | } | ||
2451 | |||
2452 | static void atl2_init_flash_opcode(struct atl2_hw *hw) | ||
2453 | { | ||
2454 | if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) | ||
2455 | hw->flash_vendor = 0; /* ATMEL */ | ||
2456 | |||
2457 | /* Init OP table */ | ||
2458 | ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_PROGRAM, | ||
2459 | flash_table[hw->flash_vendor].cmdPROGRAM); | ||
2460 | ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_SC_ERASE, | ||
2461 | flash_table[hw->flash_vendor].cmdSECTOR_ERASE); | ||
2462 | ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_CHIP_ERASE, | ||
2463 | flash_table[hw->flash_vendor].cmdCHIP_ERASE); | ||
2464 | ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDID, | ||
2465 | flash_table[hw->flash_vendor].cmdRDID); | ||
2466 | ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WREN, | ||
2467 | flash_table[hw->flash_vendor].cmdWREN); | ||
2468 | ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDSR, | ||
2469 | flash_table[hw->flash_vendor].cmdRDSR); | ||
2470 | ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WRSR, | ||
2471 | flash_table[hw->flash_vendor].cmdWRSR); | ||
2472 | ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_READ, | ||
2473 | flash_table[hw->flash_vendor].cmdREAD); | ||
2474 | } | ||
2475 | |||
2476 | /******************************************************************** | ||
2477 | * Performs basic configuration of the adapter. | ||
2478 | * | ||
2479 | * hw - Struct containing variables accessed by shared code | ||
2480 | * Assumes that the controller has previously been reset and is in a | ||
2481 | * post-reset uninitialized state. Initializes multicast table, | ||
2482 | * and calls routines to set up the link. | ||
2483 | * Leaves the transmit and receive units disabled and uninitialized. | ||
2484 | ********************************************************************/ | ||
2485 | static s32 atl2_init_hw(struct atl2_hw *hw) | ||
2486 | { | ||
2487 | u32 ret_val = 0; | ||
2488 | |||
2489 | atl2_init_pcie(hw); | ||
2490 | |||
2491 | /* Zero out the Multicast HASH table */ | ||
2492 | /* clear the old settings from the multicast hash table */ | ||
2493 | ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); | ||
2494 | ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); | ||
2495 | |||
2496 | atl2_init_flash_opcode(hw); | ||
2497 | |||
2498 | ret_val = atl2_phy_init(hw); | ||
2499 | |||
2500 | return ret_val; | ||
2501 | } | ||
2502 | |||
2503 | /* | ||
2504 | * Detects the current speed and duplex settings of the hardware. | ||
2505 | * | ||
2506 | * hw - Struct containing variables accessed by shared code | ||
2507 | * speed - Speed of the connection | ||
2508 | * duplex - Duplex setting of the connection | ||
2509 | */ | ||
2510 | static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed, | ||
2511 | u16 *duplex) | ||
2512 | { | ||
2513 | s32 ret_val; | ||
2514 | u16 phy_data; | ||
2515 | |||
2516 | /* Read PHY Specific Status Register (17) */ | ||
2517 | ret_val = atl2_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data); | ||
2518 | if (ret_val) | ||
2519 | return ret_val; | ||
2520 | |||
2521 | if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED)) | ||
2522 | return ATLX_ERR_PHY_RES; | ||
2523 | |||
2524 | switch (phy_data & MII_ATLX_PSSR_SPEED) { | ||
2525 | case MII_ATLX_PSSR_100MBS: | ||
2526 | *speed = SPEED_100; | ||
2527 | break; | ||
2528 | case MII_ATLX_PSSR_10MBS: | ||
2529 | *speed = SPEED_10; | ||
2530 | break; | ||
2531 | default: | ||
2532 | return ATLX_ERR_PHY_SPEED; | ||
2533 | |||
2534 | } | ||
2535 | |||
2536 | if (phy_data & MII_ATLX_PSSR_DPLX) | ||
2537 | *duplex = FULL_DUPLEX; | ||
2538 | else | ||
2539 | *duplex = HALF_DUPLEX; | ||
2540 | |||
2541 | return 0; | ||
2542 | } | ||
2543 | |||
2544 | /* | ||
2545 | * Reads the value from a PHY register | ||
2546 | * hw - Struct containing variables accessed by shared code | ||
2547 | * reg_addr - address of the PHY register to read | ||
2548 | */ | ||
2549 | static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data) | ||
2550 | { | ||
2551 | u32 val; | ||
2552 | int i; | ||
2553 | |||
2554 | val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | | ||
2555 | MDIO_START | | ||
2556 | MDIO_SUP_PREAMBLE | | ||
2557 | MDIO_RW | | ||
2558 | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; | ||
2559 | ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val); | ||
2560 | |||
2561 | wmb(); | ||
2562 | |||
2563 | for (i = 0; i < MDIO_WAIT_TIMES; i++) { | ||
2564 | udelay(2); | ||
2565 | val = ATL2_READ_REG(hw, REG_MDIO_CTRL); | ||
2566 | if (!(val & (MDIO_START | MDIO_BUSY))) | ||
2567 | break; | ||
2568 | wmb(); | ||
2569 | } | ||
2570 | if (!(val & (MDIO_START | MDIO_BUSY))) { | ||
2571 | *phy_data = (u16)val; | ||
2572 | return 0; | ||
2573 | } | ||
2574 | |||
2575 | return ATLX_ERR_PHY; | ||
2576 | } | ||
2577 | |||
2578 | /* | ||
2579 | * Writes a value to a PHY register | ||
2580 | * hw - Struct containing variables accessed by shared code | ||
2581 | * reg_addr - address of the PHY register to write | ||
2582 | * data - data to write to the PHY | ||
2583 | */ | ||
2584 | static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data) | ||
2585 | { | ||
2586 | int i; | ||
2587 | u32 val; | ||
2588 | |||
2589 | val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | | ||
2590 | (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | | ||
2591 | MDIO_SUP_PREAMBLE | | ||
2592 | MDIO_START | | ||
2593 | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; | ||
2594 | ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val); | ||
2595 | |||
2596 | wmb(); | ||
2597 | |||
2598 | for (i = 0; i < MDIO_WAIT_TIMES; i++) { | ||
2599 | udelay(2); | ||
2600 | val = ATL2_READ_REG(hw, REG_MDIO_CTRL); | ||
2601 | if (!(val & (MDIO_START | MDIO_BUSY))) | ||
2602 | break; | ||
2603 | |||
2604 | wmb(); | ||
2605 | } | ||
2606 | |||
2607 | if (!(val & (MDIO_START | MDIO_BUSY))) | ||
2608 | return 0; | ||
2609 | |||
2610 | return ATLX_ERR_PHY; | ||
2611 | } | ||
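
A short usage sketch for the two MDIO helpers above: reading the PHY identifier from the standard MII ID registers (MII_PHYSID1/MII_PHYSID2 from <linux/mii.h>); example_read_phy_id() is a hypothetical helper, not part of the driver:

	static s32 example_read_phy_id(struct atl2_hw *hw, u32 *phy_id)
	{
		u16 id1, id2;
		s32 ret;

		ret = atl2_read_phy_reg(hw, MII_PHYSID1, &id1);
		if (ret)
			return ret;
		ret = atl2_read_phy_reg(hw, MII_PHYSID2, &id2);
		if (ret)
			return ret;

		*phy_id = ((u32)id1 << 16) | id2;  /* OUI + model/revision */
		return 0;
	}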
2612 | |||
2613 | /* | ||
2614 | * Configures PHY autoneg and flow control advertisement settings | ||
2615 | * | ||
2616 | * hw - Struct containing variables accessed by shared code | ||
2617 | */ | ||
2618 | static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw) | ||
2619 | { | ||
2620 | s32 ret_val; | ||
2621 | s16 mii_autoneg_adv_reg; | ||
2622 | |||
2623 | /* Start from the default MII Auto-Neg Advertisement value (Register 4). */ | ||
2624 | mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; | ||
2625 | |||
2626 | /* Need to parse autoneg_advertised and set up | ||
2627 | * the appropriate PHY registers. First we will parse for | ||
2628 | * autoneg_advertised software override. Since we can advertise | ||
2629 | * a plethora of combinations, we need to check each bit | ||
2630 | * individually. | ||
2631 | */ | ||
2632 | |||
2633 | /* First clear all the 10/100 Mb speed bits in the Auto-Neg | ||
2634 | * Advertisement Register (Register 4); this device has no | ||
2635 | * 1000Base-T support. */ | ||
2636 | mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; | ||
2637 | |||
2638 | /* Need to parse MediaType and setup the | ||
2639 | * appropriate PHY registers. */ | ||
2640 | switch (hw->MediaType) { | ||
2641 | case MEDIA_TYPE_AUTO_SENSOR: | ||
2642 | mii_autoneg_adv_reg |= | ||
2643 | (MII_AR_10T_HD_CAPS | | ||
2644 | MII_AR_10T_FD_CAPS | | ||
2645 | MII_AR_100TX_HD_CAPS| | ||
2646 | MII_AR_100TX_FD_CAPS); | ||
2647 | hw->autoneg_advertised = | ||
2648 | ADVERTISE_10_HALF | | ||
2649 | ADVERTISE_10_FULL | | ||
2650 | ADVERTISE_100_HALF| | ||
2651 | ADVERTISE_100_FULL; | ||
2652 | break; | ||
2653 | case MEDIA_TYPE_100M_FULL: | ||
2654 | mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; | ||
2655 | hw->autoneg_advertised = ADVERTISE_100_FULL; | ||
2656 | break; | ||
2657 | case MEDIA_TYPE_100M_HALF: | ||
2658 | mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; | ||
2659 | hw->autoneg_advertised = ADVERTISE_100_HALF; | ||
2660 | break; | ||
2661 | case MEDIA_TYPE_10M_FULL: | ||
2662 | mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; | ||
2663 | hw->autoneg_advertised = ADVERTISE_10_FULL; | ||
2664 | break; | ||
2665 | default: | ||
2666 | mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; | ||
2667 | hw->autoneg_advertised = ADVERTISE_10_HALF; | ||
2668 | break; | ||
2669 | } | ||
2670 | |||
2671 | /* always advertise symmetric and asymmetric pause (flow control) */ | ||
2672 | mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); | ||
2673 | |||
2674 | hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; | ||
2675 | |||
2676 | ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); | ||
2677 | |||
2678 | if (ret_val) | ||
2679 | return ret_val; | ||
2680 | |||
2681 | return 0; | ||
2682 | } | ||
2683 | |||
2684 | /* | ||
2685 | * Resets the PHY and makes the configuration take effect | ||
2686 | * | ||
2687 | * hw - Struct containing variables accessed by shared code | ||
2688 | * | ||
2689 | * Sets bits 15 and 12 of the MII Control register (for the F001 bug) | ||
2690 | */ | ||
2691 | static s32 atl2_phy_commit(struct atl2_hw *hw) | ||
2692 | { | ||
2693 | s32 ret_val; | ||
2694 | u16 phy_data; | ||
2695 | |||
2696 | phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; | ||
2697 | ret_val = atl2_write_phy_reg(hw, MII_BMCR, phy_data); | ||
2698 | if (ret_val) { | ||
2699 | u32 val; | ||
2700 | int i; | ||
2701 | /* the PCIe SerDes link may be down! */ | ||
2702 | for (i = 0; i < 25; i++) { | ||
2703 | msleep(1); | ||
2704 | val = ATL2_READ_REG(hw, REG_MDIO_CTRL); | ||
2705 | if (!(val & (MDIO_START | MDIO_BUSY))) | ||
2706 | break; | ||
2707 | } | ||
2708 | |||
2709 | if (0 != (val & (MDIO_START | MDIO_BUSY))) { | ||
2710 | printk(KERN_ERR "atl2: PCIe link down for at least 25 ms!\n"); | ||
2711 | return ret_val; | ||
2712 | } | ||
2713 | } | ||
2714 | return 0; | ||
2715 | } | ||
2716 | |||
2717 | static s32 atl2_phy_init(struct atl2_hw *hw) | ||
2718 | { | ||
2719 | s32 ret_val; | ||
2720 | u16 phy_val; | ||
2721 | |||
2722 | if (hw->phy_configured) | ||
2723 | return 0; | ||
2724 | |||
2725 | /* Enable PHY */ | ||
2726 | ATL2_WRITE_REGW(hw, REG_PHY_ENABLE, 1); | ||
2727 | ATL2_WRITE_FLUSH(hw); | ||
2728 | msleep(1); | ||
2729 | |||
2730 | /* check if the PHY is in powersaving mode */ | ||
2731 | atl2_write_phy_reg(hw, MII_DBG_ADDR, 0); | ||
2732 | atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val); | ||
2733 | |||
2734 | /* 024E / 124E or 0274 / 1274 ? */ | ||
2735 | if (phy_val & 0x1000) { | ||
2736 | phy_val &= ~0x1000; | ||
2737 | atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val); | ||
2738 | } | ||
2739 | |||
2740 | msleep(1); | ||
2741 | |||
2742 | /* Enable PHY link-change interrupt */ | ||
2743 | ret_val = atl2_write_phy_reg(hw, 18, 0xC00); | ||
2744 | if (ret_val) | ||
2745 | return ret_val; | ||
2746 | |||
2747 | /* setup AutoNeg parameters */ | ||
2748 | ret_val = atl2_phy_setup_autoneg_adv(hw); | ||
2749 | if (ret_val) | ||
2750 | return ret_val; | ||
2751 | |||
2752 | /* soft reset and re-enable auto-negotiation to restart it */ | ||
2753 | ret_val = atl2_phy_commit(hw); | ||
2754 | if (ret_val) | ||
2755 | return ret_val; | ||
2756 | |||
2757 | hw->phy_configured = true; | ||
2758 | |||
2759 | return ret_val; | ||
2760 | } | ||
2761 | |||
2762 | static void atl2_set_mac_addr(struct atl2_hw *hw) | ||
2763 | { | ||
2764 | u32 value; | ||
2765 | /* 00-0B-6A-F6-00-DC | ||
2766 | * 0: 6AF600DC 1: 000B | ||
2767 | * low dword */ | ||
2768 | value = (((u32)hw->mac_addr[2]) << 24) | | ||
2769 | (((u32)hw->mac_addr[3]) << 16) | | ||
2770 | (((u32)hw->mac_addr[4]) << 8) | | ||
2771 | (((u32)hw->mac_addr[5])); | ||
2772 | ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); | ||
2773 | /* high dword */ | ||
2774 | value = (((u32)hw->mac_addr[0]) << 8) | | ||
2775 | (((u32)hw->mac_addr[1])); | ||
2776 | ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); | ||
2777 | } | ||
2778 | |||
2779 | /* | ||
2780 | * check_eeprom_exist | ||
2781 | * returns 0 if an EEPROM exists | ||
2782 | */ | ||
2783 | static int atl2_check_eeprom_exist(struct atl2_hw *hw) | ||
2784 | { | ||
2785 | u32 value; | ||
2786 | |||
2787 | value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL); | ||
2788 | if (value & SPI_FLASH_CTRL_EN_VPD) { | ||
2789 | value &= ~SPI_FLASH_CTRL_EN_VPD; | ||
2790 | ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); | ||
2791 | } | ||
2792 | value = ATL2_READ_REGW(hw, REG_PCIE_CAP_LIST); | ||
2793 | return ((value & 0xFF00) == 0x6C00) ? 0 : 1; | ||
2794 | } | ||
2795 | |||
2796 | /* FIXME: This doesn't look right. -- CHS */ | ||
2797 | static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value) | ||
2798 | { | ||
2799 | return true; | ||
2800 | } | ||
2801 | |||
2802 | static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue) | ||
2803 | { | ||
2804 | int i; | ||
2805 | u32 Control; | ||
2806 | |||
2807 | if (Offset & 0x3) | ||
2808 | return false; /* address is not dword-aligned */ | ||
2809 | |||
2810 | ATL2_WRITE_REG(hw, REG_VPD_DATA, 0); | ||
2811 | Control = (Offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; | ||
2812 | ATL2_WRITE_REG(hw, REG_VPD_CAP, Control); | ||
2813 | |||
2814 | for (i = 0; i < 10; i++) { | ||
2815 | msleep(2); | ||
2816 | Control = ATL2_READ_REG(hw, REG_VPD_CAP); | ||
2817 | if (Control & VPD_CAP_VPD_FLAG) | ||
2818 | break; | ||
2819 | } | ||
2820 | |||
2821 | if (Control & VPD_CAP_VPD_FLAG) { | ||
2822 | *pValue = ATL2_READ_REG(hw, REG_VPD_DATA); | ||
2823 | return true; | ||
2824 | } | ||
2825 | return false; /* timeout */ | ||
2826 | } | ||
2827 | |||
2828 | static void atl2_force_ps(struct atl2_hw *hw) | ||
2829 | { | ||
2830 | u16 phy_val; | ||
2831 | |||
2832 | atl2_write_phy_reg(hw, MII_DBG_ADDR, 0); | ||
2833 | atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val); | ||
2834 | atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val | 0x1000); | ||
2835 | |||
2836 | atl2_write_phy_reg(hw, MII_DBG_ADDR, 2); | ||
2837 | atl2_write_phy_reg(hw, MII_DBG_DATA, 0x3000); | ||
2838 | atl2_write_phy_reg(hw, MII_DBG_ADDR, 3); | ||
2839 | atl2_write_phy_reg(hw, MII_DBG_DATA, 0); | ||
2840 | } | ||
2841 | |||
2842 | /* This is the only thing that needs to be changed to adjust the | ||
2843 | * maximum number of ports that the driver can manage. | ||
2844 | */ | ||
2845 | #define ATL2_MAX_NIC 4 | ||
2846 | |||
2847 | #define OPTION_UNSET -1 | ||
2848 | #define OPTION_DISABLED 0 | ||
2849 | #define OPTION_ENABLED 1 | ||
2850 | |||
2851 | /* All parameters are treated the same, as an integer array of values. | ||
2852 | * This macro just reduces the need to repeat the same declaration code | ||
2853 | * over and over (plus this helps to avoid typo bugs). | ||
2854 | */ | ||
2855 | #define ATL2_PARAM_INIT {[0 ... ATL2_MAX_NIC] = OPTION_UNSET} | ||
2856 | #ifndef module_param_array | ||
2857 | /* Module Parameters are always initialized to -1, so that the driver | ||
2858 | * can tell the difference between no user specified value or the | ||
2859 | * user asking for the default value. | ||
2860 | * The true default values are loaded in when atl2_check_options is called. | ||
2861 | * | ||
2862 | * This is a GCC extension to ANSI C. | ||
2863 | * See the item "Labeled Elements in Initializers" in the section | ||
2864 | * "Extensions to the C Language Family" of the GCC documentation. | ||
2865 | */ | ||
2866 | |||
2867 | #define ATL2_PARAM(X, desc) \ | ||
2868 | static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \ | ||
2869 | MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \ | ||
2870 | MODULE_PARM_DESC(X, desc); | ||
2871 | #else | ||
2872 | #define ATL2_PARAM(X, desc) \ | ||
2873 | static int __devinitdata X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \ | ||
2874 | static int num_##X = 0; \ | ||
2875 | module_param_array_named(X, X, int, &num_##X, 0); \ | ||
2876 | MODULE_PARM_DESC(X, desc); | ||
2877 | #endif | ||
2878 | |||
2879 | /* | ||
2880 | * Transmit Memory Size | ||
2881 | * Valid Range: 4-64 (KB) | ||
2882 | * Default Value: 8 (KB) | ||
2883 | */ | ||
2884 | #define ATL2_MIN_TX_MEMSIZE 4 /* 4KB */ | ||
2885 | #define ATL2_MAX_TX_MEMSIZE 64 /* 64KB */ | ||
2886 | #define ATL2_DEFAULT_TX_MEMSIZE 8 /* 8KB */ | ||
2887 | ATL2_PARAM(TxMemSize, "Bytes of Transmit Memory"); | ||
2888 | |||
2889 | /* | ||
2890 | * Receive Memory Block Count | ||
2891 | * Valid Range: 16-512 | ||
2892 | * Default Value: 64 | ||
2893 | */ | ||
2894 | #define ATL2_MIN_RXD_COUNT 16 | ||
2895 | #define ATL2_MAX_RXD_COUNT 512 | ||
2896 | #define ATL2_DEFAULT_RXD_COUNT 64 | ||
2897 | ATL2_PARAM(RxMemBlock, "Number of receive memory blocks"); | ||
2898 | |||
2899 | /* | ||
2900 | * User Specified MediaType Override | ||
2901 | * | ||
2902 | * Valid Range: 0-5 | ||
2903 | * - 0 - auto-negotiate at all supported speeds | ||
2904 | * - 1 - only link at 1000Mbps Full Duplex | ||
2905 | * - 2 - only link at 100Mbps Full Duplex | ||
2906 | * - 3 - only link at 100Mbps Half Duplex | ||
2907 | * - 4 - only link at 10Mbps Full Duplex | ||
2908 | * - 5 - only link at 10Mbps Half Duplex | ||
2909 | * Default Value: 0 | ||
2910 | */ | ||
2911 | ATL2_PARAM(MediaType, "MediaType Select"); | ||
2912 | |||
2913 | /* | ||
2914 | * Interrupt Moderate Timer in units of 2048 ns (~2 us) | ||
2915 | * Valid Range: 50-65000 | ||
2916 | * Default Value: 100 (~200 us) | ||
2917 | */ | ||
2918 | #define INT_MOD_DEFAULT_CNT 100 /* 200us */ | ||
2919 | #define INT_MOD_MAX_CNT 65000 | ||
2920 | #define INT_MOD_MIN_CNT 50 | ||
2921 | ATL2_PARAM(IntModTimer, "Interrupt Moderator Timer"); | ||
2922 | |||
2923 | /* | ||
2924 | * FlashVendor | ||
2925 | * Valid Range: 0-2 | ||
2926 | * 0 - Atmel | ||
2927 | * 1 - SST | ||
2928 | * 2 - ST | ||
2929 | */ | ||
2930 | ATL2_PARAM(FlashVendor, "SPI Flash Vendor"); | ||
2931 | |||
2932 | #define AUTONEG_ADV_DEFAULT 0x2F | ||
2933 | #define AUTONEG_ADV_MASK 0x2F | ||
2934 | #define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL | ||
2935 | |||
2936 | #define FLASH_VENDOR_DEFAULT 0 | ||
2937 | #define FLASH_VENDOR_MIN 0 | ||
2938 | #define FLASH_VENDOR_MAX 2 | ||
2939 | |||
2940 | struct atl2_option { | ||
2941 | enum { enable_option, range_option, list_option } type; | ||
2942 | char *name; | ||
2943 | char *err; | ||
2944 | int def; | ||
2945 | union { | ||
2946 | struct { /* range_option info */ | ||
2947 | int min; | ||
2948 | int max; | ||
2949 | } r; | ||
2950 | struct { /* list_option info */ | ||
2951 | int nr; | ||
2952 | struct atl2_opt_list { int i; char *str; } *p; | ||
2953 | } l; | ||
2954 | } arg; | ||
2955 | }; | ||
2956 | |||
2957 | static int __devinit atl2_validate_option(int *value, struct atl2_option *opt) | ||
2958 | { | ||
2959 | int i; | ||
2960 | struct atl2_opt_list *ent; | ||
2961 | |||
2962 | if (*value == OPTION_UNSET) { | ||
2963 | *value = opt->def; | ||
2964 | return 0; | ||
2965 | } | ||
2966 | |||
2967 | switch (opt->type) { | ||
2968 | case enable_option: | ||
2969 | switch (*value) { | ||
2970 | case OPTION_ENABLED: | ||
2971 | printk(KERN_INFO "%s Enabled\n", opt->name); | ||
2972 | return 0; | ||
2973 | break; | ||
2974 | case OPTION_DISABLED: | ||
2975 | printk(KERN_INFO "%s Disabled\n", opt->name); | ||
2976 | return 0; | ||
2977 | break; | ||
2978 | } | ||
2979 | break; | ||
2980 | case range_option: | ||
2981 | if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { | ||
2982 | printk(KERN_INFO "%s set to %i\n", opt->name, *value); | ||
2983 | return 0; | ||
2984 | } | ||
2985 | break; | ||
2986 | case list_option: | ||
2987 | for (i = 0; i < opt->arg.l.nr; i++) { | ||
2988 | ent = &opt->arg.l.p[i]; | ||
2989 | if (*value == ent->i) { | ||
2990 | if (ent->str[0] != '\0') | ||
2991 | printk(KERN_INFO "%s\n", ent->str); | ||
2992 | return 0; | ||
2993 | } | ||
2994 | } | ||
2995 | break; | ||
2996 | default: | ||
2997 | BUG(); | ||
2998 | } | ||
2999 | |||
3000 | printk(KERN_INFO "Invalid %s specified (%i) %s\n", | ||
3001 | opt->name, *value, opt->err); | ||
3002 | *value = opt->def; | ||
3003 | return -1; | ||
3004 | } | ||
3005 | |||
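atl2_check_options() below only ever fills in range_options; for
completeness, a hedged sketch of how a list_option could be assembled and
passed to atl2_validate_option() (the table contents and helper name are
illustrative, not taken from the driver):

	static void example_validate_flash(void)
	{
		static struct atl2_opt_list flash_list[] = {
			{ 0, "Atmel flash selected" },
			{ 1, "SST flash selected" },
			{ 2, "ST flash selected" },
		};
		struct atl2_option opt = {
			.type = list_option,
			.name = "SPI Flash Vendor",
			.err = "using default of 0",
			.def = 0,
		};
		int val = 1;

		opt.arg.l.nr = ARRAY_SIZE(flash_list);
		opt.arg.l.p = flash_list;
		atl2_validate_option(&val, &opt); /* prints "SST flash selected" */
	}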
3006 | /* | ||
3007 | * atl2_check_options - Range Checking for Command Line Parameters | ||
3008 | * @adapter: board private structure | ||
3009 | * | ||
3010 | * This routine checks all command line parameters for valid user | ||
3011 | * input. If an invalid value is given, or if no user specified | ||
3012 | * value exists, a default value is used. The final value is stored | ||
3013 | * in a variable in the adapter structure. | ||
3014 | */ | ||
3015 | static void __devinit atl2_check_options(struct atl2_adapter *adapter) | ||
3016 | { | ||
3017 | int val; | ||
3018 | struct atl2_option opt; | ||
3019 | int bd = adapter->bd_number; | ||
3020 | if (bd >= ATL2_MAX_NIC) { | ||
3021 | printk(KERN_NOTICE "Warning: no configuration for board #%i\n", | ||
3022 | bd); | ||
3023 | printk(KERN_NOTICE "Using defaults for all values\n"); | ||
3024 | #ifndef module_param_array | ||
3025 | bd = ATL2_MAX_NIC; | ||
3026 | #endif | ||
3027 | } | ||
3028 | |||
3029 | /* Kilobytes of Transmit Memory */ | ||
3030 | opt.type = range_option; | ||
3031 | opt.name = "Kilobytes of Transmit Memory"; | ||
3032 | opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_TX_MEMSIZE); | ||
3033 | opt.def = ATL2_DEFAULT_TX_MEMSIZE; | ||
3034 | opt.arg.r.min = ATL2_MIN_TX_MEMSIZE; | ||
3035 | opt.arg.r.max = ATL2_MAX_TX_MEMSIZE; | ||
3036 | #ifdef module_param_array | ||
3037 | if (num_TxMemSize > bd) { | ||
3038 | #endif | ||
3039 | val = TxMemSize[bd]; | ||
3040 | atl2_validate_option(&val, &opt); | ||
3041 | adapter->txd_ring_size = ((u32) val) * 1024; | ||
3042 | #ifdef module_param_array | ||
3043 | } else | ||
3044 | adapter->txd_ring_size = ((u32)opt.def) * 1024; | ||
3045 | #endif | ||
3046 | /* txs ring size: */ | ||
3047 | adapter->txs_ring_size = adapter->txd_ring_size / 128; | ||
3048 | if (adapter->txs_ring_size > 160) | ||
3049 | adapter->txs_ring_size = 160; | ||
3050 | |||
3051 | /* Receive Memory Block Count */ | ||
3052 | opt.type = range_option; | ||
3053 | opt.name = "Number of receive memory blocks"; | ||
3054 | opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_RXD_COUNT); | ||
3055 | opt.def = ATL2_DEFAULT_RXD_COUNT; | ||
3056 | opt.arg.r.min = ATL2_MIN_RXD_COUNT; | ||
3057 | opt.arg.r.max = ATL2_MAX_RXD_COUNT; | ||
3058 | #ifdef module_param_array | ||
3059 | if (num_RxMemBlock > bd) { | ||
3060 | #endif | ||
3061 | val = RxMemBlock[bd]; | ||
3062 | atl2_validate_option(&val, &opt); | ||
3063 | adapter->rxd_ring_size = (u32)val; | ||
3064 | /* FIXME */ | ||
3065 | /* ((u16)val)&~1; */ /* even number */ | ||
3066 | #ifdef module_param_array | ||
3067 | } else | ||
3068 | adapter->rxd_ring_size = (u32)opt.def; | ||
3069 | #endif | ||
3070 | /* init RXD Flow control value */ | ||
3071 | adapter->hw.fc_rxd_hi = (adapter->rxd_ring_size / 8) * 7; | ||
3072 | adapter->hw.fc_rxd_lo = (ATL2_MIN_RXD_COUNT / 8) > | ||
3073 | (adapter->rxd_ring_size / 12) ? (ATL2_MIN_RXD_COUNT / 8) : | ||
3074 | (adapter->rxd_ring_size / 12); | ||
3075 | |||
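With every module parameter left unset, the defaults above work out as
follows (a worked pass over the code just shown):

	/* TxMemSize  unset -> txd_ring_size = 8 * 1024        = 8192 bytes
	 *                     txs_ring_size = 8192 / 128      = 64 (< 160 cap)
	 * RxMemBlock unset -> rxd_ring_size = 64 blocks
	 * fc_rxd_hi = (64 / 8) * 7                            = 56
	 * fc_rxd_lo = max(ATL2_MIN_RXD_COUNT / 8, 64 / 12)    = max(2, 5) = 5
	 */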
3076 | /* Interrupt Moderate Timer */ | ||
3077 | opt.type = range_option; | ||
3078 | opt.name = "Interrupt Moderate Timer"; | ||
3079 | opt.err = "using default of " __MODULE_STRING(INT_MOD_DEFAULT_CNT); | ||
3080 | opt.def = INT_MOD_DEFAULT_CNT; | ||
3081 | opt.arg.r.min = INT_MOD_MIN_CNT; | ||
3082 | opt.arg.r.max = INT_MOD_MAX_CNT; | ||
3083 | #ifdef module_param_array | ||
3084 | if (num_IntModTimer > bd) { | ||
3085 | #endif | ||
3086 | val = IntModTimer[bd]; | ||
3087 | atl2_validate_option(&val, &opt); | ||
3088 | adapter->imt = (u16) val; | ||
3089 | #ifdef module_param_array | ||
3090 | } else | ||
3091 | adapter->imt = (u16)(opt.def); | ||
3092 | #endif | ||
3093 | /* Flash Vendor */ | ||
3094 | opt.type = range_option; | ||
3095 | opt.name = "SPI Flash Vendor"; | ||
3096 | opt.err = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT); | ||
3097 | opt.def = FLASH_VENDOR_DEFAULT; | ||
3098 | opt.arg.r.min = FLASH_VENDOR_MIN; | ||
3099 | opt.arg.r.max = FLASH_VENDOR_MAX; | ||
3100 | #ifdef module_param_array | ||
3101 | if (num_FlashVendor > bd) { | ||
3102 | #endif | ||
3103 | val = FlashVendor[bd]; | ||
3104 | atl2_validate_option(&val, &opt); | ||
3105 | adapter->hw.flash_vendor = (u8) val; | ||
3106 | #ifdef module_param_array | ||
3107 | } else | ||
3108 | adapter->hw.flash_vendor = (u8)(opt.def); | ||
3109 | #endif | ||
3110 | /* MediaType */ | ||
3111 | opt.type = range_option; | ||
3112 | opt.name = "Speed/Duplex Selection"; | ||
3113 | opt.err = "using default of " __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR); | ||
3114 | opt.def = MEDIA_TYPE_AUTO_SENSOR; | ||
3115 | opt.arg.r.min = MEDIA_TYPE_AUTO_SENSOR; | ||
3116 | opt.arg.r.max = MEDIA_TYPE_10M_HALF; | ||
3117 | #ifdef module_param_array | ||
3118 | if (num_MediaType > bd) { | ||
3119 | #endif | ||
3120 | val = MediaType[bd]; | ||
3121 | atl2_validate_option(&val, &opt); | ||
3122 | adapter->hw.MediaType = (u16) val; | ||
3123 | #ifdef module_param_array | ||
3124 | } else | ||
3125 | adapter->hw.MediaType = (u16)(opt.def); | ||
3126 | #endif | ||
3127 | } | ||
diff --git a/drivers/net/atlx/atl2.h b/drivers/net/atlx/atl2.h new file mode 100644 index 000000000000..6e1f28ff227b --- /dev/null +++ b/drivers/net/atlx/atl2.h | |||
@@ -0,0 +1,530 @@ | |||
1 | /* atl2.h -- atl2 driver definitions | ||
2 | * | ||
3 | * Copyright(c) 2007 Atheros Corporation. All rights reserved. | ||
4 | * Copyright(c) 2006 xiong huang <xiong.huang@atheros.com> | ||
5 | * Copyright(c) 2007 Chris Snook <csnook@redhat.com> | ||
6 | * | ||
7 | * Derived from Intel e1000 driver | ||
8 | * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the Free | ||
12 | * Software Foundation; either version 2 of the License, or (at your option) | ||
13 | * any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
16 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
18 | * more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License along with | ||
21 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
22 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
23 | */ | ||
24 | |||
25 | #ifndef _ATL2_H_ | ||
26 | #define _ATL2_H_ | ||
27 | |||
28 | #include <asm/atomic.h> | ||
29 | #include <linux/netdevice.h> | ||
30 | |||
31 | #ifndef _ATL2_HW_H_ | ||
32 | #define _ATL2_HW_H_ | ||
33 | |||
34 | #ifndef _ATL2_OSDEP_H_ | ||
35 | #define _ATL2_OSDEP_H_ | ||
36 | |||
37 | #include <linux/pci.h> | ||
38 | #include <linux/delay.h> | ||
39 | #include <linux/interrupt.h> | ||
40 | #include <linux/if_ether.h> | ||
41 | |||
42 | #include "atlx.h" | ||
43 | |||
44 | #ifdef ETHTOOL_OPS_COMPAT | ||
45 | extern int ethtool_ioctl(struct ifreq *ifr); | ||
46 | #endif | ||
47 | |||
48 | #define PCI_COMMAND_REGISTER PCI_COMMAND | ||
49 | #define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE | ||
50 | #define ETH_ADDR_LEN ETH_ALEN | ||
51 | |||
52 | #define ATL2_WRITE_REG(a, reg, value) (iowrite32((value), \ | ||
53 | ((a)->hw_addr + (reg)))) | ||
54 | |||
55 | #define ATL2_WRITE_FLUSH(a) (ioread32((a)->hw_addr)) | ||
56 | |||
57 | #define ATL2_READ_REG(a, reg) (ioread32((a)->hw_addr + (reg))) | ||
58 | |||
59 | #define ATL2_WRITE_REGB(a, reg, value) (iowrite8((value), \ | ||
60 | ((a)->hw_addr + (reg)))) | ||
61 | |||
62 | #define ATL2_READ_REGB(a, reg) (ioread8((a)->hw_addr + (reg))) | ||
63 | |||
64 | #define ATL2_WRITE_REGW(a, reg, value) (iowrite16((value), \ | ||
65 | ((a)->hw_addr + (reg)))) | ||
66 | |||
67 | #define ATL2_READ_REGW(a, reg) (ioread16((a)->hw_addr + (reg))) | ||
68 | |||
69 | #define ATL2_WRITE_REG_ARRAY(a, reg, offset, value) \ | ||
70 | (iowrite32((value), (((a)->hw_addr + (reg)) + ((offset) << 2)))) | ||
71 | |||
72 | #define ATL2_READ_REG_ARRAY(a, reg, offset) \ | ||
73 | (ioread32(((a)->hw_addr + (reg)) + ((offset) << 2))) | ||
74 | |||
75 | #endif /* _ATL2_OSDEP_H_ */ | ||
76 | |||
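A minimal sketch of how these accessors compose into the usual
read-modify-write pattern, borrowing REG_DMAR and DMAR_EN which are defined
further down in this header (the helper itself is hypothetical):

	/* illustrative only: set the DMAR enable bit and post the write */
	static void example_enable_dmar(struct atl2_hw *hw)
	{
		u32 val = ATL2_READ_REG(hw, REG_DMAR);

		ATL2_WRITE_REG(hw, REG_DMAR, val | DMAR_EN);
		ATL2_WRITE_FLUSH(hw); /* flush the posted write */
	}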
77 | struct atl2_adapter; | ||
78 | struct atl2_hw; | ||
79 | |||
80 | /* function prototype */ | ||
81 | static s32 atl2_reset_hw(struct atl2_hw *hw); | ||
82 | static s32 atl2_read_mac_addr(struct atl2_hw *hw); | ||
83 | static s32 atl2_init_hw(struct atl2_hw *hw); | ||
84 | static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed, | ||
85 | u16 *duplex); | ||
86 | static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr); | ||
87 | static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value); | ||
88 | static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data); | ||
89 | static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data); | ||
90 | static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value); | ||
91 | static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value); | ||
92 | static void atl2_set_mac_addr(struct atl2_hw *hw); | ||
93 | static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue); | ||
94 | static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value); | ||
95 | static s32 atl2_phy_init(struct atl2_hw *hw); | ||
96 | static int atl2_check_eeprom_exist(struct atl2_hw *hw); | ||
97 | static void atl2_force_ps(struct atl2_hw *hw); | ||
98 | |||
99 | /* register definition */ | ||
100 | |||
101 | /* Block IDLE Status Register */ | ||
102 | #define IDLE_STATUS_RXMAC 1 /* 1: RXMAC is non-IDLE */ | ||
103 | #define IDLE_STATUS_TXMAC 2 /* 1: TXMAC is non-IDLE */ | ||
104 | #define IDLE_STATUS_DMAR 8 /* 1: DMAR is non-IDLE */ | ||
105 | #define IDLE_STATUS_DMAW 4 /* 1: DMAW is non-IDLE */ | ||
106 | |||
107 | /* MDIO Control Register */ | ||
108 | #define MDIO_WAIT_TIMES 10 | ||
109 | |||
110 | /* MAC Control Register */ | ||
111 | #define MAC_CTRL_DBG_TX_BKPRESURE 0x100000 /* 1: TX max backoff */ | ||
112 | #define MAC_CTRL_MACLP_CLK_PHY 0x8000000 /* 1: 25MHz from phy */ | ||
113 | #define MAC_CTRL_HALF_LEFT_BUF_SHIFT 28 | ||
114 | #define MAC_CTRL_HALF_LEFT_BUF_MASK 0xF /* MAC retry buf x32B */ | ||
115 | |||
116 | /* Internal SRAM Partition Register */ | ||
117 | #define REG_SRAM_TXRAM_END 0x1500 /* Internal tail address of TXRAM | ||
118 | * default: 2byte*1024 */ | ||
119 | #define REG_SRAM_RXRAM_END 0x1502 /* Internal tail address of RXRAM | ||
120 | * default: 2byte*1024 */ | ||
121 | |||
122 | /* Descriptor Control register */ | ||
123 | #define REG_TXD_BASE_ADDR_LO 0x1544 /* The base address of the Transmit | ||
124 | * Data Mem low 32-bit (dword align) */ | ||
125 | #define REG_TXD_MEM_SIZE 0x1548 /* Transmit Data Memory size (in | ||
126 | * double words, max 256KB) */ | ||
127 | #define REG_TXS_BASE_ADDR_LO 0x154C /* The base address of the Transmit | ||
128 | * Status Memory low 32-bit (dword | ||
129 | * align) */ | ||
130 | #define REG_TXS_MEM_SIZE 0x1550 /* double word unit, max 4*2047 | ||
131 | * bytes. */ | ||
132 | #define REG_RXD_BASE_ADDR_LO 0x1554 /* The base address of the Receive | ||
133 | * Data & Status Memory low 32-bit | ||
134 | * (unit 8 bytes) */ | ||
135 | #define REG_RXD_BUF_NUM 0x1558 /* Receive Data & Status Memory buffer | ||
136 | * number (unit 1536bytes, max | ||
137 | * 1536*2047) */ | ||
138 | |||
139 | /* DMAR Control Register */ | ||
140 | #define REG_DMAR 0x1580 | ||
141 | #define DMAR_EN 0x1 /* 1: Enable DMAR */ | ||
142 | |||
143 | /* TX Cut-Through (early tx threshold) Control Register */ | ||
144 | #define REG_TX_CUT_THRESH 0x1590 /* TxMac begin transmit packet | ||
145 | * threshold(unit word) */ | ||
146 | |||
147 | /* DMAW Control Register */ | ||
148 | #define REG_DMAW 0x15A0 | ||
149 | #define DMAW_EN 0x1 | ||
150 | |||
151 | /* Flow control register */ | ||
152 | #define REG_PAUSE_ON_TH 0x15A8 /* RXD high watermark of overflow | ||
153 | * threshold configuration register */ | ||
154 | #define REG_PAUSE_OFF_TH 0x15AA /* RXD lower watermark of overflow | ||
155 | * threshold configuration register */ | ||
156 | |||
157 | /* Mailbox Register */ | ||
158 | #define REG_MB_TXD_WR_IDX 0x15f0 /* double word align */ | ||
159 | #define REG_MB_RXD_RD_IDX 0x15F4 /* RXD Read index (unit: 1536 bytes) */ | ||
160 | |||
161 | /* Interrupt Status Register */ | ||
162 | #define ISR_TIMER 1 /* Interrupt when Timer counts down to zero */ | ||
163 | #define ISR_MANUAL 2 /* Software manual interrupt, for debug. Set | ||
164 | * when SW_MAN_INT_EN is set in Table 51 | ||
165 | * Selene Master Control Register | ||
166 | * (Offset 0x1400). */ | ||
167 | #define ISR_RXF_OV 4 /* RXF overflow interrupt */ | ||
168 | #define ISR_TXF_UR 8 /* TXF underrun interrupt */ | ||
169 | #define ISR_TXS_OV 0x10 /* Internal transmit status buffer full | ||
170 | * interrupt */ | ||
171 | #define ISR_RXS_OV 0x20 /* Internal receive status buffer full | ||
172 | * interrupt */ | ||
173 | #define ISR_LINK_CHG 0x40 /* Link Status Change Interrupt */ | ||
174 | #define ISR_HOST_TXD_UR 0x80 | ||
175 | #define ISR_HOST_RXD_OV 0x100 /* Host rx data memory full, one pulse */ | ||
176 | #define ISR_DMAR_TO_RST 0x200 /* DMAR op timeout interrupt. SW should | ||
177 | * do Reset */ | ||
178 | #define ISR_DMAW_TO_RST 0x400 | ||
179 | #define ISR_PHY 0x800 /* phy interrupt */ | ||
180 | #define ISR_TS_UPDATE 0x10000 /* interrupt after new tx pkt status written | ||
181 | * to host */ | ||
182 | #define ISR_RS_UPDATE 0x20000 /* interrupt after new rx pkt status written | ||
183 | * to host. */ | ||
184 | #define ISR_TX_EARLY 0x40000 /* interrupt when txmac begin transmit one | ||
185 | * packet */ | ||
186 | |||
187 | #define ISR_TX_EVENT (ISR_TXF_UR | ISR_TXS_OV | ISR_HOST_TXD_UR |\ | ||
188 | ISR_TS_UPDATE | ISR_TX_EARLY) | ||
189 | #define ISR_RX_EVENT (ISR_RXF_OV | ISR_RXS_OV | ISR_HOST_RXD_OV |\ | ||
190 | ISR_RS_UPDATE) | ||
191 | |||
192 | #define IMR_NORMAL_MASK (\ | ||
193 | /*ISR_LINK_CHG |*/\ | ||
194 | ISR_MANUAL |\ | ||
195 | ISR_DMAR_TO_RST |\ | ||
196 | ISR_DMAW_TO_RST |\ | ||
197 | ISR_PHY |\ | ||
198 | ISR_PHY_LINKDOWN |\ | ||
199 | ISR_TS_UPDATE |\ | ||
200 | ISR_RS_UPDATE) | ||
201 | |||
202 | /* Receive MAC Statistics Registers */ | ||
203 | #define REG_STS_RX_PAUSE 0x1700 /* Num pause packets received */ | ||
204 | #define REG_STS_RXD_OV 0x1704 /* Num frames dropped due to RX | ||
205 | * FIFO overflow */ | ||
206 | #define REG_STS_RXS_OV 0x1708 /* Num frames dropped due to RX | ||
207 | * Status Buffer Overflow */ | ||
208 | #define REG_STS_RX_FILTER 0x170C /* Num packets dropped due to | ||
209 | * address filtering */ | ||
210 | |||
211 | /* MII definitions */ | ||
212 | |||
213 | /* PHY Common Register */ | ||
214 | #define MII_SMARTSPEED 0x14 | ||
215 | #define MII_DBG_ADDR 0x1D | ||
216 | #define MII_DBG_DATA 0x1E | ||
217 | |||
218 | /* PCI Command Register Bit Definitions */ | ||
219 | #define PCI_REG_COMMAND 0x04 | ||
220 | #define CMD_IO_SPACE 0x0001 | ||
221 | #define CMD_MEMORY_SPACE 0x0002 | ||
222 | #define CMD_BUS_MASTER 0x0004 | ||
223 | |||
224 | #define MEDIA_TYPE_100M_FULL 1 | ||
225 | #define MEDIA_TYPE_100M_HALF 2 | ||
226 | #define MEDIA_TYPE_10M_FULL 3 | ||
227 | #define MEDIA_TYPE_10M_HALF 4 | ||
228 | |||
229 | #define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x000F /* Everything */ | ||
230 | |||
231 | /* The size (in bytes) of an Ethernet packet */ | ||
232 | #define ENET_HEADER_SIZE 14 | ||
233 | #define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */ | ||
234 | #define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */ | ||
235 | #define ETHERNET_FCS_SIZE 4 | ||
236 | #define MAX_JUMBO_FRAME_SIZE 0x2000 | ||
237 | #define VLAN_SIZE 4 | ||
238 | |||
239 | struct tx_pkt_header { | ||
240 | unsigned pkt_size:11; | ||
241 | unsigned:4; /* reserved */ | ||
242 | unsigned ins_vlan:1; /* txmac should insert vlan */ | ||
243 | unsigned short vlan; /* vlan tag */ | ||
244 | }; | ||
245 | /* FIXME: replace above bitfields with MASK/SHIFT defines below */ | ||
246 | #define TX_PKT_HEADER_SIZE_MASK 0x7FF | ||
247 | #define TX_PKT_HEADER_SIZE_SHIFT 0 | ||
248 | #define TX_PKT_HEADER_INS_VLAN_MASK 0x1 | ||
249 | #define TX_PKT_HEADER_INS_VLAN_SHIFT 15 | ||
250 | #define TX_PKT_HEADER_VLAN_TAG_MASK 0xFFFF | ||
251 | #define TX_PKT_HEADER_VLAN_TAG_SHIFT 16 | ||
252 | |||
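The FIXME above asks for the bitfields to be replaced with these MASK/SHIFT
pairs; a hedged sketch of what the replacement encoder could look like (the
helper name is hypothetical):

	/* pack a tx packet header dword from the MASK/SHIFT pairs above */
	static inline u32 tx_pkt_header_build(u16 pkt_size, int ins_vlan,
		u16 vlan)
	{
		return ((pkt_size & TX_PKT_HEADER_SIZE_MASK) <<
				TX_PKT_HEADER_SIZE_SHIFT) |
		       ((ins_vlan & TX_PKT_HEADER_INS_VLAN_MASK) <<
				TX_PKT_HEADER_INS_VLAN_SHIFT) |
		       (((u32)vlan & TX_PKT_HEADER_VLAN_TAG_MASK) <<
				TX_PKT_HEADER_VLAN_TAG_SHIFT);
	}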
253 | struct tx_pkt_status { | ||
254 | unsigned pkt_size:11; | ||
255 | unsigned:5; /* reserved */ | ||
256 | unsigned ok:1; /* current packet transmitted without error */ | ||
257 | unsigned bcast:1; /* broadcast packet */ | ||
258 | unsigned mcast:1; /* multicast packet */ | ||
259 | unsigned pause:1; /* transmitted a pause frame */ | ||
260 | unsigned ctrl:1; | ||
261 | unsigned defer:1; /* current packet is xmitted with defer */ | ||
262 | unsigned exc_defer:1; | ||
263 | unsigned single_col:1; | ||
264 | unsigned multi_col:1; | ||
265 | unsigned late_col:1; | ||
266 | unsigned abort_col:1; | ||
267 | unsigned underun:1; /* current packet is aborted | ||
268 | * due to txram underrun */ | ||
269 | unsigned:3; /* reserved */ | ||
270 | unsigned update:1; /* always 1'b1 in tx_status_buf */ | ||
271 | }; | ||
272 | /* FIXME: replace above bitfields with MASK/SHIFT defines below */ | ||
273 | #define TX_PKT_STATUS_SIZE_MASK 0x7FF | ||
274 | #define TX_PKT_STATUS_SIZE_SHIFT 0 | ||
275 | #define TX_PKT_STATUS_OK_MASK 0x1 | ||
276 | #define TX_PKT_STATUS_OK_SHIFT 16 | ||
277 | #define TX_PKT_STATUS_BCAST_MASK 0x1 | ||
278 | #define TX_PKT_STATUS_BCAST_SHIFT 17 | ||
279 | #define TX_PKT_STATUS_MCAST_MASK 0x1 | ||
280 | #define TX_PKT_STATUS_MCAST_SHIFT 18 | ||
281 | #define TX_PKT_STATUS_PAUSE_MASK 0x1 | ||
282 | #define TX_PKT_STATUS_PAUSE_SHIFT 19 | ||
283 | #define TX_PKT_STATUS_CTRL_MASK 0x1 | ||
284 | #define TX_PKT_STATUS_CTRL_SHIFT 20 | ||
285 | #define TX_PKT_STATUS_DEFER_MASK 0x1 | ||
286 | #define TX_PKT_STATUS_DEFER_SHIFT 21 | ||
287 | #define TX_PKT_STATUS_EXC_DEFER_MASK 0x1 | ||
288 | #define TX_PKT_STATUS_EXC_DEFER_SHIFT 22 | ||
289 | #define TX_PKT_STATUS_SINGLE_COL_MASK 0x1 | ||
290 | #define TX_PKT_STATUS_SINGLE_COL_SHIFT 23 | ||
291 | #define TX_PKT_STATUS_MULTI_COL_MASK 0x1 | ||
292 | #define TX_PKT_STATUS_MULTI_COL_SHIFT 24 | ||
293 | #define TX_PKT_STATUS_LATE_COL_MASK 0x1 | ||
294 | #define TX_PKT_STATUS_LATE_COL_SHIFT 25 | ||
295 | #define TX_PKT_STATUS_ABORT_COL_MASK 0x1 | ||
296 | #define TX_PKT_STATUS_ABORT_COL_SHIFT 26 | ||
297 | #define TX_PKT_STATUS_UNDERRUN_MASK 0x1 | ||
298 | #define TX_PKT_STATUS_UNDERRUN_SHIFT 27 | ||
299 | #define TX_PKT_STATUS_UPDATE_MASK 0x1 | ||
300 | #define TX_PKT_STATUS_UPDATE_SHIFT 31 | ||
301 | |||
302 | struct rx_pkt_status { | ||
303 | unsigned pkt_size:11; /* packet size, max 2047 bytes */ | ||
304 | unsigned:5; /* reserved */ | ||
305 | unsigned ok:1; /* current packet received ok without error */ | ||
306 | unsigned bcast:1; /* current packet is broadcast */ | ||
307 | unsigned mcast:1; /* current packet is multicast */ | ||
308 | unsigned pause:1; | ||
309 | unsigned ctrl:1; | ||
310 | unsigned crc:1; /* received a packet with crc error */ | ||
311 | unsigned code:1; /* received a packet with code error */ | ||
312 | unsigned runt:1; /* received a packet less than 64 bytes | ||
313 | * with good crc */ | ||
314 | unsigned frag:1; /* received a packet less than 64 bytes | ||
315 | * with bad crc */ | ||
316 | unsigned trunc:1; /* current frame truncated due to rxram full */ | ||
317 | unsigned align:1; /* this packet has an alignment error */ | ||
318 | unsigned vlan:1; /* this packet has vlan */ | ||
319 | unsigned:3; /* reserved */ | ||
320 | unsigned update:1; | ||
321 | unsigned short vtag; /* vlan tag */ | ||
322 | unsigned:16; | ||
323 | }; | ||
324 | /* FIXME: replace above bitfields with MASK/SHIFT defines below */ | ||
325 | #define RX_PKT_STATUS_SIZE_MASK 0x7FF | ||
326 | #define RX_PKT_STATUS_SIZE_SHIFT 0 | ||
327 | #define RX_PKT_STATUS_OK_MASK 0x1 | ||
328 | #define RX_PKT_STATUS_OK_SHIFT 16 | ||
329 | #define RX_PKT_STATUS_BCAST_MASK 0x1 | ||
330 | #define RX_PKT_STATUS_BCAST_SHIFT 17 | ||
331 | #define RX_PKT_STATUS_MCAST_MASK 0x1 | ||
332 | #define RX_PKT_STATUS_MCAST_SHIFT 18 | ||
333 | #define RX_PKT_STATUS_PAUSE_MASK 0x1 | ||
334 | #define RX_PKT_STATUS_PAUSE_SHIFT 19 | ||
335 | #define RX_PKT_STATUS_CTRL_MASK 0x1 | ||
336 | #define RX_PKT_STATUS_CTRL_SHIFT 20 | ||
337 | #define RX_PKT_STATUS_CRC_MASK 0x1 | ||
338 | #define RX_PKT_STATUS_CRC_SHIFT 21 | ||
339 | #define RX_PKT_STATUS_CODE_MASK 0x1 | ||
340 | #define RX_PKT_STATUS_CODE_SHIFT 22 | ||
341 | #define RX_PKT_STATUS_RUNT_MASK 0x1 | ||
342 | #define RX_PKT_STATUS_RUNT_SHIFT 23 | ||
343 | #define RX_PKT_STATUS_FRAG_MASK 0x1 | ||
344 | #define RX_PKT_STATUS_FRAG_SHIFT 24 | ||
345 | #define RX_PKT_STATUS_TRUNK_MASK 0x1 | ||
346 | #define RX_PKT_STATUS_TRUNK_SHIFT 25 | ||
347 | #define RX_PKT_STATUS_ALIGN_MASK 0x1 | ||
348 | #define RX_PKT_STATUS_ALIGN_SHIFT 26 | ||
349 | #define RX_PKT_STATUS_VLAN_MASK 0x1 | ||
350 | #define RX_PKT_STATUS_VLAN_SHIFT 27 | ||
351 | #define RX_PKT_STATUS_UPDATE_MASK 0x1 | ||
352 | #define RX_PKT_STATUS_UPDATE_SHIFT 31 | ||
353 | #define RX_PKT_STATUS_VLAN_TAG_MASK 0xFFFF | ||
354 | #define RX_PKT_STATUS_VLAN_TAG_SHIFT 32 | ||
355 | |||
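The receive side uses the same scheme, but RX_PKT_STATUS_VLAN_TAG_SHIFT is
32, so the full status spans two dwords; a decoder along these lines would
work on a 64-bit view (a hypothetical sketch -- the struct above still uses
bitfields):

	/* decode a few fields from a raw 64-bit rx status word */
	static inline void rx_pkt_status_decode(u64 sts, u16 *size, int *ok,
		u16 *vtag)
	{
		*size = (u16)((sts >> RX_PKT_STATUS_SIZE_SHIFT) &
			RX_PKT_STATUS_SIZE_MASK);
		*ok = (int)((sts >> RX_PKT_STATUS_OK_SHIFT) &
			RX_PKT_STATUS_OK_MASK);
		*vtag = (u16)((sts >> RX_PKT_STATUS_VLAN_TAG_SHIFT) &
			RX_PKT_STATUS_VLAN_TAG_MASK);
	}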
356 | struct rx_desc { | ||
357 | struct rx_pkt_status status; | ||
358 | unsigned char packet[1536-sizeof(struct rx_pkt_status)]; | ||
359 | }; | ||
360 | |||
361 | enum atl2_speed_duplex { | ||
362 | atl2_10_half = 0, | ||
363 | atl2_10_full = 1, | ||
364 | atl2_100_half = 2, | ||
365 | atl2_100_full = 3 | ||
366 | }; | ||
367 | |||
368 | struct atl2_spi_flash_dev { | ||
369 | const char *manu_name; /* manufacturer id */ | ||
370 | /* op-code */ | ||
371 | u8 cmdWRSR; | ||
372 | u8 cmdREAD; | ||
373 | u8 cmdPROGRAM; | ||
374 | u8 cmdWREN; | ||
375 | u8 cmdWRDI; | ||
376 | u8 cmdRDSR; | ||
377 | u8 cmdRDID; | ||
378 | u8 cmdSECTOR_ERASE; | ||
379 | u8 cmdCHIP_ERASE; | ||
380 | }; | ||
381 | |||
382 | /* Structure containing variables used by the shared code (atl2_hw.c) */ | ||
383 | struct atl2_hw { | ||
384 | u8 __iomem *hw_addr; | ||
385 | void *back; | ||
386 | |||
387 | u8 preamble_len; | ||
388 | u8 max_retry; /* Retransmission maximum, afterwards the | ||
389 | * packet will be discarded. */ | ||
390 | u8 jam_ipg; /* IPG to start JAM for collision based flow | ||
391 | * control in half-duplex mode. In unit of | ||
392 | * 8-bit time. */ | ||
393 | u8 ipgt; /* Desired back to back inter-packet gap. The | ||
394 | * default is 96-bit time. */ | ||
395 | u8 min_ifg; /* Minimum number of IFG to enforce in between | ||
396 | * RX frames. Frame gap below such IFG is | ||
397 | * dropped. */ | ||
398 | u8 ipgr1; /* 64bit Carrier-Sense window */ | ||
399 | u8 ipgr2; /* 96-bit IPG window */ | ||
400 | u8 retry_buf; /* In half-duplex mode, hold some bytes | ||
401 | * for mac retry (unit: 8*4 bytes) */ | ||
402 | |||
403 | u16 fc_rxd_hi; | ||
404 | u16 fc_rxd_lo; | ||
405 | u16 lcol; /* Collision Window */ | ||
406 | u16 max_frame_size; | ||
407 | |||
408 | u16 MediaType; | ||
409 | u16 autoneg_advertised; | ||
410 | u16 pci_cmd_word; | ||
411 | |||
412 | u16 mii_autoneg_adv_reg; | ||
413 | |||
414 | u32 mem_rang; | ||
415 | u32 txcw; | ||
416 | u32 mc_filter_type; | ||
417 | u32 num_mc_addrs; | ||
418 | u32 collision_delta; | ||
419 | u32 tx_packet_delta; | ||
420 | u16 phy_spd_default; | ||
421 | |||
422 | u16 device_id; | ||
423 | u16 vendor_id; | ||
424 | u16 subsystem_id; | ||
425 | u16 subsystem_vendor_id; | ||
426 | u8 revision_id; | ||
427 | |||
428 | /* spi flash */ | ||
429 | u8 flash_vendor; | ||
430 | |||
431 | u8 dma_fairness; | ||
432 | u8 mac_addr[NODE_ADDRESS_SIZE]; | ||
433 | u8 perm_mac_addr[NODE_ADDRESS_SIZE]; | ||
434 | |||
435 | /* FIXME */ | ||
436 | /* bool phy_preamble_sup; */ | ||
437 | bool phy_configured; | ||
438 | }; | ||
439 | |||
440 | #endif /* _ATL2_HW_H_ */ | ||
441 | |||
442 | struct atl2_ring_header { | ||
443 | /* pointer to the descriptor ring memory */ | ||
444 | void *desc; | ||
445 | /* physical address of the descriptor ring */ | ||
446 | dma_addr_t dma; | ||
447 | /* length of descriptor ring in bytes */ | ||
448 | unsigned int size; | ||
449 | }; | ||
450 | |||
451 | /* board specific private data structure */ | ||
452 | struct atl2_adapter { | ||
453 | /* OS defined structs */ | ||
454 | struct net_device *netdev; | ||
455 | struct pci_dev *pdev; | ||
456 | struct net_device_stats net_stats; | ||
457 | #ifdef NETIF_F_HW_VLAN_TX | ||
458 | struct vlan_group *vlgrp; | ||
459 | #endif | ||
460 | u32 wol; | ||
461 | u16 link_speed; | ||
462 | u16 link_duplex; | ||
463 | |||
464 | spinlock_t stats_lock; | ||
465 | spinlock_t tx_lock; | ||
466 | |||
467 | struct work_struct reset_task; | ||
468 | struct work_struct link_chg_task; | ||
469 | struct timer_list watchdog_timer; | ||
470 | struct timer_list phy_config_timer; | ||
471 | |||
472 | unsigned long cfg_phy; | ||
473 | bool mac_disabled; | ||
474 | |||
475 | /* All Descriptor memory */ | ||
476 | dma_addr_t ring_dma; | ||
477 | void *ring_vir_addr; | ||
478 | int ring_size; | ||
479 | |||
480 | struct tx_pkt_header *txd_ring; | ||
481 | dma_addr_t txd_dma; | ||
482 | |||
483 | struct tx_pkt_status *txs_ring; | ||
484 | dma_addr_t txs_dma; | ||
485 | |||
486 | struct rx_desc *rxd_ring; | ||
487 | dma_addr_t rxd_dma; | ||
488 | |||
489 | u32 txd_ring_size; /* bytes per unit */ | ||
490 | u32 txs_ring_size; /* dwords per unit */ | ||
491 | u32 rxd_ring_size; /* 1536 bytes per unit */ | ||
492 | |||
493 | /* read/write ptrs: */ | ||
494 | /* host */ | ||
495 | u32 txd_write_ptr; | ||
496 | u32 txs_next_clear; | ||
497 | u32 rxd_read_ptr; | ||
498 | |||
499 | /* nic */ | ||
500 | atomic_t txd_read_ptr; | ||
501 | atomic_t txs_write_ptr; | ||
502 | u32 rxd_write_ptr; | ||
503 | |||
504 | /* Interrupt Moderator timer (2us resolution) */ | ||
505 | u16 imt; | ||
506 | /* Interrupt Clear timer (2us resolution) */ | ||
507 | u16 ict; | ||
508 | |||
509 | unsigned long flags; | ||
510 | /* structs defined in atl2_hw.h */ | ||
511 | u32 bd_number; /* board number */ | ||
512 | bool pci_using_64; | ||
513 | bool have_msi; | ||
514 | struct atl2_hw hw; | ||
515 | |||
516 | u32 usr_cmd; | ||
517 | /* FIXME */ | ||
518 | /* u32 regs_buff[ATL2_REGS_LEN]; */ | ||
519 | u32 pci_state[16]; | ||
520 | |||
521 | u32 *config_space; | ||
522 | }; | ||
523 | |||
524 | enum atl2_state_t { | ||
525 | __ATL2_TESTING, | ||
526 | __ATL2_RESETTING, | ||
527 | __ATL2_DOWN | ||
528 | }; | ||
529 | |||
530 | #endif /* _ATL2_H_ */ | ||
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile new file mode 100644 index 000000000000..391c3bce5b79 --- /dev/null +++ b/drivers/net/enic/Makefile | |||
@@ -0,0 +1,5 @@ | |||
1 | obj-$(CONFIG_ENIC) := enic.o | ||
2 | |||
3 | enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ | ||
4 | enic_res.o vnic_dev.o vnic_rq.o | ||
5 | |||
diff --git a/drivers/net/enic/cq_desc.h b/drivers/net/enic/cq_desc.h new file mode 100644 index 000000000000..c036a8bfd043 --- /dev/null +++ b/drivers/net/enic/cq_desc.h | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _CQ_DESC_H_ | ||
21 | #define _CQ_DESC_H_ | ||
22 | |||
23 | /* | ||
24 | * Completion queue descriptor types | ||
25 | */ | ||
26 | enum cq_desc_types { | ||
27 | CQ_DESC_TYPE_WQ_ENET = 0, | ||
28 | CQ_DESC_TYPE_DESC_COPY = 1, | ||
29 | CQ_DESC_TYPE_WQ_EXCH = 2, | ||
30 | CQ_DESC_TYPE_RQ_ENET = 3, | ||
31 | CQ_DESC_TYPE_RQ_FCP = 4, | ||
32 | }; | ||
33 | |||
34 | /* Completion queue descriptor: 16B | ||
35 | * | ||
36 | * All completion queues have this basic layout. The | ||
37 | * type_specfic area is unique for each completion | ||
38 | * queue type. | ||
39 | */ | ||
40 | struct cq_desc { | ||
41 | __le16 completed_index; | ||
42 | __le16 q_number; | ||
43 | u8 type_specfic[11]; | ||
44 | u8 type_color; | ||
45 | }; | ||
46 | |||
47 | #define CQ_DESC_TYPE_BITS 7 | ||
48 | #define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1) | ||
49 | #define CQ_DESC_COLOR_MASK 1 | ||
50 | #define CQ_DESC_Q_NUM_BITS 10 | ||
51 | #define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1) | ||
52 | #define CQ_DESC_COMP_NDX_BITS 12 | ||
53 | #define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1) | ||
54 | |||
55 | static inline void cq_desc_dec(const struct cq_desc *desc_arg, | ||
56 | u8 *type, u8 *color, u16 *q_number, u16 *completed_index) | ||
57 | { | ||
58 | const struct cq_desc *desc = desc_arg; | ||
59 | const u8 type_color = desc->type_color; | ||
60 | |||
61 | *color = (type_color >> CQ_DESC_TYPE_BITS) & CQ_DESC_COLOR_MASK; | ||
62 | |||
63 | /* | ||
64 | * Make sure color bit is read from desc *before* other fields | ||
65 | * are read from desc. Hardware guarantees color bit is last | ||
66 | * bit (byte) written. Adding the rmb() prevents the compiler | ||
67 | * and/or CPU from reordering the reads which would potentially | ||
68 | * result in reading stale values. | ||
69 | */ | ||
70 | |||
71 | rmb(); | ||
72 | |||
73 | *type = type_color & CQ_DESC_TYPE_MASK; | ||
74 | *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK; | ||
75 | *completed_index = le16_to_cpu(desc->completed_index) & | ||
76 | CQ_DESC_COMP_NDX_MASK; | ||
77 | } | ||
78 | |||
79 | #endif /* _CQ_DESC_H_ */ | ||
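cq_desc_dec() reports the color bit but leaves its interpretation to the
caller; in the usual completion-queue color scheme the consumer keeps an
expected color that flips on every ring wrap. A hedged sketch of such a
consumer (the bookkeeping struct is illustrative -- the driver's real ring
state lives elsewhere):

	struct cq_sketch {
		struct cq_desc *ring;
		unsigned int num_descs;
		unsigned int to_clean;
		u8 last_color;	/* expected color; initial value set at q init */
	};

	static void cq_service_sketch(struct cq_sketch *cq)
	{
		u8 type, color;
		u16 q_number, completed_index;

		for (;;) {
			cq_desc_dec(&cq->ring[cq->to_clean], &type, &color,
				&q_number, &completed_index);
			if (color == cq->last_color)
				break;	/* hw has not written this entry yet */

			/* ... dispatch on (type, q_number, completed_index) ... */

			if (++cq->to_clean == cq->num_descs) {
				cq->to_clean = 0;
				cq->last_color = !cq->last_color;
			}
		}
	}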
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h new file mode 100644 index 000000000000..03dce9ed612c --- /dev/null +++ b/drivers/net/enic/cq_enet_desc.h | |||
@@ -0,0 +1,169 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _CQ_ENET_DESC_H_ | ||
21 | #define _CQ_ENET_DESC_H_ | ||
22 | |||
23 | #include "cq_desc.h" | ||
24 | |||
25 | /* Ethernet completion queue descriptor: 16B */ | ||
26 | struct cq_enet_wq_desc { | ||
27 | __le16 completed_index; | ||
28 | __le16 q_number; | ||
29 | u8 reserved[11]; | ||
30 | u8 type_color; | ||
31 | }; | ||
32 | |||
33 | static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc, | ||
34 | u8 *type, u8 *color, u16 *q_number, u16 *completed_index) | ||
35 | { | ||
36 | cq_desc_dec((struct cq_desc *)desc, type, | ||
37 | color, q_number, completed_index); | ||
38 | } | ||
39 | |||
40 | /* Completion queue descriptor: Ethernet receive queue, 16B */ | ||
41 | struct cq_enet_rq_desc { | ||
42 | __le16 completed_index_flags; | ||
43 | __le16 q_number_rss_type_flags; | ||
44 | __le32 rss_hash; | ||
45 | __le16 bytes_written_flags; | ||
46 | __le16 vlan; | ||
47 | __le16 checksum_fcoe; | ||
48 | u8 flags; | ||
49 | u8 type_color; | ||
50 | }; | ||
51 | |||
52 | #define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12) | ||
53 | #define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13) | ||
54 | #define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14) | ||
55 | #define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15) | ||
56 | |||
57 | #define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4 | ||
58 | #define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \ | ||
59 | ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1) | ||
60 | #define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0 | ||
61 | #define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1 | ||
62 | #define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2 | ||
63 | #define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3 | ||
64 | #define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4 | ||
65 | #define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5 | ||
66 | #define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6 | ||
67 | |||
68 | #define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14) | ||
69 | |||
70 | #define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14 | ||
71 | #define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \ | ||
72 | ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1) | ||
73 | #define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14) | ||
74 | #define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15) | ||
75 | |||
76 | #define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4 | ||
77 | #define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \ | ||
78 | ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1) | ||
79 | #define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8 | ||
80 | #define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \ | ||
81 | ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1) | ||
82 | #define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8 | ||
83 | |||
84 | #define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0) | ||
85 | #define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0) | ||
86 | #define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1) | ||
87 | #define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1) | ||
88 | #define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2) | ||
89 | #define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3) | ||
90 | #define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4) | ||
91 | #define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5) | ||
92 | #define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6) | ||
93 | #define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7) | ||
94 | |||
95 | static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, | ||
96 | u8 *type, u8 *color, u16 *q_number, u16 *completed_index, | ||
97 | u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type, | ||
98 | u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error, | ||
99 | u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof, | ||
100 | u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof, | ||
101 | u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok, | ||
102 | u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok) | ||
103 | { | ||
104 | u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags); | ||
105 | u16 q_number_rss_type_flags = | ||
106 | le16_to_cpu(desc->q_number_rss_type_flags); | ||
107 | u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); | ||
108 | |||
109 | cq_desc_dec((struct cq_desc *)desc, type, | ||
110 | color, q_number, completed_index); | ||
111 | |||
112 | *ingress_port = (completed_index_flags & | ||
113 | CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0; | ||
114 | *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ? | ||
115 | 1 : 0; | ||
116 | *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ? | ||
117 | 1 : 0; | ||
118 | *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ? | ||
119 | 1 : 0; | ||
120 | |||
121 | *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) & | ||
122 | CQ_ENET_RQ_DESC_RSS_TYPE_MASK); | ||
123 | *csum_not_calc = (q_number_rss_type_flags & | ||
124 | CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0; | ||
125 | |||
126 | *rss_hash = le32_to_cpu(desc->rss_hash); | ||
127 | |||
128 | *bytes_written = bytes_written_flags & | ||
129 | CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; | ||
130 | *packet_error = (bytes_written_flags & | ||
131 | CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0; | ||
132 | *vlan_stripped = (bytes_written_flags & | ||
133 | CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0; | ||
134 | |||
135 | *vlan = le16_to_cpu(desc->vlan); | ||
136 | |||
137 | if (*fcoe) { | ||
138 | *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & | ||
139 | CQ_ENET_RQ_DESC_FCOE_SOF_MASK); | ||
140 | *fcoe_fc_crc_ok = (desc->flags & | ||
141 | CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0; | ||
142 | *fcoe_enc_error = (desc->flags & | ||
143 | CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0; | ||
144 | *fcoe_eof = (u8)((desc->checksum_fcoe >> | ||
145 | CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) & | ||
146 | CQ_ENET_RQ_DESC_FCOE_EOF_MASK); | ||
147 | *checksum = 0; | ||
148 | } else { | ||
149 | *fcoe_sof = 0; | ||
150 | *fcoe_fc_crc_ok = 0; | ||
151 | *fcoe_enc_error = 0; | ||
152 | *fcoe_eof = 0; | ||
153 | *checksum = le16_to_cpu(desc->checksum_fcoe); | ||
154 | } | ||
155 | |||
156 | *tcp_udp_csum_ok = | ||
157 | (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0; | ||
158 | *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0; | ||
159 | *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0; | ||
160 | *ipv4_csum_ok = | ||
161 | (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0; | ||
162 | *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0; | ||
163 | *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0; | ||
164 | *ipv4_fragment = | ||
165 | (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0; | ||
166 | *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0; | ||
167 | } | ||
168 | |||
169 | #endif /* _CQ_ENET_DESC_H_ */ | ||
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h new file mode 100644 index 000000000000..fb83c926da58 --- /dev/null +++ b/drivers/net/enic/enic.h | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _ENIC_H_ | ||
21 | #define _ENIC_H_ | ||
22 | |||
23 | #include <linux/inet_lro.h> | ||
24 | |||
25 | #include "vnic_enet.h" | ||
26 | #include "vnic_dev.h" | ||
27 | #include "vnic_wq.h" | ||
28 | #include "vnic_rq.h" | ||
29 | #include "vnic_cq.h" | ||
30 | #include "vnic_intr.h" | ||
31 | #include "vnic_stats.h" | ||
32 | #include "vnic_rss.h" | ||
33 | |||
34 | #define DRV_NAME "enic" | ||
35 | #define DRV_DESCRIPTION "Cisco 10G Ethernet Driver" | ||
36 | #define DRV_VERSION "0.0.1.18163.472" | ||
37 | #define DRV_COPYRIGHT "Copyright 2008 Cisco Systems, Inc" | ||
38 | #define PFX DRV_NAME ": " | ||
39 | |||
40 | #define ENIC_LRO_MAX_DESC 8 | ||
41 | #define ENIC_LRO_MAX_AGGR 64 | ||
42 | |||
43 | enum enic_cq_index { | ||
44 | ENIC_CQ_RQ, | ||
45 | ENIC_CQ_WQ, | ||
46 | ENIC_CQ_MAX, | ||
47 | }; | ||
48 | |||
49 | enum enic_intx_intr_index { | ||
50 | ENIC_INTX_WQ_RQ, | ||
51 | ENIC_INTX_ERR, | ||
52 | ENIC_INTX_NOTIFY, | ||
53 | ENIC_INTX_MAX, | ||
54 | }; | ||
55 | |||
56 | enum enic_msix_intr_index { | ||
57 | ENIC_MSIX_RQ, | ||
58 | ENIC_MSIX_WQ, | ||
59 | ENIC_MSIX_ERR, | ||
60 | ENIC_MSIX_NOTIFY, | ||
61 | ENIC_MSIX_MAX, | ||
62 | }; | ||
63 | |||
64 | struct enic_msix_entry { | ||
65 | int requested; | ||
66 | char devname[IFNAMSIZ]; | ||
67 | irqreturn_t (*isr)(int, void *); | ||
68 | void *devid; | ||
69 | }; | ||
70 | |||
71 | /* Per-instance private data structure */ | ||
72 | struct enic { | ||
73 | struct net_device *netdev; | ||
74 | struct pci_dev *pdev; | ||
75 | struct vnic_enet_config config; | ||
76 | struct vnic_dev_bar bar0; | ||
77 | struct vnic_dev *vdev; | ||
78 | struct net_device_stats net_stats; | ||
79 | struct timer_list notify_timer; | ||
80 | struct work_struct reset; | ||
81 | struct msix_entry msix_entry[ENIC_MSIX_MAX]; | ||
82 | struct enic_msix_entry msix[ENIC_MSIX_MAX]; | ||
83 | u32 msg_enable; | ||
84 | spinlock_t devcmd_lock; | ||
85 | u8 mac_addr[ETH_ALEN]; | ||
86 | u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; | ||
87 | unsigned int mc_count; | ||
88 | int csum_rx_enabled; | ||
89 | u32 port_mtu; | ||
90 | |||
91 | /* work queue cache line section */ | ||
92 | ____cacheline_aligned struct vnic_wq wq[1]; | ||
93 | spinlock_t wq_lock[1]; | ||
94 | unsigned int wq_count; | ||
95 | struct vlan_group *vlan_group; | ||
96 | |||
97 | /* receive queue cache line section */ | ||
98 | ____cacheline_aligned struct vnic_rq rq[1]; | ||
99 | unsigned int rq_count; | ||
100 | int (*rq_alloc_buf)(struct vnic_rq *rq); | ||
101 | struct napi_struct napi; | ||
102 | struct net_lro_mgr lro_mgr; | ||
103 | struct net_lro_desc lro_desc[ENIC_LRO_MAX_DESC]; | ||
104 | |||
105 | /* interrupt resource cache line section */ | ||
106 | ____cacheline_aligned struct vnic_intr intr[ENIC_MSIX_MAX]; | ||
107 | unsigned int intr_count; | ||
108 | u32 __iomem *legacy_pba; /* memory-mapped */ | ||
109 | |||
110 | /* completion queue cache line section */ | ||
111 | ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX]; | ||
112 | unsigned int cq_count; | ||
113 | }; | ||
114 | |||
115 | #endif /* _ENIC_H_ */ | ||
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c new file mode 100644 index 000000000000..4cf5ec76c993 --- /dev/null +++ b/drivers/net/enic/enic_main.c | |||
@@ -0,0 +1,1949 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/types.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/workqueue.h> | ||
27 | #include <linux/pci.h> | ||
28 | #include <linux/netdevice.h> | ||
29 | #include <linux/etherdevice.h> | ||
30 | #include <linux/if_ether.h> | ||
31 | #include <linux/if_vlan.h> | ||
32 | #include <linux/ethtool.h> | ||
33 | #include <linux/in.h> | ||
34 | #include <linux/ip.h> | ||
35 | #include <linux/ipv6.h> | ||
36 | #include <linux/tcp.h> | ||
37 | |||
38 | #include "cq_enet_desc.h" | ||
39 | #include "vnic_dev.h" | ||
40 | #include "vnic_intr.h" | ||
41 | #include "vnic_stats.h" | ||
42 | #include "enic_res.h" | ||
43 | #include "enic.h" | ||
44 | |||
45 | #define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) | ||
46 | #define ENIC_JUMBO_FIRST_BUF_SIZE 256 | ||
47 | |||
48 | /* Supported devices */ | ||
49 | static struct pci_device_id enic_id_table[] = { | ||
50 | { PCI_VDEVICE(CISCO, 0x0043) }, | ||
51 | { 0, } /* end of table */ | ||
52 | }; | ||
53 | |||
54 | MODULE_DESCRIPTION(DRV_DESCRIPTION); | ||
55 | MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>"); | ||
56 | MODULE_LICENSE("GPL"); | ||
57 | MODULE_VERSION(DRV_VERSION); | ||
58 | MODULE_DEVICE_TABLE(pci, enic_id_table); | ||
59 | |||
60 | struct enic_stat { | ||
61 | char name[ETH_GSTRING_LEN]; | ||
62 | unsigned int offset; | ||
63 | }; | ||
64 | |||
65 | #define ENIC_TX_STAT(stat) \ | ||
66 | { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 } | ||
67 | #define ENIC_RX_STAT(stat) \ | ||
68 | { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 } | ||
69 | |||
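The "/ 8" in these macros turns a byte offset into an index into an array of
u64 counters, on the assumption that the vnic stats structs are packed u64
fields; a short sketch (struct name hypothetical):

	struct demo_stats { u64 a; u64 b; u64 c; };
	/* offsetof(struct demo_stats, b) == 8, so 8 / 8 == 1 and
	 * ((u64 *)&stats)[1] reads 'b' -- the same indexing
	 * enic_get_ethtool_stats() performs below */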
70 | static const struct enic_stat enic_tx_stats[] = { | ||
71 | ENIC_TX_STAT(tx_frames_ok), | ||
72 | ENIC_TX_STAT(tx_unicast_frames_ok), | ||
73 | ENIC_TX_STAT(tx_multicast_frames_ok), | ||
74 | ENIC_TX_STAT(tx_broadcast_frames_ok), | ||
75 | ENIC_TX_STAT(tx_bytes_ok), | ||
76 | ENIC_TX_STAT(tx_unicast_bytes_ok), | ||
77 | ENIC_TX_STAT(tx_multicast_bytes_ok), | ||
78 | ENIC_TX_STAT(tx_broadcast_bytes_ok), | ||
79 | ENIC_TX_STAT(tx_drops), | ||
80 | ENIC_TX_STAT(tx_errors), | ||
81 | ENIC_TX_STAT(tx_tso), | ||
82 | }; | ||
83 | |||
84 | static const struct enic_stat enic_rx_stats[] = { | ||
85 | ENIC_RX_STAT(rx_frames_ok), | ||
86 | ENIC_RX_STAT(rx_frames_total), | ||
87 | ENIC_RX_STAT(rx_unicast_frames_ok), | ||
88 | ENIC_RX_STAT(rx_multicast_frames_ok), | ||
89 | ENIC_RX_STAT(rx_broadcast_frames_ok), | ||
90 | ENIC_RX_STAT(rx_bytes_ok), | ||
91 | ENIC_RX_STAT(rx_unicast_bytes_ok), | ||
92 | ENIC_RX_STAT(rx_multicast_bytes_ok), | ||
93 | ENIC_RX_STAT(rx_broadcast_bytes_ok), | ||
94 | ENIC_RX_STAT(rx_drop), | ||
95 | ENIC_RX_STAT(rx_no_bufs), | ||
96 | ENIC_RX_STAT(rx_errors), | ||
97 | ENIC_RX_STAT(rx_rss), | ||
98 | ENIC_RX_STAT(rx_crc_errors), | ||
99 | ENIC_RX_STAT(rx_frames_64), | ||
100 | ENIC_RX_STAT(rx_frames_127), | ||
101 | ENIC_RX_STAT(rx_frames_255), | ||
102 | ENIC_RX_STAT(rx_frames_511), | ||
103 | ENIC_RX_STAT(rx_frames_1023), | ||
104 | ENIC_RX_STAT(rx_frames_1518), | ||
105 | ENIC_RX_STAT(rx_frames_to_max), | ||
106 | }; | ||
107 | |||
108 | static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); | ||
109 | static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); | ||
110 | |||
111 | static int enic_get_settings(struct net_device *netdev, | ||
112 | struct ethtool_cmd *ecmd) | ||
113 | { | ||
114 | struct enic *enic = netdev_priv(netdev); | ||
115 | |||
116 | ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); | ||
117 | ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); | ||
118 | ecmd->port = PORT_FIBRE; | ||
119 | ecmd->transceiver = XCVR_EXTERNAL; | ||
120 | |||
121 | if (netif_carrier_ok(netdev)) { | ||
122 | ecmd->speed = vnic_dev_port_speed(enic->vdev); | ||
123 | ecmd->duplex = DUPLEX_FULL; | ||
124 | } else { | ||
125 | ecmd->speed = -1; | ||
126 | ecmd->duplex = -1; | ||
127 | } | ||
128 | |||
129 | ecmd->autoneg = AUTONEG_DISABLE; | ||
130 | |||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | static void enic_get_drvinfo(struct net_device *netdev, | ||
135 | struct ethtool_drvinfo *drvinfo) | ||
136 | { | ||
137 | struct enic *enic = netdev_priv(netdev); | ||
138 | struct vnic_devcmd_fw_info *fw_info; | ||
139 | |||
140 | spin_lock(&enic->devcmd_lock); | ||
141 | vnic_dev_fw_info(enic->vdev, &fw_info); | ||
142 | spin_unlock(&enic->devcmd_lock); | ||
143 | |||
144 | strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); | ||
145 | strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); | ||
146 | strncpy(drvinfo->fw_version, fw_info->fw_version, | ||
147 | sizeof(drvinfo->fw_version)); | ||
148 | strncpy(drvinfo->bus_info, pci_name(enic->pdev), | ||
149 | sizeof(drvinfo->bus_info)); | ||
150 | } | ||
151 | |||
152 | static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | ||
153 | { | ||
154 | unsigned int i; | ||
155 | |||
156 | switch (stringset) { | ||
157 | case ETH_SS_STATS: | ||
158 | for (i = 0; i < enic_n_tx_stats; i++) { | ||
159 | memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN); | ||
160 | data += ETH_GSTRING_LEN; | ||
161 | } | ||
162 | for (i = 0; i < enic_n_rx_stats; i++) { | ||
163 | memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN); | ||
164 | data += ETH_GSTRING_LEN; | ||
165 | } | ||
166 | break; | ||
167 | } | ||
168 | } | ||
169 | |||
170 | static int enic_get_stats_count(struct net_device *netdev) | ||
171 | { | ||
172 | return enic_n_tx_stats + enic_n_rx_stats; | ||
173 | } | ||
174 | |||
175 | static void enic_get_ethtool_stats(struct net_device *netdev, | ||
176 | struct ethtool_stats *stats, u64 *data) | ||
177 | { | ||
178 | struct enic *enic = netdev_priv(netdev); | ||
179 | struct vnic_stats *vstats; | ||
180 | unsigned int i; | ||
181 | |||
182 | spin_lock(&enic->devcmd_lock); | ||
183 | vnic_dev_stats_dump(enic->vdev, &vstats); | ||
184 | spin_unlock(&enic->devcmd_lock); | ||
185 | |||
186 | for (i = 0; i < enic_n_tx_stats; i++) | ||
187 | *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset]; | ||
188 | for (i = 0; i < enic_n_rx_stats; i++) | ||
189 | *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset]; | ||
190 | } | ||
191 | |||
192 | static u32 enic_get_rx_csum(struct net_device *netdev) | ||
193 | { | ||
194 | struct enic *enic = netdev_priv(netdev); | ||
195 | return enic->csum_rx_enabled; | ||
196 | } | ||
197 | |||
198 | static int enic_set_rx_csum(struct net_device *netdev, u32 data) | ||
199 | { | ||
200 | struct enic *enic = netdev_priv(netdev); | ||
201 | |||
202 | enic->csum_rx_enabled = | ||
203 | (data && ENIC_SETTING(enic, RXCSUM)) ? 1 : 0; | ||
204 | |||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | static int enic_set_tx_csum(struct net_device *netdev, u32 data) | ||
209 | { | ||
210 | struct enic *enic = netdev_priv(netdev); | ||
211 | |||
212 | if (data && ENIC_SETTING(enic, TXCSUM)) | ||
213 | netdev->features |= NETIF_F_HW_CSUM; | ||
214 | else | ||
215 | netdev->features &= ~NETIF_F_HW_CSUM; | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | static int enic_set_tso(struct net_device *netdev, u32 data) | ||
221 | { | ||
222 | struct enic *enic = netdev_priv(netdev); | ||
223 | |||
224 | if (data && ENIC_SETTING(enic, TSO)) | ||
225 | netdev->features |= | ||
226 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN; | ||
227 | else | ||
228 | netdev->features &= | ||
229 | ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN); | ||
230 | |||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | static u32 enic_get_msglevel(struct net_device *netdev) | ||
235 | { | ||
236 | struct enic *enic = netdev_priv(netdev); | ||
237 | return enic->msg_enable; | ||
238 | } | ||
239 | |||
240 | static void enic_set_msglevel(struct net_device *netdev, u32 value) | ||
241 | { | ||
242 | struct enic *enic = netdev_priv(netdev); | ||
243 | enic->msg_enable = value; | ||
244 | } | ||
245 | |||
246 | static struct ethtool_ops enic_ethtool_ops = { | ||
247 | .get_settings = enic_get_settings, | ||
248 | .get_drvinfo = enic_get_drvinfo, | ||
249 | .get_msglevel = enic_get_msglevel, | ||
250 | .set_msglevel = enic_set_msglevel, | ||
251 | .get_link = ethtool_op_get_link, | ||
252 | .get_strings = enic_get_strings, | ||
253 | .get_stats_count = enic_get_stats_count, | ||
254 | .get_ethtool_stats = enic_get_ethtool_stats, | ||
255 | .get_rx_csum = enic_get_rx_csum, | ||
256 | .set_rx_csum = enic_set_rx_csum, | ||
257 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
258 | .set_tx_csum = enic_set_tx_csum, | ||
259 | .get_sg = ethtool_op_get_sg, | ||
260 | .set_sg = ethtool_op_set_sg, | ||
261 | .get_tso = ethtool_op_get_tso, | ||
262 | .set_tso = enic_set_tso, | ||
263 | }; | ||
264 | |||
265 | static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) | ||
266 | { | ||
267 | struct enic *enic = vnic_dev_priv(wq->vdev); | ||
268 | |||
269 | if (buf->sop) | ||
270 | pci_unmap_single(enic->pdev, buf->dma_addr, | ||
271 | buf->len, PCI_DMA_TODEVICE); | ||
272 | else | ||
273 | pci_unmap_page(enic->pdev, buf->dma_addr, | ||
274 | buf->len, PCI_DMA_TODEVICE); | ||
275 | |||
276 | if (buf->os_buf) | ||
277 | dev_kfree_skb_any(buf->os_buf); | ||
278 | } | ||
279 | |||
280 | static void enic_wq_free_buf(struct vnic_wq *wq, | ||
281 | struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque) | ||
282 | { | ||
283 | enic_free_wq_buf(wq, buf); | ||
284 | } | ||
285 | |||
286 | static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, | ||
287 | u8 type, u16 q_number, u16 completed_index, void *opaque) | ||
288 | { | ||
289 | struct enic *enic = vnic_dev_priv(vdev); | ||
290 | |||
291 | spin_lock(&enic->wq_lock[q_number]); | ||
292 | |||
293 | vnic_wq_service(&enic->wq[q_number], cq_desc, | ||
294 | completed_index, enic_wq_free_buf, | ||
295 | opaque); | ||
296 | |||
297 | if (netif_queue_stopped(enic->netdev) && | ||
298 | vnic_wq_desc_avail(&enic->wq[q_number]) >= MAX_SKB_FRAGS + 1) | ||
299 | netif_wake_queue(enic->netdev); | ||
300 | |||
301 | spin_unlock(&enic->wq_lock[q_number]); | ||
302 | |||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | static void enic_log_q_error(struct enic *enic) | ||
307 | { | ||
308 | unsigned int i; | ||
309 | u32 error_status; | ||
310 | |||
311 | for (i = 0; i < enic->wq_count; i++) { | ||
312 | error_status = vnic_wq_error_status(&enic->wq[i]); | ||
313 | if (error_status) | ||
314 | printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n", | ||
315 | enic->netdev->name, i, error_status); | ||
316 | } | ||
317 | |||
318 | for (i = 0; i < enic->rq_count; i++) { | ||
319 | error_status = vnic_rq_error_status(&enic->rq[i]); | ||
320 | if (error_status) | ||
321 | printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n", | ||
322 | enic->netdev->name, i, error_status); | ||
323 | } | ||
324 | } | ||
325 | |||
326 | static void enic_link_check(struct enic *enic) | ||
327 | { | ||
328 | int link_status = vnic_dev_link_status(enic->vdev); | ||
329 | int carrier_ok = netif_carrier_ok(enic->netdev); | ||
330 | |||
331 | if (link_status && !carrier_ok) { | ||
332 | printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name); | ||
333 | netif_carrier_on(enic->netdev); | ||
334 | } else if (!link_status && carrier_ok) { | ||
335 | printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name); | ||
336 | netif_carrier_off(enic->netdev); | ||
337 | } | ||
338 | } | ||
339 | |||
340 | static void enic_mtu_check(struct enic *enic) | ||
341 | { | ||
342 | u32 mtu = vnic_dev_mtu(enic->vdev); | ||
343 | |||
344 | if (mtu != enic->port_mtu) { | ||
345 | if (mtu < enic->netdev->mtu) | ||
346 | printk(KERN_WARNING PFX | ||
347 | "%s: interface MTU (%d) set higher " | ||
348 | "than switch port MTU (%d)\n", | ||
349 | enic->netdev->name, enic->netdev->mtu, mtu); | ||
350 | enic->port_mtu = mtu; | ||
351 | } | ||
352 | } | ||
353 | |||
354 | static void enic_msglvl_check(struct enic *enic) | ||
355 | { | ||
356 | u32 msg_enable = vnic_dev_msg_lvl(enic->vdev); | ||
357 | |||
358 | if (msg_enable != enic->msg_enable) { | ||
359 | printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n", | ||
360 | enic->netdev->name, enic->msg_enable, msg_enable); | ||
361 | enic->msg_enable = msg_enable; | ||
362 | } | ||
363 | } | ||
364 | |||
365 | static void enic_notify_check(struct enic *enic) | ||
366 | { | ||
367 | enic_msglvl_check(enic); | ||
368 | enic_mtu_check(enic); | ||
369 | enic_link_check(enic); | ||
370 | } | ||
371 | |||
372 | #define ENIC_TEST_INTR(pba, i) ((pba) & (1 << (i))) | ||
373 | |||
374 | static irqreturn_t enic_isr_legacy(int irq, void *data) | ||
375 | { | ||
376 | struct net_device *netdev = data; | ||
377 | struct enic *enic = netdev_priv(netdev); | ||
378 | u32 pba; | ||
379 | |||
380 | vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]); | ||
381 | |||
382 | pba = vnic_intr_legacy_pba(enic->legacy_pba); | ||
383 | if (!pba) { | ||
384 | vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); | ||
385 | return IRQ_NONE; /* not our interrupt */ | ||
386 | } | ||
387 | |||
388 | if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) | ||
389 | enic_notify_check(enic); | ||
390 | |||
391 | if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) { | ||
392 | enic_log_q_error(enic); | ||
393 | /* schedule recovery from WQ/RQ error */ | ||
394 | schedule_work(&enic->reset); | ||
395 | return IRQ_HANDLED; | ||
396 | } | ||
397 | |||
398 | if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) { | ||
399 | if (netif_rx_schedule_prep(netdev, &enic->napi)) | ||
400 | __netif_rx_schedule(netdev, &enic->napi); | ||
401 | } else { | ||
402 | vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); | ||
403 | } | ||
404 | |||
405 | return IRQ_HANDLED; | ||
406 | } | ||
407 | |||
408 | static irqreturn_t enic_isr_msi(int irq, void *data) | ||
409 | { | ||
410 | struct enic *enic = data; | ||
411 | |||
412 | /* With MSI, there is no sharing of interrupts, so this is | ||
413 | * our interrupt and there is no need to ack it. The device | ||
414 | * is not providing per-vector masking, so the OS will not | ||
415 | * write to PCI config space to mask/unmask the interrupt. | ||
416 | * We're using mask_on_assertion for MSI, so the device | ||
417 | * automatically masks the interrupt when the interrupt is | ||
418 | * generated. Later, when exiting polling, the interrupt | ||
419 | * will be unmasked (see enic_poll). | ||
420 | * | ||
421 | * Also, the device uses the same PCIe Traffic Class (TC) | ||
422 | * for Memory Write data and MSI, so there are no ordering | ||
423 | * issues; the MSI will always arrive at the Root Complex | ||
424 | * _after_ corresponding Memory Writes (i.e. descriptor | ||
425 | * writes). | ||
426 | */ | ||
427 | |||
428 | netif_rx_schedule(enic->netdev, &enic->napi); | ||
429 | |||
430 | return IRQ_HANDLED; | ||
431 | } | ||
432 | |||
433 | static irqreturn_t enic_isr_msix_rq(int irq, void *data) | ||
434 | { | ||
435 | struct enic *enic = data; | ||
436 | |||
437 | /* schedule NAPI polling for RQ cleanup */ | ||
438 | netif_rx_schedule(enic->netdev, &enic->napi); | ||
439 | |||
440 | return IRQ_HANDLED; | ||
441 | } | ||
442 | |||
443 | static irqreturn_t enic_isr_msix_wq(int irq, void *data) | ||
444 | { | ||
445 | struct enic *enic = data; | ||
446 | unsigned int wq_work_to_do = -1; /* no limit */ | ||
447 | unsigned int wq_work_done; | ||
448 | |||
449 | wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ], | ||
450 | wq_work_to_do, enic_wq_service, NULL); | ||
451 | |||
452 | vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ], | ||
453 | wq_work_done, | ||
454 | 1 /* unmask intr */, | ||
455 | 1 /* reset intr timer */); | ||
456 | |||
457 | return IRQ_HANDLED; | ||
458 | } | ||
459 | |||
460 | static irqreturn_t enic_isr_msix_err(int irq, void *data) | ||
461 | { | ||
462 | struct enic *enic = data; | ||
463 | |||
464 | enic_log_q_error(enic); | ||
465 | |||
466 | /* schedule recovery from WQ/RQ error */ | ||
467 | schedule_work(&enic->reset); | ||
468 | |||
469 | return IRQ_HANDLED; | ||
470 | } | ||
471 | |||
472 | static irqreturn_t enic_isr_msix_notify(int irq, void *data) | ||
473 | { | ||
474 | struct enic *enic = data; | ||
475 | |||
476 | enic_notify_check(enic); | ||
477 | vnic_intr_unmask(&enic->intr[ENIC_MSIX_NOTIFY]); | ||
478 | |||
479 | return IRQ_HANDLED; | ||
480 | } | ||
481 | |||
482 | static inline void enic_queue_wq_skb_cont(struct enic *enic, | ||
483 | struct vnic_wq *wq, struct sk_buff *skb, | ||
484 | unsigned int len_left) | ||
485 | { | ||
486 | skb_frag_t *frag; | ||
487 | |||
488 | /* Queue additional data fragments */ | ||
489 | for (frag = skb_shinfo(skb)->frags; len_left; frag++) { | ||
490 | len_left -= frag->size; | ||
491 | enic_queue_wq_desc_cont(wq, skb, | ||
492 | pci_map_page(enic->pdev, frag->page, | ||
493 | frag->page_offset, frag->size, | ||
494 | PCI_DMA_TODEVICE), | ||
495 | frag->size, | ||
496 | (len_left == 0)); /* EOP? */ | ||
497 | } | ||
498 | } | ||
499 | |||
500 | static inline void enic_queue_wq_skb_vlan(struct enic *enic, | ||
501 | struct vnic_wq *wq, struct sk_buff *skb, | ||
502 | int vlan_tag_insert, unsigned int vlan_tag) | ||
503 | { | ||
504 | unsigned int head_len = skb_headlen(skb); | ||
505 | unsigned int len_left = skb->len - head_len; | ||
506 | int eop = (len_left == 0); | ||
507 | |||
508 | /* Queue the main skb fragment */ | ||
509 | enic_queue_wq_desc(wq, skb, | ||
510 | pci_map_single(enic->pdev, skb->data, | ||
511 | head_len, PCI_DMA_TODEVICE), | ||
512 | head_len, | ||
513 | vlan_tag_insert, vlan_tag, | ||
514 | eop); | ||
515 | |||
516 | if (!eop) | ||
517 | enic_queue_wq_skb_cont(enic, wq, skb, len_left); | ||
518 | } | ||
519 | |||
520 | static inline void enic_queue_wq_skb_csum_l4(struct enic *enic, | ||
521 | struct vnic_wq *wq, struct sk_buff *skb, | ||
522 | int vlan_tag_insert, unsigned int vlan_tag) | ||
523 | { | ||
524 | unsigned int head_len = skb_headlen(skb); | ||
525 | unsigned int len_left = skb->len - head_len; | ||
526 | unsigned int hdr_len = skb_transport_offset(skb); | ||
527 | unsigned int csum_offset = hdr_len + skb->csum_offset; | ||
528 | int eop = (len_left == 0); | ||
529 | |||
530 | /* Queue the main skb fragment */ | ||
531 | enic_queue_wq_desc_csum_l4(wq, skb, | ||
532 | pci_map_single(enic->pdev, skb->data, | ||
533 | head_len, PCI_DMA_TODEVICE), | ||
534 | head_len, | ||
535 | csum_offset, | ||
536 | hdr_len, | ||
537 | vlan_tag_insert, vlan_tag, | ||
538 | eop); | ||
539 | |||
540 | if (!eop) | ||
541 | enic_queue_wq_skb_cont(enic, wq, skb, len_left); | ||
542 | } | ||
543 | |||
544 | static inline void enic_queue_wq_skb_tso(struct enic *enic, | ||
545 | struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss, | ||
546 | int vlan_tag_insert, unsigned int vlan_tag) | ||
547 | { | ||
548 | unsigned int head_len = skb_headlen(skb); | ||
549 | unsigned int len_left = skb->len - head_len; | ||
550 | unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | ||
551 | int eop = (len_left == 0); | ||
552 | |||
553 | /* Preload TCP csum field with IP pseudo hdr calculated | ||
554 | * with IP length set to zero. HW will later add in length | ||
555 | * to each TCP segment resulting from the TSO. | ||
556 | */ | ||
557 | |||
558 | if (skb->protocol == __constant_htons(ETH_P_IP)) { | ||
559 | ip_hdr(skb)->check = 0; | ||
560 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, | ||
561 | ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); | ||
562 | } else if (skb->protocol == __constant_htons(ETH_P_IPV6)) { | ||
563 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | ||
564 | &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); | ||
565 | } | ||
566 | |||
567 | /* Queue the main skb fragment */ | ||
568 | enic_queue_wq_desc_tso(wq, skb, | ||
569 | pci_map_single(enic->pdev, skb->data, | ||
570 | head_len, PCI_DMA_TODEVICE), | ||
571 | head_len, | ||
572 | mss, hdr_len, | ||
573 | vlan_tag_insert, vlan_tag, | ||
574 | eop); | ||
575 | |||
576 | if (!eop) | ||
577 | enic_queue_wq_skb_cont(enic, wq, skb, len_left); | ||
578 | } | ||
579 | |||
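/* Checksum seed detail: csum_tcpudp_magic()/csum_ipv6_magic() are called
 * with a zero length above, so only the addresses and protocol are summed;
 * the hardware then folds in each segment's own length as it rewrites the
 * per-segment headers during TSO. */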
580 | static inline void enic_queue_wq_skb(struct enic *enic, | ||
581 | struct vnic_wq *wq, struct sk_buff *skb) | ||
582 | { | ||
583 | unsigned int mss = skb_shinfo(skb)->gso_size; | ||
584 | unsigned int vlan_tag = 0; | ||
585 | int vlan_tag_insert = 0; | ||
586 | |||
587 | if (enic->vlan_group && vlan_tx_tag_present(skb)) { | ||
588 | /* VLAN tag from trunking driver */ | ||
589 | vlan_tag_insert = 1; | ||
590 | vlan_tag = vlan_tx_tag_get(skb); | ||
591 | } | ||
592 | |||
593 | if (mss) | ||
594 | enic_queue_wq_skb_tso(enic, wq, skb, mss, | ||
595 | vlan_tag_insert, vlan_tag); | ||
596 | else if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
597 | enic_queue_wq_skb_csum_l4(enic, wq, skb, | ||
598 | vlan_tag_insert, vlan_tag); | ||
599 | else | ||
600 | enic_queue_wq_skb_vlan(enic, wq, skb, | ||
601 | vlan_tag_insert, vlan_tag); | ||
602 | } | ||
603 | |||
604 | /* netif_tx_lock held, process context with BHs disabled */ | ||
605 | static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) | ||
606 | { | ||
607 | struct enic *enic = netdev_priv(netdev); | ||
608 | struct vnic_wq *wq = &enic->wq[0]; | ||
609 | unsigned long flags; | ||
610 | |||
611 | if (skb->len <= 0) { | ||
612 | dev_kfree_skb(skb); | ||
613 | return NETDEV_TX_OK; | ||
614 | } | ||
615 | |||
616 | /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, | ||
617 | * which is very likely. In the off chance it's going to take | ||
618 | * more than ENIC_NON_TSO_MAX_DESC, linearize the skb. | ||
619 | */ | ||
620 | |||
621 | if (skb_shinfo(skb)->gso_size == 0 && | ||
622 | skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC && | ||
623 | skb_linearize(skb)) { | ||
624 | dev_kfree_skb(skb); | ||
625 | return NETDEV_TX_OK; | ||
626 | } | ||
627 | |||
628 | spin_lock_irqsave(&enic->wq_lock[0], flags); | ||
629 | |||
630 | if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + 1) { | ||
631 | netif_stop_queue(netdev); | ||
632 | /* This is a hard error, log it */ | ||
633 | printk(KERN_ERR PFX "%s: BUG! Tx ring full when " | ||
634 | "queue awake!\n", netdev->name); | ||
635 | spin_unlock_irqrestore(&enic->wq_lock[0], flags); | ||
636 | return NETDEV_TX_BUSY; | ||
637 | } | ||
638 | |||
639 | enic_queue_wq_skb(enic, wq, skb); | ||
640 | |||
641 | if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1) | ||
642 | netif_stop_queue(netdev); | ||
643 | |||
644 | netdev->trans_start = jiffies; | ||
645 | |||
646 | spin_unlock_irqrestore(&enic->wq_lock[0], flags); | ||
647 | |||
648 | return NETDEV_TX_OK; | ||
649 | } | ||
650 | |||
651 | /* dev_base_lock rwlock held, nominally process context */ | ||
652 | static struct net_device_stats *enic_get_stats(struct net_device *netdev) | ||
653 | { | ||
654 | struct enic *enic = netdev_priv(netdev); | ||
655 | struct vnic_stats *stats; | ||
656 | |||
657 | spin_lock(&enic->devcmd_lock); | ||
658 | vnic_dev_stats_dump(enic->vdev, &stats); | ||
659 | spin_unlock(&enic->devcmd_lock); | ||
660 | |||
661 | enic->net_stats.tx_packets = stats->tx.tx_frames_ok; | ||
662 | enic->net_stats.tx_bytes = stats->tx.tx_bytes_ok; | ||
663 | enic->net_stats.tx_errors = stats->tx.tx_errors; | ||
664 | enic->net_stats.tx_dropped = stats->tx.tx_drops; | ||
665 | |||
666 | enic->net_stats.rx_packets = stats->rx.rx_frames_ok; | ||
667 | enic->net_stats.rx_bytes = stats->rx.rx_bytes_ok; | ||
668 | enic->net_stats.rx_errors = stats->rx.rx_errors; | ||
669 | enic->net_stats.multicast = stats->rx.rx_multicast_frames_ok; | ||
670 | enic->net_stats.rx_crc_errors = stats->rx.rx_crc_errors; | ||
671 | enic->net_stats.rx_dropped = stats->rx.rx_no_bufs; | ||
672 | |||
673 | return &enic->net_stats; | ||
674 | } | ||
675 | |||
676 | static void enic_reset_mcaddrs(struct enic *enic) | ||
677 | { | ||
678 | enic->mc_count = 0; | ||
679 | } | ||
680 | |||
681 | static int enic_set_mac_addr(struct net_device *netdev, char *addr) | ||
682 | { | ||
683 | if (!is_valid_ether_addr(addr)) | ||
684 | return -EADDRNOTAVAIL; | ||
685 | |||
686 | memcpy(netdev->dev_addr, addr, netdev->addr_len); | ||
687 | |||
688 | return 0; | ||
689 | } | ||
690 | |||
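/* Only netdev->dev_addr is updated here; the address reaches the adapter
 * later via enic_add_station_addr() when the device is opened. */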
691 | /* netif_tx_lock held, BHs disabled */ | ||
692 | static void enic_set_multicast_list(struct net_device *netdev) | ||
693 | { | ||
694 | struct enic *enic = netdev_priv(netdev); | ||
695 | struct dev_mc_list *list = netdev->mc_list; | ||
696 | int directed = 1; | ||
697 | int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0; | ||
698 | int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0; | ||
699 | int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0; | ||
700 | int allmulti = (netdev->flags & IFF_ALLMULTI) || | ||
701 | (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS); | ||
702 | u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; | ||
703 | unsigned int mc_count = netdev->mc_count; | ||
704 | unsigned int i, j; | ||
705 | |||
706 | if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) | ||
707 | mc_count = ENIC_MULTICAST_PERFECT_FILTERS; | ||
708 | |||
709 | spin_lock(&enic->devcmd_lock); | ||
710 | |||
711 | vnic_dev_packet_filter(enic->vdev, directed, | ||
712 | multicast, broadcast, promisc, allmulti); | ||
713 | |||
714 | /* Is there an easier way? Trying to minimize the | ||
715 | * calls to add/del multicast addrs. We keep the | ||
716 | * addrs from the last call in enic->mc_addr and | ||
717 | * look for changes to add/del. Both lists are capped at | ||
718 | * ENIC_MULTICAST_PERFECT_FILTERS, so the scan stays small. */ | ||
719 | |||
720 | for (i = 0; list && i < mc_count; i++) { | ||
721 | memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN); | ||
722 | list = list->next; | ||
723 | } | ||
724 | |||
725 | for (i = 0; i < enic->mc_count; i++) { | ||
726 | for (j = 0; j < mc_count; j++) | ||
727 | if (compare_ether_addr(enic->mc_addr[i], | ||
728 | mc_addr[j]) == 0) | ||
729 | break; | ||
730 | if (j == mc_count) | ||
731 | enic_del_multicast_addr(enic, enic->mc_addr[i]); | ||
732 | } | ||
733 | |||
734 | for (i = 0; i < mc_count; i++) { | ||
735 | for (j = 0; j < enic->mc_count; j++) | ||
736 | if (compare_ether_addr(mc_addr[i], | ||
737 | enic->mc_addr[j]) == 0) | ||
738 | break; | ||
739 | if (j == enic->mc_count) | ||
740 | enic_add_multicast_addr(enic, mc_addr[i]); | ||
741 | } | ||
742 | |||
743 | /* Save the list to compare against next time | ||
744 | */ | ||
745 | |||
746 | for (i = 0; i < mc_count; i++) | ||
747 | memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN); | ||
748 | |||
749 | enic->mc_count = mc_count; | ||
750 | |||
751 | spin_unlock(&enic->devcmd_lock); | ||
752 | } | ||
753 | |||
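For a concrete trigger (user-space sketch, not driver code; fd is an assumed UDP socket, and the group address and "eth0" are placeholders): joining an IPv4 group makes the stack map the group to a 01:00:5e:xx:xx:xx MAC, update netdev->mc_list, and call the handler above:

	#include <arpa/inet.h>
	#include <net/if.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	static int join_group(int fd)
	{
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr = inet_addr("239.1.2.3"),
			.imr_ifindex = if_nametoindex("eth0"),
		};

		/* dev_mc_add() in the stack ends up invoking the
		 * driver's set_multicast_list hook */
		return setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
				&mreq, sizeof(mreq));
	}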
754 | /* rtnl lock is held */ | ||
755 | static void enic_vlan_rx_register(struct net_device *netdev, | ||
756 | struct vlan_group *vlan_group) | ||
757 | { | ||
758 | struct enic *enic = netdev_priv(netdev); | ||
759 | enic->vlan_group = vlan_group; | ||
760 | } | ||
761 | |||
762 | /* rtnl lock is held */ | ||
763 | static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | ||
764 | { | ||
765 | struct enic *enic = netdev_priv(netdev); | ||
766 | |||
767 | spin_lock(&enic->devcmd_lock); | ||
768 | enic_add_vlan(enic, vid); | ||
769 | spin_unlock(&enic->devcmd_lock); | ||
770 | } | ||
771 | |||
772 | /* rtnl lock is held */ | ||
773 | static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | ||
774 | { | ||
775 | struct enic *enic = netdev_priv(netdev); | ||
776 | |||
777 | spin_lock(&enic->devcmd_lock); | ||
778 | enic_del_vlan(enic, vid); | ||
779 | spin_unlock(&enic->devcmd_lock); | ||
780 | } | ||
781 | |||
782 | /* netif_tx_lock held, BHs disabled */ | ||
783 | static void enic_tx_timeout(struct net_device *netdev) | ||
784 | { | ||
785 | struct enic *enic = netdev_priv(netdev); | ||
786 | schedule_work(&enic->reset); | ||
787 | } | ||
788 | |||
789 | static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) | ||
790 | { | ||
791 | struct enic *enic = vnic_dev_priv(rq->vdev); | ||
792 | |||
793 | if (!buf->os_buf) | ||
794 | return; | ||
795 | |||
796 | pci_unmap_single(enic->pdev, buf->dma_addr, | ||
797 | buf->len, PCI_DMA_FROMDEVICE); | ||
798 | dev_kfree_skb_any(buf->os_buf); | ||
799 | } | ||
800 | |||
801 | static inline struct sk_buff *enic_rq_alloc_skb(unsigned int size) | ||
802 | { | ||
803 | struct sk_buff *skb; | ||
804 | |||
805 | skb = dev_alloc_skb(size + NET_IP_ALIGN); | ||
806 | |||
807 | if (skb) | ||
808 | skb_reserve(skb, NET_IP_ALIGN); | ||
809 | |||
810 | return skb; | ||
811 | } | ||
812 | |||
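/* Reserving NET_IP_ALIGN (2 bytes on most architectures) offsets the
 * 14-byte Ethernet header so the IP header behind it lands on a 4-byte
 * boundary. */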
813 | static int enic_rq_alloc_buf(struct vnic_rq *rq) | ||
814 | { | ||
815 | struct enic *enic = vnic_dev_priv(rq->vdev); | ||
816 | struct sk_buff *skb; | ||
817 | unsigned int len = enic->netdev->mtu + ETH_HLEN; | ||
818 | unsigned int os_buf_index = 0; | ||
819 | dma_addr_t dma_addr; | ||
820 | |||
821 | skb = enic_rq_alloc_skb(len); | ||
822 | if (!skb) | ||
823 | return -ENOMEM; | ||
824 | |||
825 | dma_addr = pci_map_single(enic->pdev, skb->data, | ||
826 | len, PCI_DMA_FROMDEVICE); | ||
827 | |||
828 | enic_queue_rq_desc(rq, skb, os_buf_index, | ||
829 | dma_addr, len); | ||
830 | |||
831 | return 0; | ||
832 | } | ||
833 | |||
834 | static int enic_get_skb_header(struct sk_buff *skb, void **iphdr, | ||
835 | void **tcph, u64 *hdr_flags, void *priv) | ||
836 | { | ||
837 | struct cq_enet_rq_desc *cq_desc = priv; | ||
838 | unsigned int ip_len; | ||
839 | struct iphdr *iph; | ||
840 | |||
841 | u8 type, color, eop, sop, ingress_port, vlan_stripped; | ||
842 | u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; | ||
843 | u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; | ||
844 | u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; | ||
845 | u8 packet_error; | ||
846 | u16 q_number, completed_index, bytes_written, vlan, checksum; | ||
847 | u32 rss_hash; | ||
848 | |||
849 | cq_enet_rq_desc_dec(cq_desc, | ||
850 | &type, &color, &q_number, &completed_index, | ||
851 | &ingress_port, &fcoe, &eop, &sop, &rss_type, | ||
852 | &csum_not_calc, &rss_hash, &bytes_written, | ||
853 | &packet_error, &vlan_stripped, &vlan, &checksum, | ||
854 | &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, | ||
855 | &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, | ||
856 | &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, | ||
857 | &fcs_ok); | ||
858 | |||
859 | if (!(ipv4 && tcp && !ipv4_fragment)) | ||
860 | return -1; | ||
861 | |||
862 | skb_reset_network_header(skb); | ||
863 | iph = ip_hdr(skb); | ||
864 | |||
865 | ip_len = ip_hdrlen(skb); | ||
866 | skb_set_transport_header(skb, ip_len); | ||
867 | |||
868 | /* check if ip header and tcp header are complete */ | ||
869 | if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) | ||
870 | return -1; | ||
871 | |||
872 | *hdr_flags = LRO_IPV4 | LRO_TCP; | ||
873 | *tcph = tcp_hdr(skb); | ||
874 | *iphdr = iph; | ||
875 | |||
876 | return 0; | ||
877 | } | ||
878 | |||
879 | static void enic_rq_indicate_buf(struct vnic_rq *rq, | ||
880 | struct cq_desc *cq_desc, struct vnic_rq_buf *buf, | ||
881 | int skipped, void *opaque) | ||
882 | { | ||
883 | struct enic *enic = vnic_dev_priv(rq->vdev); | ||
884 | struct sk_buff *skb; | ||
885 | |||
886 | u8 type, color, eop, sop, ingress_port, vlan_stripped; | ||
887 | u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; | ||
888 | u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; | ||
889 | u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; | ||
890 | u8 packet_error; | ||
891 | u16 q_number, completed_index, bytes_written, vlan, checksum; | ||
892 | u32 rss_hash; | ||
893 | |||
894 | if (skipped) | ||
895 | return; | ||
896 | |||
897 | skb = buf->os_buf; | ||
898 | prefetch(skb->data - NET_IP_ALIGN); | ||
899 | pci_unmap_single(enic->pdev, buf->dma_addr, | ||
900 | buf->len, PCI_DMA_FROMDEVICE); | ||
901 | |||
902 | cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, | ||
903 | &type, &color, &q_number, &completed_index, | ||
904 | &ingress_port, &fcoe, &eop, &sop, &rss_type, | ||
905 | &csum_not_calc, &rss_hash, &bytes_written, | ||
906 | &packet_error, &vlan_stripped, &vlan, &checksum, | ||
907 | &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, | ||
908 | &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, | ||
909 | &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, | ||
910 | &fcs_ok); | ||
911 | |||
912 | if (packet_error) { | ||
913 | |||
914 | if (bytes_written > 0 && !fcs_ok) { | ||
915 | if (net_ratelimit()) | ||
916 | printk(KERN_ERR PFX | ||
917 | "%s: packet error: bad FCS\n", | ||
918 | enic->netdev->name); | ||
919 | } | ||
920 | |||
921 | dev_kfree_skb_any(skb); | ||
922 | |||
923 | return; | ||
924 | } | ||
925 | |||
926 | if (eop && bytes_written > 0) { | ||
927 | |||
928 | /* Good receive | ||
929 | */ | ||
930 | |||
931 | skb_put(skb, bytes_written); | ||
932 | skb->protocol = eth_type_trans(skb, enic->netdev); | ||
933 | |||
934 | if (enic->csum_rx_enabled && !csum_not_calc) { | ||
935 | skb->csum = htons(checksum); | ||
936 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
937 | } | ||
938 | |||
939 | skb->dev = enic->netdev; | ||
940 | enic->netdev->last_rx = jiffies; | ||
941 | |||
942 | if (enic->vlan_group && vlan_stripped) { | ||
943 | |||
944 | if (ENIC_SETTING(enic, LRO)) | ||
945 | lro_vlan_hwaccel_receive_skb(&enic->lro_mgr, | ||
946 | skb, enic->vlan_group, | ||
947 | vlan, cq_desc); | ||
948 | else | ||
949 | vlan_hwaccel_receive_skb(skb, | ||
950 | enic->vlan_group, vlan); | ||
951 | |||
952 | } else { | ||
953 | |||
954 | if (ENIC_SETTING(enic, LRO)) | ||
955 | lro_receive_skb(&enic->lro_mgr, skb, cq_desc); | ||
956 | else | ||
957 | netif_receive_skb(skb); | ||
958 | |||
959 | } | ||
960 | |||
961 | } else { | ||
962 | |||
963 | /* Buffer overflow | ||
964 | */ | ||
965 | |||
966 | dev_kfree_skb_any(skb); | ||
967 | } | ||
968 | } | ||
969 | |||
970 | static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, | ||
971 | u8 type, u16 q_number, u16 completed_index, void *opaque) | ||
972 | { | ||
973 | struct enic *enic = vnic_dev_priv(vdev); | ||
974 | |||
975 | vnic_rq_service(&enic->rq[q_number], cq_desc, | ||
976 | completed_index, VNIC_RQ_RETURN_DESC, | ||
977 | enic_rq_indicate_buf, opaque); | ||
978 | |||
979 | return 0; | ||
980 | } | ||
981 | |||
982 | static void enic_rq_drop_buf(struct vnic_rq *rq, | ||
983 | struct cq_desc *cq_desc, struct vnic_rq_buf *buf, | ||
984 | int skipped, void *opaque) | ||
985 | { | ||
986 | struct enic *enic = vnic_dev_priv(rq->vdev); | ||
987 | struct sk_buff *skb = buf->os_buf; | ||
988 | |||
989 | if (skipped) | ||
990 | return; | ||
991 | |||
992 | pci_unmap_single(enic->pdev, buf->dma_addr, | ||
993 | buf->len, PCI_DMA_FROMDEVICE); | ||
994 | |||
995 | dev_kfree_skb_any(skb); | ||
996 | } | ||
997 | |||
998 | static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc, | ||
999 | u8 type, u16 q_number, u16 completed_index, void *opaque) | ||
1000 | { | ||
1001 | struct enic *enic = vnic_dev_priv(vdev); | ||
1002 | |||
1003 | vnic_rq_service(&enic->rq[q_number], cq_desc, | ||
1004 | completed_index, VNIC_RQ_RETURN_DESC, | ||
1005 | enic_rq_drop_buf, opaque); | ||
1006 | |||
1007 | return 0; | ||
1008 | } | ||
1009 | |||
1010 | static int enic_poll(struct napi_struct *napi, int budget) | ||
1011 | { | ||
1012 | struct enic *enic = container_of(napi, struct enic, napi); | ||
1013 | struct net_device *netdev = enic->netdev; | ||
1014 | unsigned int rq_work_to_do = budget; | ||
1015 | unsigned int wq_work_to_do = -1; /* no limit */ | ||
1016 | unsigned int work_done, rq_work_done, wq_work_done; | ||
1017 | |||
1018 | /* Service RQ (first) and WQ | ||
1019 | */ | ||
1020 | |||
1021 | rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ], | ||
1022 | rq_work_to_do, enic_rq_service, NULL); | ||
1023 | |||
1024 | wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ], | ||
1025 | wq_work_to_do, enic_wq_service, NULL); | ||
1026 | |||
1027 | /* Accumulate intr event credits for this polling | ||
1028 | * cycle. An intr event is the completion of a | ||
1029 | * WQ or RQ packet. | ||
1030 | */ | ||
1031 | |||
1032 | work_done = rq_work_done + wq_work_done; | ||
1033 | |||
1034 | if (work_done > 0) | ||
1035 | vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ], | ||
1036 | work_done, | ||
1037 | 0 /* don't unmask intr */, | ||
1038 | 0 /* don't reset intr timer */); | ||
1039 | |||
1040 | if (rq_work_done > 0) { | ||
1041 | |||
1042 | /* Replenish RQ | ||
1043 | */ | ||
1044 | |||
1045 | vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); | ||
1046 | |||
1047 | } else { | ||
1048 | |||
1049 | /* If no work done, flush all LROs and exit polling | ||
1050 | */ | ||
1051 | |||
1052 | if (ENIC_SETTING(enic, LRO)) | ||
1053 | lro_flush_all(&enic->lro_mgr); | ||
1054 | |||
1055 | netif_rx_complete(netdev, napi); | ||
1056 | vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); | ||
1057 | } | ||
1058 | |||
1059 | return rq_work_done; | ||
1060 | } | ||
1061 | |||
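/* enic_poll runs in INTx/MSI mode, where one NAPI context covers both
 * completion queues: the RQ is replenished only after it produced work,
 * and an idle pass first flushes any pending LRO aggregations so partly
 * built frames are not stranded, then re-arms the interrupt. */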
1062 | static int enic_poll_msix(struct napi_struct *napi, int budget) | ||
1063 | { | ||
1064 | struct enic *enic = container_of(napi, struct enic, napi); | ||
1065 | struct net_device *netdev = enic->netdev; | ||
1066 | unsigned int work_to_do = budget; | ||
1067 | unsigned int work_done; | ||
1068 | |||
1069 | /* Service RQ | ||
1070 | */ | ||
1071 | |||
1072 | work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ], | ||
1073 | work_to_do, enic_rq_service, NULL); | ||
1074 | |||
1075 | if (work_done > 0) { | ||
1076 | |||
1077 | /* Replenish RQ | ||
1078 | */ | ||
1079 | |||
1080 | vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); | ||
1081 | |||
1082 | /* Accumulate intr event credits for this polling | ||
1083 | * cycle. An intr event is the completion of a | ||
1084 | * WQ or RQ packet. | ||
1085 | */ | ||
1086 | |||
1087 | vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ], | ||
1088 | work_done, | ||
1089 | 0 /* don't unmask intr */, | ||
1090 | 0 /* don't reset intr timer */); | ||
1091 | } else { | ||
1092 | |||
1093 | /* If no work done, flush all LROs and exit polling | ||
1094 | */ | ||
1095 | |||
1096 | if (ENIC_SETTING(enic, LRO)) | ||
1097 | lro_flush_all(&enic->lro_mgr); | ||
1098 | |||
1099 | netif_rx_complete(netdev, napi); | ||
1100 | vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); | ||
1101 | } | ||
1102 | |||
1103 | return work_done; | ||
1104 | } | ||
1105 | |||
1106 | static void enic_notify_timer(unsigned long data) | ||
1107 | { | ||
1108 | struct enic *enic = (struct enic *)data; | ||
1109 | |||
1110 | enic_notify_check(enic); | ||
1111 | |||
1112 | mod_timer(&enic->notify_timer, round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD)); | ||
1113 | } | ||
1114 | |||
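/* round_jiffies() aligns the periodic notify poll to a whole second
 * boundary so periodic timers across the system can batch their wakeups. */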
1115 | static void enic_free_intr(struct enic *enic) | ||
1116 | { | ||
1117 | struct net_device *netdev = enic->netdev; | ||
1118 | unsigned int i; | ||
1119 | |||
1120 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | ||
1121 | case VNIC_DEV_INTR_MODE_INTX: | ||
1122 | case VNIC_DEV_INTR_MODE_MSI: | ||
1123 | free_irq(enic->pdev->irq, netdev); | ||
1124 | break; | ||
1125 | case VNIC_DEV_INTR_MODE_MSIX: | ||
1126 | for (i = 0; i < ARRAY_SIZE(enic->msix); i++) | ||
1127 | if (enic->msix[i].requested) | ||
1128 | free_irq(enic->msix_entry[i].vector, | ||
1129 | enic->msix[i].devid); | ||
1130 | break; | ||
1131 | default: | ||
1132 | break; | ||
1133 | } | ||
1134 | } | ||
1135 | |||
1136 | static int enic_request_intr(struct enic *enic) | ||
1137 | { | ||
1138 | struct net_device *netdev = enic->netdev; | ||
1139 | unsigned int i; | ||
1140 | int err = 0; | ||
1141 | |||
1142 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | ||
1143 | |||
1144 | case VNIC_DEV_INTR_MODE_INTX: | ||
1145 | |||
1146 | err = request_irq(enic->pdev->irq, enic_isr_legacy, | ||
1147 | IRQF_SHARED, netdev->name, netdev); | ||
1148 | break; | ||
1149 | |||
1150 | case VNIC_DEV_INTR_MODE_MSI: | ||
1151 | |||
1152 | err = request_irq(enic->pdev->irq, enic_isr_msi, | ||
1153 | 0, netdev->name, enic); | ||
1154 | break; | ||
1155 | |||
1156 | case VNIC_DEV_INTR_MODE_MSIX: | ||
1157 | |||
1158 | sprintf(enic->msix[ENIC_MSIX_RQ].devname, | ||
1159 | "%.11s-rx", netdev->name); | ||
1160 | enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq; | ||
1161 | enic->msix[ENIC_MSIX_RQ].devid = enic; | ||
1162 | |||
1163 | sprintf(enic->msix[ENIC_MSIX_WQ].devname, | ||
1164 | "%.11s-tx", netdev->name); | ||
1165 | enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq; | ||
1166 | enic->msix[ENIC_MSIX_WQ].devid = enic; | ||
1167 | |||
1168 | sprintf(enic->msix[ENIC_MSIX_ERR].devname, | ||
1169 | "%.11s-err", netdev->name); | ||
1170 | enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err; | ||
1171 | enic->msix[ENIC_MSIX_ERR].devid = enic; | ||
1172 | |||
1173 | sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname, | ||
1174 | "%.11s-notify", netdev->name); | ||
1175 | enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify; | ||
1176 | enic->msix[ENIC_MSIX_NOTIFY].devid = enic; | ||
1177 | |||
1178 | for (i = 0; i < ARRAY_SIZE(enic->msix); i++) { | ||
1179 | err = request_irq(enic->msix_entry[i].vector, | ||
1180 | enic->msix[i].isr, 0, | ||
1181 | enic->msix[i].devname, | ||
1182 | enic->msix[i].devid); | ||
1183 | if (err) { | ||
1184 | enic_free_intr(enic); | ||
1185 | break; | ||
1186 | } | ||
1187 | enic->msix[i].requested = 1; | ||
1188 | } | ||
1189 | |||
1190 | break; | ||
1191 | |||
1192 | default: | ||
1193 | break; | ||
1194 | } | ||
1195 | |||
1196 | return err; | ||
1197 | } | ||
1198 | |||
1199 | static int enic_notify_set(struct enic *enic) | ||
1200 | { | ||
1201 | int err; | ||
1202 | |||
1203 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | ||
1204 | case VNIC_DEV_INTR_MODE_INTX: | ||
1205 | err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY); | ||
1206 | break; | ||
1207 | case VNIC_DEV_INTR_MODE_MSIX: | ||
1208 | err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY); | ||
1209 | break; | ||
1210 | default: | ||
1211 | err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); | ||
1212 | break; | ||
1213 | } | ||
1214 | |||
1215 | return err; | ||
1216 | } | ||
1217 | |||
1218 | static void enic_notify_timer_start(struct enic *enic) | ||
1219 | { | ||
1220 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | ||
1221 | case VNIC_DEV_INTR_MODE_MSI: | ||
1222 | mod_timer(&enic->notify_timer, jiffies); | ||
1223 | break; | ||
1224 | default: | ||
1225 | /* Using intr for notification for INTx/MSI-X */ | ||
1226 | break; | ||
1227 | } | ||
1228 | } | ||
1229 | |||
1230 | /* rtnl lock is held, process context */ | ||
1231 | static int enic_open(struct net_device *netdev) | ||
1232 | { | ||
1233 | struct enic *enic = netdev_priv(netdev); | ||
1234 | unsigned int i; | ||
1235 | int err; | ||
1236 | |||
1237 | for (i = 0; i < enic->rq_count; i++) { | ||
1238 | err = vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); | ||
1239 | if (err) { | ||
1240 | printk(KERN_ERR PFX | ||
1241 | "%s: Unable to alloc receive buffers.\n", | ||
1242 | netdev->name); | ||
1243 | return err; | ||
1244 | } | ||
1245 | } | ||
1246 | |||
1247 | for (i = 0; i < enic->wq_count; i++) | ||
1248 | vnic_wq_enable(&enic->wq[i]); | ||
1249 | for (i = 0; i < enic->rq_count; i++) | ||
1250 | vnic_rq_enable(&enic->rq[i]); | ||
1251 | |||
1252 | enic_add_station_addr(enic); | ||
1253 | enic_set_multicast_list(netdev); | ||
1254 | |||
1255 | netif_wake_queue(netdev); | ||
1256 | napi_enable(&enic->napi); | ||
1257 | vnic_dev_enable(enic->vdev); | ||
1258 | |||
1259 | for (i = 0; i < enic->intr_count; i++) | ||
1260 | vnic_intr_unmask(&enic->intr[i]); | ||
1261 | |||
1262 | enic_notify_timer_start(enic); | ||
1263 | |||
1264 | return 0; | ||
1265 | } | ||
1266 | |||
1267 | /* rtnl lock is held, process context */ | ||
1268 | static int enic_stop(struct net_device *netdev) | ||
1269 | { | ||
1270 | struct enic *enic = netdev_priv(netdev); | ||
1271 | unsigned int i; | ||
1272 | int err; | ||
1273 | |||
1274 | del_timer_sync(&enic->notify_timer); | ||
1275 | |||
1276 | vnic_dev_disable(enic->vdev); | ||
1277 | napi_disable(&enic->napi); | ||
1278 | netif_stop_queue(netdev); | ||
1279 | |||
1280 | for (i = 0; i < enic->intr_count; i++) | ||
1281 | vnic_intr_mask(&enic->intr[i]); | ||
1282 | |||
1283 | for (i = 0; i < enic->wq_count; i++) { | ||
1284 | err = vnic_wq_disable(&enic->wq[i]); | ||
1285 | if (err) | ||
1286 | return err; | ||
1287 | } | ||
1288 | for (i = 0; i < enic->rq_count; i++) { | ||
1289 | err = vnic_rq_disable(&enic->rq[i]); | ||
1290 | if (err) | ||
1291 | return err; | ||
1292 | } | ||
1293 | |||
1294 | (void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ], | ||
1295 | -1, enic_rq_service_drop, NULL); | ||
1296 | (void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ], | ||
1297 | -1, enic_wq_service, NULL); | ||
1298 | |||
1299 | for (i = 0; i < enic->wq_count; i++) | ||
1300 | vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); | ||
1301 | for (i = 0; i < enic->rq_count; i++) | ||
1302 | vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); | ||
1303 | for (i = 0; i < enic->cq_count; i++) | ||
1304 | vnic_cq_clean(&enic->cq[i]); | ||
1305 | for (i = 0; i < enic->intr_count; i++) | ||
1306 | vnic_intr_clean(&enic->intr[i]); | ||
1307 | |||
1308 | return 0; | ||
1309 | } | ||
1310 | |||
1311 | static int enic_change_mtu(struct net_device *netdev, int new_mtu) | ||
1312 | { | ||
1313 | struct enic *enic = netdev_priv(netdev); | ||
1314 | int running = netif_running(netdev); | ||
1315 | |||
1316 | if (running) | ||
1317 | enic_stop(netdev); | ||
1318 | |||
1319 | if (new_mtu < ENIC_MIN_MTU) | ||
1320 | new_mtu = ENIC_MIN_MTU; | ||
1321 | if (new_mtu > ENIC_MAX_MTU) | ||
1322 | new_mtu = ENIC_MAX_MTU; | ||
1323 | |||
1324 | netdev->mtu = new_mtu; | ||
1325 | |||
1326 | if (netdev->mtu > enic->port_mtu) | ||
1327 | printk(KERN_WARNING PFX | ||
1328 | "%s: interface MTU (%d) set higher " | ||
1329 | "than port MTU (%d)\n", | ||
1330 | netdev->name, netdev->mtu, enic->port_mtu); | ||
1331 | |||
1332 | if (running) | ||
1333 | enic_open(netdev); | ||
1334 | |||
1335 | return 0; | ||
1336 | } | ||
1337 | |||
1338 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1339 | static void enic_poll_controller(struct net_device *netdev) | ||
1340 | { | ||
1341 | struct enic *enic = netdev_priv(netdev); | ||
1342 | struct vnic_dev *vdev = enic->vdev; | ||
1343 | |||
1344 | switch (vnic_dev_get_intr_mode(vdev)) { | ||
1345 | case VNIC_DEV_INTR_MODE_MSIX: | ||
1346 | enic_isr_msix_rq(enic->pdev->irq, enic); | ||
1347 | enic_isr_msix_wq(enic->pdev->irq, enic); | ||
1348 | break; | ||
1349 | case VNIC_DEV_INTR_MODE_MSI: | ||
1350 | enic_isr_msi(enic->pdev->irq, enic); | ||
1351 | break; | ||
1352 | case VNIC_DEV_INTR_MODE_INTX: | ||
1353 | enic_isr_legacy(enic->pdev->irq, netdev); | ||
1354 | break; | ||
1355 | default: | ||
1356 | break; | ||
1357 | } | ||
1358 | } | ||
1359 | #endif | ||
1360 | |||
1361 | static int enic_dev_wait(struct vnic_dev *vdev, | ||
1362 | int (*start)(struct vnic_dev *, int), | ||
1363 | int (*finished)(struct vnic_dev *, int *), | ||
1364 | int arg) | ||
1365 | { | ||
1366 | unsigned long time; | ||
1367 | int done; | ||
1368 | int err; | ||
1369 | |||
1370 | BUG_ON(in_interrupt()); | ||
1371 | |||
1372 | err = start(vdev, arg); | ||
1373 | if (err) | ||
1374 | return err; | ||
1375 | |||
1376 | /* Wait for func to complete...2 seconds max | ||
1377 | */ | ||
1378 | |||
1379 | time = jiffies + (HZ * 2); | ||
1380 | do { | ||
1381 | |||
1382 | err = finished(vdev, &done); | ||
1383 | if (err) | ||
1384 | return err; | ||
1385 | |||
1386 | if (done) | ||
1387 | return 0; | ||
1388 | |||
1389 | schedule_timeout_uninterruptible(HZ / 10); | ||
1390 | |||
1391 | } while (time_after(time, jiffies)); | ||
1392 | |||
1393 | return -ETIMEDOUT; | ||
1394 | } | ||
1395 | |||
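/* enic_dev_wait() pairs a devcmd start function with its matching
 * completion check, polling in HZ/10 (100 ms) steps for at most 2
 * seconds; the open and soft-reset wrappers below are its callers. */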
1396 | static int enic_dev_open(struct enic *enic) | ||
1397 | { | ||
1398 | int err; | ||
1399 | |||
1400 | err = enic_dev_wait(enic->vdev, vnic_dev_open, | ||
1401 | vnic_dev_open_done, 0); | ||
1402 | if (err) | ||
1403 | printk(KERN_ERR PFX | ||
1404 | "vNIC device open failed, err %d.\n", err); | ||
1405 | |||
1406 | return err; | ||
1407 | } | ||
1408 | |||
1409 | static int enic_dev_soft_reset(struct enic *enic) | ||
1410 | { | ||
1411 | int err; | ||
1412 | |||
1413 | err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset, | ||
1414 | vnic_dev_soft_reset_done, 0); | ||
1415 | if (err) | ||
1416 | printk(KERN_ERR PFX | ||
1417 | "vNIC soft reset failed, err %d.\n", err); | ||
1418 | |||
1419 | return err; | ||
1420 | } | ||
1421 | |||
1422 | static void enic_reset(struct work_struct *work) | ||
1423 | { | ||
1424 | struct enic *enic = container_of(work, struct enic, reset); | ||
1425 | |||
1426 | if (!netif_running(enic->netdev)) | ||
1427 | return; | ||
1428 | |||
1429 | rtnl_lock(); | ||
1430 | |||
1431 | spin_lock(&enic->devcmd_lock); | ||
1432 | vnic_dev_hang_notify(enic->vdev); | ||
1433 | spin_unlock(&enic->devcmd_lock); | ||
1434 | |||
1435 | enic_stop(enic->netdev); | ||
1436 | enic_dev_soft_reset(enic); | ||
1437 | enic_reset_mcaddrs(enic); | ||
1438 | enic_init_vnic_resources(enic); | ||
1439 | enic_open(enic->netdev); | ||
1440 | |||
1441 | rtnl_unlock(); | ||
1442 | } | ||
1443 | |||
1444 | static int enic_set_intr_mode(struct enic *enic) | ||
1445 | { | ||
1446 | unsigned int n = ARRAY_SIZE(enic->rq); | ||
1447 | unsigned int m = ARRAY_SIZE(enic->wq); | ||
1448 | unsigned int i; | ||
1449 | |||
1450 | /* Set interrupt mode (INTx, MSI, MSI-X) depending on | ||
1451 | * system capabilities. | ||
1452 | * | ||
1453 | * Try MSI-X first | ||
1454 | * | ||
1455 | * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs | ||
1456 | * (the second to last INTR is used for WQ/RQ errors) | ||
1457 | * (the last INTR is used for notifications) | ||
1458 | */ | ||
1459 | |||
1460 | BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); | ||
1461 | for (i = 0; i < n + m + 2; i++) | ||
1462 | enic->msix_entry[i].entry = i; | ||
1463 | |||
1464 | if (enic->config.intr_mode < 1 && | ||
1465 | enic->rq_count >= n && | ||
1466 | enic->wq_count >= m && | ||
1467 | enic->cq_count >= n + m && | ||
1468 | enic->intr_count >= n + m + 2 && | ||
1469 | !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) { | ||
1470 | |||
1471 | enic->rq_count = n; | ||
1472 | enic->wq_count = m; | ||
1473 | enic->cq_count = n + m; | ||
1474 | enic->intr_count = n + m + 2; | ||
1475 | |||
1476 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX); | ||
1477 | |||
1478 | return 0; | ||
1479 | } | ||
1480 | |||
1481 | /* Next try MSI | ||
1482 | * | ||
1483 | * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR | ||
1484 | */ | ||
1485 | |||
1486 | if (enic->config.intr_mode < 2 && | ||
1487 | enic->rq_count >= 1 && | ||
1488 | enic->wq_count >= 1 && | ||
1489 | enic->cq_count >= 2 && | ||
1490 | enic->intr_count >= 1 && | ||
1491 | !pci_enable_msi(enic->pdev)) { | ||
1492 | |||
1493 | enic->rq_count = 1; | ||
1494 | enic->wq_count = 1; | ||
1495 | enic->cq_count = 2; | ||
1496 | enic->intr_count = 1; | ||
1497 | |||
1498 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI); | ||
1499 | |||
1500 | return 0; | ||
1501 | } | ||
1502 | |||
1503 | /* Next try INTx | ||
1504 | * | ||
1505 | * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs | ||
1506 | * (the first INTR is used for WQ/RQ) | ||
1507 | * (the second INTR is used for WQ/RQ errors) | ||
1508 | * (the last INTR is used for notifications) | ||
1509 | */ | ||
1510 | |||
1511 | if (enic->config.intr_mode < 3 && | ||
1512 | enic->rq_count >= 1 && | ||
1513 | enic->wq_count >= 1 && | ||
1514 | enic->cq_count >= 2 && | ||
1515 | enic->intr_count >= 3) { | ||
1516 | |||
1517 | enic->rq_count = 1; | ||
1518 | enic->wq_count = 1; | ||
1519 | enic->cq_count = 2; | ||
1520 | enic->intr_count = 3; | ||
1521 | |||
1522 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX); | ||
1523 | |||
1524 | return 0; | ||
1525 | } | ||
1526 | |||
1527 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); | ||
1528 | |||
1529 | return -EINVAL; | ||
1530 | } | ||
1531 | |||
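/* Note: pci_enable_msix() of this era returns 0 on success, a positive
 * count of vectors it could have allocated, or a negative errno; any
 * nonzero result above simply falls through to the MSI, then INTx,
 * attempts. */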
1532 | static void enic_clear_intr_mode(struct enic *enic) | ||
1533 | { | ||
1534 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | ||
1535 | case VNIC_DEV_INTR_MODE_MSIX: | ||
1536 | pci_disable_msix(enic->pdev); | ||
1537 | break; | ||
1538 | case VNIC_DEV_INTR_MODE_MSI: | ||
1539 | pci_disable_msi(enic->pdev); | ||
1540 | break; | ||
1541 | default: | ||
1542 | break; | ||
1543 | } | ||
1544 | |||
1545 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); | ||
1546 | } | ||
1547 | |||
1548 | static void enic_iounmap(struct enic *enic) | ||
1549 | { | ||
1550 | if (enic->bar0.vaddr) | ||
1551 | iounmap(enic->bar0.vaddr); | ||
1552 | } | ||
1553 | |||
1554 | static int __devinit enic_probe(struct pci_dev *pdev, | ||
1555 | const struct pci_device_id *ent) | ||
1556 | { | ||
1557 | struct net_device *netdev; | ||
1558 | struct enic *enic; | ||
1559 | int using_dac = 0; | ||
1560 | unsigned int i; | ||
1561 | int err; | ||
1562 | |||
1563 | const u8 rss_default_cpu = 0; | ||
1564 | const u8 rss_hash_type = 0; | ||
1565 | const u8 rss_hash_bits = 0; | ||
1566 | const u8 rss_base_cpu = 0; | ||
1567 | const u8 rss_enable = 0; | ||
1568 | const u8 tso_ipid_split_en = 0; | ||
1569 | const u8 ig_vlan_strip_en = 1; | ||
1570 | |||
1571 | /* Allocate net device structure and initialize. Private | ||
1572 | * instance data is initialized to zero. | ||
1573 | */ | ||
1574 | |||
1575 | netdev = alloc_etherdev(sizeof(struct enic)); | ||
1576 | if (!netdev) { | ||
1577 | printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); | ||
1578 | return -ENOMEM; | ||
1579 | } | ||
1580 | |||
1581 | /* Set the netdev name early so intr vectors are properly | ||
1582 | * named and any error msgs can include netdev->name | ||
1583 | */ | ||
1584 | |||
1585 | rtnl_lock(); | ||
1586 | err = dev_alloc_name(netdev, netdev->name); | ||
1587 | rtnl_unlock(); | ||
1588 | if (err < 0) { | ||
1589 | printk(KERN_ERR PFX "Unable to allocate netdev name.\n"); | ||
1590 | goto err_out_free_netdev; | ||
1591 | } | ||
1592 | |||
1593 | pci_set_drvdata(pdev, netdev); | ||
1594 | |||
1595 | SET_NETDEV_DEV(netdev, &pdev->dev); | ||
1596 | |||
1597 | enic = netdev_priv(netdev); | ||
1598 | enic->netdev = netdev; | ||
1599 | enic->pdev = pdev; | ||
1600 | |||
1601 | /* Setup PCI resources | ||
1602 | */ | ||
1603 | |||
1604 | err = pci_enable_device(pdev); | ||
1605 | if (err) { | ||
1606 | printk(KERN_ERR PFX | ||
1607 | "%s: Cannot enable PCI device, aborting.\n", | ||
1608 | netdev->name); | ||
1609 | goto err_out_free_netdev; | ||
1610 | } | ||
1611 | |||
1612 | err = pci_request_regions(pdev, DRV_NAME); | ||
1613 | if (err) { | ||
1614 | printk(KERN_ERR PFX | ||
1615 | "%s: Cannot request PCI regions, aborting.\n", | ||
1616 | netdev->name); | ||
1617 | goto err_out_disable_device; | ||
1618 | } | ||
1619 | |||
1620 | pci_set_master(pdev); | ||
1621 | |||
1622 | /* Query PCI controller on system for DMA addressing | ||
1623 | * limitation for the device. Try 40-bit first, and | ||
1624 | * fall back to 32-bit. | ||
1625 | */ | ||
1626 | |||
1627 | err = pci_set_dma_mask(pdev, DMA_40BIT_MASK); | ||
1628 | if (err) { | ||
1629 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
1630 | if (err) { | ||
1631 | printk(KERN_ERR PFX | ||
1632 | "%s: No usable DMA configuration, aborting.\n", | ||
1633 | netdev->name); | ||
1634 | goto err_out_release_regions; | ||
1635 | } | ||
1636 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | ||
1637 | if (err) { | ||
1638 | printk(KERN_ERR PFX | ||
1639 | "%s: Unable to obtain 32-bit DMA " | ||
1640 | "for consistent allocations, aborting.\n", | ||
1641 | netdev->name); | ||
1642 | goto err_out_release_regions; | ||
1643 | } | ||
1644 | } else { | ||
1645 | err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK); | ||
1646 | if (err) { | ||
1647 | printk(KERN_ERR PFX | ||
1648 | "%s: Unable to obtain 40-bit DMA " | ||
1649 | "for consistent allocations, aborting.\n", | ||
1650 | netdev->name); | ||
1651 | goto err_out_release_regions; | ||
1652 | } | ||
1653 | using_dac = 1; | ||
1654 | } | ||
1655 | |||
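/* using_dac records that the 40-bit DMA mask was accepted; it later
 * gates setting NETIF_F_HIGHDMA in the netdev feature flags below. */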
1656 | /* Map vNIC resources from BAR0 | ||
1657 | */ | ||
1658 | |||
1659 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | ||
1660 | printk(KERN_ERR PFX | ||
1661 | "%s: BAR0 not memory-map'able, aborting.\n", | ||
1662 | netdev->name); | ||
1663 | err = -ENODEV; | ||
1664 | goto err_out_release_regions; | ||
1665 | } | ||
1666 | |||
1667 | enic->bar0.bus_addr = pci_resource_start(pdev, 0); | ||
1668 | enic->bar0.len = pci_resource_len(pdev, 0); | ||
1669 | enic->bar0.vaddr = pci_iomap(pdev, 0, enic->bar0.len); | ||
1670 | |||
1671 | if (!enic->bar0.vaddr) { | ||
1672 | printk(KERN_ERR PFX | ||
1673 | "%s: Cannot memory-map BAR0 res hdr, aborting.\n", | ||
1674 | netdev->name); | ||
1675 | err = -ENODEV; | ||
1676 | goto err_out_release_regions; | ||
1677 | } | ||
1678 | |||
1679 | /* Register vNIC device | ||
1680 | */ | ||
1681 | |||
1682 | enic->vdev = vnic_dev_register(NULL, enic, pdev, &enic->bar0); | ||
1683 | if (!enic->vdev) { | ||
1684 | printk(KERN_ERR PFX | ||
1685 | "%s: vNIC registration failed, aborting.\n", | ||
1686 | netdev->name); | ||
1687 | err = -ENODEV; | ||
1688 | goto err_out_iounmap; | ||
1689 | } | ||
1690 | |||
1691 | /* Issue device open to get device in known state | ||
1692 | */ | ||
1693 | |||
1694 | err = enic_dev_open(enic); | ||
1695 | if (err) { | ||
1696 | printk(KERN_ERR PFX | ||
1697 | "%s: vNIC dev open failed, aborting.\n", | ||
1698 | netdev->name); | ||
1699 | goto err_out_vnic_unregister; | ||
1700 | } | ||
1701 | |||
1702 | /* Issue device init to initialize the vnic-to-switch link. | ||
1703 | * We'll start with carrier off and wait for link UP | ||
1704 | * notification later to turn on carrier. We don't need | ||
1705 | * to wait here for the vnic-to-switch link initialization | ||
1706 | * to complete; link UP notification is the indication that | ||
1707 | * the process is complete. | ||
1708 | */ | ||
1709 | |||
1710 | netif_carrier_off(netdev); | ||
1711 | |||
1712 | err = vnic_dev_init(enic->vdev, 0); | ||
1713 | if (err) { | ||
1714 | printk(KERN_ERR PFX | ||
1715 | "%s: vNIC dev init failed, aborting.\n", | ||
1716 | netdev->name); | ||
1717 | goto err_out_dev_close; | ||
1718 | } | ||
1719 | |||
1720 | /* Get vNIC configuration | ||
1721 | */ | ||
1722 | |||
1723 | err = enic_get_vnic_config(enic); | ||
1724 | if (err) { | ||
1725 | printk(KERN_ERR PFX | ||
1726 | "%s: Get vNIC configuration failed, aborting.\n", | ||
1727 | netdev->name); | ||
1728 | goto err_out_dev_close; | ||
1729 | } | ||
1730 | |||
1731 | /* Get available resource counts | ||
1732 | */ | ||
1733 | |||
1734 | enic_get_res_counts(enic); | ||
1735 | |||
1736 | /* Set interrupt mode based on resource counts and system | ||
1737 | * capabilities | ||
1738 | */ | ||
1739 | |||
1740 | err = enic_set_intr_mode(enic); | ||
1741 | if (err) { | ||
1742 | printk(KERN_ERR PFX | ||
1743 | "%s: Failed to set intr mode, aborting.\n", | ||
1744 | netdev->name); | ||
1745 | goto err_out_dev_close; | ||
1746 | } | ||
1747 | |||
1748 | /* Request interrupt vector(s) | ||
1749 | */ | ||
1750 | |||
1751 | err = enic_request_intr(enic); | ||
1752 | if (err) { | ||
1753 | printk(KERN_ERR PFX "%s: Unable to request irq.\n", | ||
1754 | netdev->name); | ||
1755 | goto err_out_dev_close; | ||
1756 | } | ||
1757 | |||
1758 | /* Allocate and configure vNIC resources | ||
1759 | */ | ||
1760 | |||
1761 | err = enic_alloc_vnic_resources(enic); | ||
1762 | if (err) { | ||
1763 | printk(KERN_ERR PFX | ||
1764 | "%s: Failed to alloc vNIC resources, aborting.\n", | ||
1765 | netdev->name); | ||
1766 | goto err_out_free_vnic_resources; | ||
1767 | } | ||
1768 | |||
1769 | enic_init_vnic_resources(enic); | ||
1770 | |||
1771 | /* Enable VLAN tag stripping. RSS not enabled (yet). | ||
1772 | */ | ||
1773 | |||
1774 | err = enic_set_nic_cfg(enic, | ||
1775 | rss_default_cpu, rss_hash_type, | ||
1776 | rss_hash_bits, rss_base_cpu, | ||
1777 | rss_enable, tso_ipid_split_en, | ||
1778 | ig_vlan_strip_en); | ||
1779 | if (err) { | ||
1780 | printk(KERN_ERR PFX | ||
1781 | "%s: Failed to config nic, aborting.\n", | ||
1782 | netdev->name); | ||
1783 | goto err_out_free_vnic_resources; | ||
1784 | } | ||
1785 | |||
1786 | /* Setup notification buffer area | ||
1787 | */ | ||
1788 | |||
1789 | err = enic_notify_set(enic); | ||
1790 | if (err) { | ||
1791 | printk(KERN_ERR PFX | ||
1792 | "%s: Failed to alloc notify buffer, aborting.\n", | ||
1793 | netdev->name); | ||
1794 | goto err_out_free_vnic_resources; | ||
1795 | } | ||
1796 | |||
1797 | /* Setup notification timer, HW reset task, and locks | ||
1798 | */ | ||
1799 | |||
1800 | init_timer(&enic->notify_timer); | ||
1801 | enic->notify_timer.function = enic_notify_timer; | ||
1802 | enic->notify_timer.data = (unsigned long)enic; | ||
1803 | |||
1804 | INIT_WORK(&enic->reset, enic_reset); | ||
1805 | |||
1806 | for (i = 0; i < enic->wq_count; i++) | ||
1807 | spin_lock_init(&enic->wq_lock[i]); | ||
1808 | |||
1809 | spin_lock_init(&enic->devcmd_lock); | ||
1810 | |||
1811 | /* Register net device | ||
1812 | */ | ||
1813 | |||
1814 | enic->port_mtu = enic->config.mtu; | ||
1815 | (void)enic_change_mtu(netdev, enic->port_mtu); | ||
1816 | |||
1817 | err = enic_set_mac_addr(netdev, enic->mac_addr); | ||
1818 | if (err) { | ||
1819 | printk(KERN_ERR PFX | ||
1820 | "%s: Invalid MAC address, aborting.\n", | ||
1821 | netdev->name); | ||
1822 | goto err_out_notify_unset; | ||
1823 | } | ||
1824 | |||
1825 | netdev->open = enic_open; | ||
1826 | netdev->stop = enic_stop; | ||
1827 | netdev->hard_start_xmit = enic_hard_start_xmit; | ||
1828 | netdev->get_stats = enic_get_stats; | ||
1829 | netdev->set_multicast_list = enic_set_multicast_list; | ||
1830 | netdev->change_mtu = enic_change_mtu; | ||
1831 | netdev->vlan_rx_register = enic_vlan_rx_register; | ||
1832 | netdev->vlan_rx_add_vid = enic_vlan_rx_add_vid; | ||
1833 | netdev->vlan_rx_kill_vid = enic_vlan_rx_kill_vid; | ||
1834 | netdev->tx_timeout = enic_tx_timeout; | ||
1835 | netdev->watchdog_timeo = 2 * HZ; | ||
1836 | netdev->ethtool_ops = &enic_ethtool_ops; | ||
1837 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1838 | netdev->poll_controller = enic_poll_controller; | ||
1839 | #endif | ||
1840 | |||
1841 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | ||
1842 | default: | ||
1843 | netif_napi_add(netdev, &enic->napi, enic_poll, 64); | ||
1844 | break; | ||
1845 | case VNIC_DEV_INTR_MODE_MSIX: | ||
1846 | netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64); | ||
1847 | break; | ||
1848 | } | ||
1849 | |||
1850 | netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | ||
1851 | if (ENIC_SETTING(enic, TXCSUM)) | ||
1852 | netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; | ||
1853 | if (ENIC_SETTING(enic, TSO)) | ||
1854 | netdev->features |= NETIF_F_TSO | | ||
1855 | NETIF_F_TSO6 | NETIF_F_TSO_ECN; | ||
1856 | if (using_dac) | ||
1857 | netdev->features |= NETIF_F_HIGHDMA; | ||
1858 | |||
1859 | |||
1860 | enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM); | ||
1861 | |||
1862 | if (ENIC_SETTING(enic, LRO)) { | ||
1863 | enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR; | ||
1864 | enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC; | ||
1865 | enic->lro_mgr.lro_arr = enic->lro_desc; | ||
1866 | enic->lro_mgr.get_skb_header = enic_get_skb_header; | ||
1867 | enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; | ||
1868 | enic->lro_mgr.dev = netdev; | ||
1869 | enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE; | ||
1870 | enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; | ||
1871 | } | ||
1872 | |||
1873 | err = register_netdev(netdev); | ||
1874 | if (err) { | ||
1875 | printk(KERN_ERR PFX | ||
1876 | "%s: Cannot register net device, aborting.\n", | ||
1877 | netdev->name); | ||
1878 | goto err_out_notify_unset; | ||
1879 | } | ||
1880 | |||
1881 | return 0; | ||
1882 | |||
1883 | err_out_notify_unset: | ||
1884 | vnic_dev_notify_unset(enic->vdev); | ||
1885 | err_out_free_vnic_resources: | ||
1886 | enic_free_vnic_resources(enic); | ||
1887 | enic_free_intr(enic); | ||
1888 | err_out_dev_close: | ||
1889 | vnic_dev_close(enic->vdev); | ||
1890 | err_out_vnic_unregister: | ||
1891 | enic_clear_intr_mode(enic); | ||
1892 | vnic_dev_unregister(enic->vdev); | ||
1893 | err_out_iounmap: | ||
1894 | enic_iounmap(enic); | ||
1895 | err_out_release_regions: | ||
1896 | pci_release_regions(pdev); | ||
1897 | err_out_disable_device: | ||
1898 | pci_disable_device(pdev); | ||
1899 | err_out_free_netdev: | ||
1900 | pci_set_drvdata(pdev, NULL); | ||
1901 | free_netdev(netdev); | ||
1902 | |||
1903 | return err; | ||
1904 | } | ||
1905 | |||
1906 | static void __devexit enic_remove(struct pci_dev *pdev) | ||
1907 | { | ||
1908 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
1909 | |||
1910 | if (netdev) { | ||
1911 | struct enic *enic = netdev_priv(netdev); | ||
1912 | |||
1913 | flush_scheduled_work(); | ||
1914 | unregister_netdev(netdev); | ||
1915 | vnic_dev_notify_unset(enic->vdev); | ||
1916 | enic_free_vnic_resources(enic); | ||
1917 | enic_free_intr(enic); | ||
1918 | vnic_dev_close(enic->vdev); | ||
1919 | enic_clear_intr_mode(enic); | ||
1920 | vnic_dev_unregister(enic->vdev); | ||
1921 | enic_iounmap(enic); | ||
1922 | pci_release_regions(pdev); | ||
1923 | pci_disable_device(pdev); | ||
1924 | pci_set_drvdata(pdev, NULL); | ||
1925 | free_netdev(netdev); | ||
1926 | } | ||
1927 | } | ||
1928 | |||
1929 | static struct pci_driver enic_driver = { | ||
1930 | .name = DRV_NAME, | ||
1931 | .id_table = enic_id_table, | ||
1932 | .probe = enic_probe, | ||
1933 | .remove = __devexit_p(enic_remove), | ||
1934 | }; | ||
1935 | |||
1936 | static int __init enic_init_module(void) | ||
1937 | { | ||
1938 | printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); | ||
1939 | |||
1940 | return pci_register_driver(&enic_driver); | ||
1941 | } | ||
1942 | |||
1943 | static void __exit enic_cleanup_module(void) | ||
1944 | { | ||
1945 | pci_unregister_driver(&enic_driver); | ||
1946 | } | ||
1947 | |||
1948 | module_init(enic_init_module); | ||
1949 | module_exit(enic_cleanup_module); | ||
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c new file mode 100644 index 000000000000..95184b9108ef --- /dev/null +++ b/drivers/net/enic/enic_res.c | |||
@@ -0,0 +1,370 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/pci.h> | ||
24 | #include <linux/netdevice.h> | ||
25 | |||
26 | #include "wq_enet_desc.h" | ||
27 | #include "rq_enet_desc.h" | ||
28 | #include "cq_enet_desc.h" | ||
29 | #include "vnic_resource.h" | ||
30 | #include "vnic_enet.h" | ||
31 | #include "vnic_dev.h" | ||
32 | #include "vnic_wq.h" | ||
33 | #include "vnic_rq.h" | ||
34 | #include "vnic_cq.h" | ||
35 | #include "vnic_intr.h" | ||
36 | #include "vnic_stats.h" | ||
37 | #include "vnic_nic.h" | ||
38 | #include "vnic_rss.h" | ||
39 | #include "enic_res.h" | ||
40 | #include "enic.h" | ||
41 | |||
42 | int enic_get_vnic_config(struct enic *enic) | ||
43 | { | ||
44 | struct vnic_enet_config *c = &enic->config; | ||
45 | int err; | ||
46 | |||
47 | err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr); | ||
48 | if (err) { | ||
49 | printk(KERN_ERR PFX "Error getting MAC addr, %d\n", err); | ||
50 | return err; | ||
51 | } | ||
52 | |||
53 | #define GET_CONFIG(m) \ | ||
54 | do { \ | ||
55 | err = vnic_dev_spec(enic->vdev, \ | ||
56 | offsetof(struct vnic_enet_config, m), \ | ||
57 | sizeof(c->m), &c->m); \ | ||
58 | if (err) { \ | ||
59 | printk(KERN_ERR PFX \ | ||
60 | "Error getting %s, %d\n", #m, err); \ | ||
61 | return err; \ | ||
62 | } \ | ||
63 | } while (0) | ||
64 | |||
65 | GET_CONFIG(flags); | ||
66 | GET_CONFIG(wq_desc_count); | ||
67 | GET_CONFIG(rq_desc_count); | ||
68 | GET_CONFIG(mtu); | ||
69 | GET_CONFIG(intr_timer); | ||
70 | GET_CONFIG(intr_timer_type); | ||
71 | GET_CONFIG(intr_mode); | ||
72 | |||
73 | c->wq_desc_count = | ||
74 | min_t(u32, ENIC_MAX_WQ_DESCS, | ||
75 | max_t(u32, ENIC_MIN_WQ_DESCS, | ||
76 | c->wq_desc_count)); | ||
77 | c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */ | ||
78 | |||
79 | c->rq_desc_count = | ||
80 | min_t(u32, ENIC_MAX_RQ_DESCS, | ||
81 | max_t(u32, ENIC_MIN_RQ_DESCS, | ||
82 | c->rq_desc_count)); | ||
83 | c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */ | ||
84 | |||
85 | if (c->mtu == 0) | ||
86 | c->mtu = 1500; | ||
87 | c->mtu = min_t(u16, ENIC_MAX_MTU, | ||
88 | max_t(u16, ENIC_MIN_MTU, | ||
89 | c->mtu)); | ||
90 | |||
91 | c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer); | ||
92 | |||
93 | printk(KERN_INFO PFX "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x " | ||
94 | "wq/rq %d/%d\n", | ||
95 | enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2], | ||
96 | enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5], | ||
97 | c->wq_desc_count, c->rq_desc_count); | ||
98 | printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d " | ||
99 | "intr timer %d\n", | ||
100 | c->mtu, ENIC_SETTING(enic, TXCSUM), | ||
101 | ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO), | ||
102 | ENIC_SETTING(enic, LRO), c->intr_timer); | ||
103 | |||
104 | return 0; | ||
105 | } | ||
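The clamping above bounds each ring between ENIC_MIN_*_DESCS (64) and ENIC_MAX_*_DESCS (4096), then rounds down to a multiple of 16 by masking the low four bits. A minimal standalone sketch of that arithmetic (hypothetical helper, not driver code):

```c
#include <stdint.h>

/* Clamp to [64, 4096], then round down to a multiple of 16
 * (mirrors the wq/rq_desc_count handling above). */
static uint32_t clamp_desc_count(uint32_t n)
{
	if (n < 64)
		n = 64;			/* ENIC_MIN_*_DESCS */
	if (n > 4096)
		n = 4096;		/* ENIC_MAX_*_DESCS */
	return n & 0xfffffff0;		/* groups of 16 */
}

/* clamp_desc_count(50) == 64, clamp_desc_count(1000) == 992,
 * clamp_desc_count(10000) == 4096 */
```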
106 | |||
107 | void enic_add_station_addr(struct enic *enic) | ||
108 | { | ||
109 | vnic_dev_add_addr(enic->vdev, enic->mac_addr); | ||
110 | } | ||
111 | |||
112 | void enic_add_multicast_addr(struct enic *enic, u8 *addr) | ||
113 | { | ||
114 | vnic_dev_add_addr(enic->vdev, addr); | ||
115 | } | ||
116 | |||
117 | void enic_del_multicast_addr(struct enic *enic, u8 *addr) | ||
118 | { | ||
119 | vnic_dev_del_addr(enic->vdev, addr); | ||
120 | } | ||
121 | |||
122 | void enic_add_vlan(struct enic *enic, u16 vlanid) | ||
123 | { | ||
124 | u64 a0 = vlanid, a1 = 0; | ||
125 | int wait = 1000; | ||
126 | int err; | ||
127 | |||
128 | err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait); | ||
129 | if (err) | ||
130 | printk(KERN_ERR PFX "Can't add vlan id, %d\n", err); | ||
131 | } | ||
132 | |||
133 | void enic_del_vlan(struct enic *enic, u16 vlanid) | ||
134 | { | ||
135 | u64 a0 = vlanid, a1 = 0; | ||
136 | int wait = 1000; | ||
137 | int err; | ||
138 | |||
139 | err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait); | ||
140 | if (err) | ||
141 | printk(KERN_ERR PFX "Can't delete vlan id, %d\n", err); | ||
142 | } | ||
143 | |||
144 | int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, | ||
145 | u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, | ||
146 | u8 ig_vlan_strip_en) | ||
147 | { | ||
148 | u64 a0, a1; | ||
149 | u32 nic_cfg; | ||
150 | int wait = 1000; | ||
151 | |||
152 | vnic_set_nic_cfg(&nic_cfg, rss_default_cpu, | ||
153 | rss_hash_type, rss_hash_bits, rss_base_cpu, | ||
154 | rss_enable, tso_ipid_split_en, ig_vlan_strip_en); | ||
155 | |||
156 | a0 = nic_cfg; | ||
157 | a1 = 0; | ||
158 | |||
159 | return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait); | ||
160 | } | ||
161 | |||
162 | void enic_free_vnic_resources(struct enic *enic) | ||
163 | { | ||
164 | unsigned int i; | ||
165 | |||
166 | for (i = 0; i < enic->wq_count; i++) | ||
167 | vnic_wq_free(&enic->wq[i]); | ||
168 | for (i = 0; i < enic->rq_count; i++) | ||
169 | vnic_rq_free(&enic->rq[i]); | ||
170 | for (i = 0; i < enic->cq_count; i++) | ||
171 | vnic_cq_free(&enic->cq[i]); | ||
172 | for (i = 0; i < enic->intr_count; i++) | ||
173 | vnic_intr_free(&enic->intr[i]); | ||
174 | } | ||
175 | |||
176 | void enic_get_res_counts(struct enic *enic) | ||
177 | { | ||
178 | enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ); | ||
179 | enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ); | ||
180 | enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ); | ||
181 | enic->intr_count = vnic_dev_get_res_count(enic->vdev, | ||
182 | RES_TYPE_INTR_CTRL); | ||
183 | |||
184 | printk(KERN_INFO PFX "vNIC resources avail: " | ||
185 | "wq %d rq %d cq %d intr %d\n", | ||
186 | enic->wq_count, enic->rq_count, | ||
187 | enic->cq_count, enic->intr_count); | ||
188 | } | ||
189 | |||
190 | void enic_init_vnic_resources(struct enic *enic) | ||
191 | { | ||
192 | enum vnic_dev_intr_mode intr_mode; | ||
193 | unsigned int mask_on_assertion; | ||
194 | unsigned int interrupt_offset; | ||
195 | unsigned int error_interrupt_enable; | ||
196 | unsigned int error_interrupt_offset; | ||
197 | unsigned int cq_index; | ||
198 | unsigned int i; | ||
199 | |||
200 | intr_mode = vnic_dev_get_intr_mode(enic->vdev); | ||
201 | |||
202 | /* Init RQ/WQ resources. | ||
203 | * | ||
204 | * RQ[0 - n-1] point to CQ[0 - n-1] | ||
205 | * WQ[0 - m-1] point to CQ[n - n+m-1] | ||
206 | * | ||
207 | * Error interrupt is not enabled for MSI. | ||
208 | */ | ||
209 | |||
210 | switch (intr_mode) { | ||
211 | case VNIC_DEV_INTR_MODE_INTX: | ||
212 | case VNIC_DEV_INTR_MODE_MSIX: | ||
213 | error_interrupt_enable = 1; | ||
214 | error_interrupt_offset = enic->intr_count - 2; | ||
215 | break; | ||
216 | default: | ||
217 | error_interrupt_enable = 0; | ||
218 | error_interrupt_offset = 0; | ||
219 | break; | ||
220 | } | ||
221 | |||
222 | for (i = 0; i < enic->rq_count; i++) { | ||
223 | cq_index = i; | ||
224 | vnic_rq_init(&enic->rq[i], | ||
225 | cq_index, | ||
226 | error_interrupt_enable, | ||
227 | error_interrupt_offset); | ||
228 | } | ||
229 | |||
230 | for (i = 0; i < enic->wq_count; i++) { | ||
231 | cq_index = enic->rq_count + i; | ||
232 | vnic_wq_init(&enic->wq[i], | ||
233 | cq_index, | ||
234 | error_interrupt_enable, | ||
235 | error_interrupt_offset); | ||
236 | } | ||
237 | |||
238 | /* Init CQ resources | ||
239 | * | ||
240 | * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI | ||
241 | * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X | ||
242 | */ | ||
243 | |||
244 | for (i = 0; i < enic->cq_count; i++) { | ||
245 | |||
246 | switch (intr_mode) { | ||
247 | case VNIC_DEV_INTR_MODE_MSIX: | ||
248 | interrupt_offset = i; | ||
249 | break; | ||
250 | default: | ||
251 | interrupt_offset = 0; | ||
252 | break; | ||
253 | } | ||
254 | |||
255 | vnic_cq_init(&enic->cq[i], | ||
256 | 0 /* flow_control_enable */, | ||
257 | 1 /* color_enable */, | ||
258 | 0 /* cq_head */, | ||
259 | 0 /* cq_tail */, | ||
260 | 1 /* cq_tail_color */, | ||
261 | 1 /* interrupt_enable */, | ||
262 | 1 /* cq_entry_enable */, | ||
263 | 0 /* cq_message_enable */, | ||
264 | interrupt_offset, | ||
265 | 0 /* cq_message_addr */); | ||
266 | } | ||
267 | |||
268 | /* Init INTR resources | ||
269 | * | ||
270 | * mask_on_assertion is not used for INTx due to the level- | ||
271 | * triggered nature of INTx | ||
272 | */ | ||
273 | |||
274 | switch (intr_mode) { | ||
275 | case VNIC_DEV_INTR_MODE_MSI: | ||
276 | case VNIC_DEV_INTR_MODE_MSIX: | ||
277 | mask_on_assertion = 1; | ||
278 | break; | ||
279 | default: | ||
280 | mask_on_assertion = 0; | ||
281 | break; | ||
282 | } | ||
283 | |||
284 | for (i = 0; i < enic->intr_count; i++) { | ||
285 | vnic_intr_init(&enic->intr[i], | ||
286 | enic->config.intr_timer, | ||
287 | enic->config.intr_timer_type, | ||
288 | mask_on_assertion); | ||
289 | } | ||
290 | |||
291 | /* Clear LIF stats | ||
292 | */ | ||
293 | |||
294 | vnic_dev_stats_clear(enic->vdev); | ||
295 | } | ||
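For reference, the index arithmetic established above can be summarized as follows (illustration only; n = rq_count, m = wq_count):

```c
/* Queue-to-CQ and CQ-to-interrupt mapping set up above:
 *
 *   RQ[i] completes to CQ[i]          0 <= i < n
 *   WQ[i] completes to CQ[n + i]      0 <= i < m
 *   CQ[j] raises INTR[j]  (MSI-X)
 *   CQ[j] raises INTR[0]  (INTx, MSI)
 *
 * For INTx and MSI-X the error interrupt is taken from offset
 * intr_count - 2, as coded above; for MSI it is disabled. */
```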
296 | |||
297 | int enic_alloc_vnic_resources(struct enic *enic) | ||
298 | { | ||
299 | enum vnic_dev_intr_mode intr_mode; | ||
300 | unsigned int i; | ||
301 | int err; | ||
302 | |||
303 | intr_mode = vnic_dev_get_intr_mode(enic->vdev); | ||
304 | |||
305 | printk(KERN_INFO PFX "vNIC resources used: " | ||
306 | "wq %d rq %d cq %d intr %d intr mode %s\n", | ||
307 | enic->wq_count, enic->rq_count, | ||
308 | enic->cq_count, enic->intr_count, | ||
309 | intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" : | ||
310 | intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" : | ||
311 | intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" : | ||
312 | "unknown" | ||
313 | ); | ||
314 | |||
315 | /* Allocate queue resources | ||
316 | */ | ||
317 | |||
318 | for (i = 0; i < enic->wq_count; i++) { | ||
319 | err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i, | ||
320 | enic->config.wq_desc_count, | ||
321 | sizeof(struct wq_enet_desc)); | ||
322 | if (err) | ||
323 | goto err_out_cleanup; | ||
324 | } | ||
325 | |||
326 | for (i = 0; i < enic->rq_count; i++) { | ||
327 | err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i, | ||
328 | enic->config.rq_desc_count, | ||
329 | sizeof(struct rq_enet_desc)); | ||
330 | if (err) | ||
331 | goto err_out_cleanup; | ||
332 | } | ||
333 | |||
334 | for (i = 0; i < enic->cq_count; i++) { | ||
335 | if (i < enic->rq_count) | ||
336 | err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i, | ||
337 | enic->config.rq_desc_count, | ||
338 | sizeof(struct cq_enet_rq_desc)); | ||
339 | else | ||
340 | err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i, | ||
341 | enic->config.wq_desc_count, | ||
342 | sizeof(struct cq_enet_wq_desc)); | ||
343 | if (err) | ||
344 | goto err_out_cleanup; | ||
345 | } | ||
346 | |||
347 | for (i = 0; i < enic->intr_count; i++) { | ||
348 | err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i); | ||
349 | if (err) | ||
350 | goto err_out_cleanup; | ||
351 | } | ||
352 | |||
353 | /* Hook remaining resource | ||
354 | */ | ||
355 | |||
356 | enic->legacy_pba = vnic_dev_get_res(enic->vdev, | ||
357 | RES_TYPE_INTR_PBA_LEGACY, 0); | ||
358 | if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) { | ||
359 | printk(KERN_ERR PFX "Failed to hook legacy pba resource\n"); | ||
360 | err = -ENODEV; | ||
361 | goto err_out_cleanup; | ||
362 | } | ||
363 | |||
364 | return 0; | ||
365 | |||
366 | err_out_cleanup: | ||
367 | enic_free_vnic_resources(enic); | ||
368 | |||
369 | return err; | ||
370 | } | ||
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h new file mode 100644 index 000000000000..68534a29b7ac --- /dev/null +++ b/drivers/net/enic/enic_res.h | |||
@@ -0,0 +1,151 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _ENIC_RES_H_ | ||
21 | #define _ENIC_RES_H_ | ||
22 | |||
23 | #include "wq_enet_desc.h" | ||
24 | #include "rq_enet_desc.h" | ||
25 | #include "vnic_wq.h" | ||
26 | #include "vnic_rq.h" | ||
27 | |||
28 | #define ENIC_MIN_WQ_DESCS 64 | ||
29 | #define ENIC_MAX_WQ_DESCS 4096 | ||
30 | #define ENIC_MIN_RQ_DESCS 64 | ||
31 | #define ENIC_MAX_RQ_DESCS 4096 | ||
32 | |||
33 | #define ENIC_MIN_MTU 576 /* minimum for IPv4 */ | ||
34 | #define ENIC_MAX_MTU 9000 | ||
35 | |||
36 | #define ENIC_MULTICAST_PERFECT_FILTERS 32 | ||
37 | |||
38 | #define ENIC_NON_TSO_MAX_DESC 16 | ||
39 | |||
40 | #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0) | ||
41 | |||
42 | static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, | ||
43 | void *os_buf, dma_addr_t dma_addr, unsigned int len, | ||
44 | unsigned int mss_or_csum_offset, unsigned int hdr_len, | ||
45 | int vlan_tag_insert, unsigned int vlan_tag, | ||
46 | int offload_mode, int cq_entry, int sop, int eop) | ||
47 | { | ||
48 | struct wq_enet_desc *desc = vnic_wq_next_desc(wq); | ||
49 | |||
50 | wq_enet_desc_enc(desc, | ||
51 | (u64)dma_addr | VNIC_PADDR_TARGET, | ||
52 | (u16)len, | ||
53 | (u16)mss_or_csum_offset, | ||
54 | (u16)hdr_len, (u8)offload_mode, | ||
55 | (u8)eop, (u8)cq_entry, | ||
56 | 0, /* fcoe_encap */ | ||
57 | (u8)vlan_tag_insert, | ||
58 | (u16)vlan_tag, | ||
59 | 0 /* loopback */); | ||
60 | |||
61 | wmb(); | ||
62 | |||
63 | vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); | ||
64 | } | ||
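Note the ordering here: the descriptor is fully encoded in host memory first, and the wmb() makes those stores visible before vnic_wq_post() hands the slot to the device. A hedged caller sketch using the simpler enic_queue_wq_desc() wrapper defined below (buffer and DMA mapping assumed to come from the surrounding driver context; illustration only):

```c
/* Hypothetical single-fragment transmit post: sop == eop == 1 because
 * the frame occupies exactly one descriptor. */
static void example_post_frame(struct vnic_wq *wq, void *buf,
			       dma_addr_t dma_addr, unsigned int len)
{
	enic_queue_wq_desc(wq, buf, dma_addr, len,
			   0 /* no VLAN insert */, 0 /* vlan_tag */,
			   1 /* eop */);
}
```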
65 | |||
66 | static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, | ||
67 | void *os_buf, dma_addr_t dma_addr, unsigned int len, int eop) | ||
68 | { | ||
69 | enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, | ||
70 | 0, 0, 0, 0, 0, | ||
71 | eop, 0 /* !SOP */, eop); | ||
72 | } | ||
73 | |||
74 | static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, | ||
75 | dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert, | ||
76 | unsigned int vlan_tag, int eop) | ||
77 | { | ||
78 | enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, | ||
79 | 0, 0, vlan_tag_insert, vlan_tag, | ||
80 | WQ_ENET_OFFLOAD_MODE_CSUM, | ||
81 | eop, 1 /* SOP */, eop); | ||
82 | } | ||
83 | |||
84 | static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq, | ||
85 | void *os_buf, dma_addr_t dma_addr, unsigned int len, | ||
86 | int ip_csum, int tcpudp_csum, int vlan_tag_insert, | ||
87 | unsigned int vlan_tag, int eop) | ||
88 | { | ||
89 | enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, | ||
90 | (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0), | ||
91 | 0, vlan_tag_insert, vlan_tag, | ||
92 | WQ_ENET_OFFLOAD_MODE_CSUM, | ||
93 | eop, 1 /* SOP */, eop); | ||
94 | } | ||
95 | |||
96 | static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, | ||
97 | void *os_buf, dma_addr_t dma_addr, unsigned int len, | ||
98 | unsigned int csum_offset, unsigned int hdr_len, | ||
99 | int vlan_tag_insert, unsigned int vlan_tag, int eop) | ||
100 | { | ||
101 | enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, | ||
102 | csum_offset, hdr_len, vlan_tag_insert, vlan_tag, | ||
103 | WQ_ENET_OFFLOAD_MODE_CSUM_L4, | ||
104 | eop, 1 /* SOP */, eop); | ||
105 | } | ||
106 | |||
107 | static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq, | ||
108 | void *os_buf, dma_addr_t dma_addr, unsigned int len, | ||
109 | unsigned int mss, unsigned int hdr_len, int vlan_tag_insert, | ||
110 | unsigned int vlan_tag, int eop) | ||
111 | { | ||
112 | enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, | ||
113 | mss, hdr_len, vlan_tag_insert, vlan_tag, | ||
114 | WQ_ENET_OFFLOAD_MODE_TSO, | ||
115 | eop, 1 /* SOP */, eop); | ||
116 | } | ||
117 | |||
118 | static inline void enic_queue_rq_desc(struct vnic_rq *rq, | ||
119 | void *os_buf, unsigned int os_buf_index, | ||
120 | dma_addr_t dma_addr, unsigned int len) | ||
121 | { | ||
122 | struct rq_enet_desc *desc = vnic_rq_next_desc(rq); | ||
123 | u8 type = os_buf_index ? | ||
124 | RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP; | ||
125 | |||
126 | rq_enet_desc_enc(desc, | ||
127 | (u64)dma_addr | VNIC_PADDR_TARGET, | ||
128 | type, (u16)len); | ||
129 | |||
130 | wmb(); | ||
131 | |||
132 | vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len); | ||
133 | } | ||
134 | |||
135 | struct enic; | ||
136 | |||
137 | int enic_get_vnic_config(struct enic *); | ||
138 | void enic_add_station_addr(struct enic *enic); | ||
139 | void enic_add_multicast_addr(struct enic *enic, u8 *addr); | ||
140 | void enic_del_multicast_addr(struct enic *enic, u8 *addr); | ||
141 | void enic_add_vlan(struct enic *enic, u16 vlanid); | ||
142 | void enic_del_vlan(struct enic *enic, u16 vlanid); | ||
143 | int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, | ||
144 | u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, | ||
145 | u8 ig_vlan_strip_en); | ||
146 | void enic_get_res_counts(struct enic *enic); | ||
147 | void enic_init_vnic_resources(struct enic *enic); | ||
148 | int enic_alloc_vnic_resources(struct enic *); | ||
149 | void enic_free_vnic_resources(struct enic *); | ||
150 | |||
151 | #endif /* _ENIC_RES_H_ */ | ||
diff --git a/drivers/net/enic/rq_enet_desc.h b/drivers/net/enic/rq_enet_desc.h new file mode 100644 index 000000000000..a06e649010ce --- /dev/null +++ b/drivers/net/enic/rq_enet_desc.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _RQ_ENET_DESC_H_ | ||
21 | #define _RQ_ENET_DESC_H_ | ||
22 | |||
23 | /* Ethernet receive queue descriptor: 16B */ | ||
24 | struct rq_enet_desc { | ||
25 | __le64 address; | ||
26 | __le16 length_type; | ||
27 | u8 reserved[6]; | ||
28 | }; | ||
29 | |||
30 | enum rq_enet_type_types { | ||
31 | RQ_ENET_TYPE_ONLY_SOP = 0, | ||
32 | RQ_ENET_TYPE_NOT_SOP = 1, | ||
33 | RQ_ENET_TYPE_RESV2 = 2, | ||
34 | RQ_ENET_TYPE_RESV3 = 3, | ||
35 | }; | ||
36 | |||
37 | #define RQ_ENET_ADDR_BITS 64 | ||
38 | #define RQ_ENET_LEN_BITS 14 | ||
39 | #define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1) | ||
40 | #define RQ_ENET_TYPE_BITS 2 | ||
41 | #define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1) | ||
42 | |||
43 | static inline void rq_enet_desc_enc(struct rq_enet_desc *desc, | ||
44 | u64 address, u8 type, u16 length) | ||
45 | { | ||
46 | desc->address = cpu_to_le64(address); | ||
47 | desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) | | ||
48 | ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS)); | ||
49 | } | ||
50 | |||
51 | static inline void rq_enet_desc_dec(struct rq_enet_desc *desc, | ||
52 | u64 *address, u8 *type, u16 *length) | ||
53 | { | ||
54 | *address = le64_to_cpu(desc->address); | ||
55 | *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK; | ||
56 | *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) & | ||
57 | RQ_ENET_TYPE_MASK); | ||
58 | } | ||
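A worked example of the 16-bit length/type packing used by the encode/decode pair above (length occupies the low 14 bits, type the top 2):

```c
/* length = 1500 (0x05dc), type = RQ_ENET_TYPE_NOT_SOP (1):
 *
 *   length_type = 0x05dc | (1 << 14) = 0x45dc   (stored little-endian)
 *
 * Decoding masks the low 14 bits back out (0x45dc & 0x3fff == 1500)
 * and shifts the top two bits down (0x45dc >> 14 == 1). */
```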
59 | |||
60 | #endif /* _RQ_ENET_DESC_H_ */ | ||
diff --git a/drivers/net/enic/vnic_cq.c b/drivers/net/enic/vnic_cq.c new file mode 100644 index 000000000000..020ae6c3f3d9 --- /dev/null +++ b/drivers/net/enic/vnic_cq.c | |||
@@ -0,0 +1,89 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/pci.h> | ||
24 | |||
25 | #include "vnic_dev.h" | ||
26 | #include "vnic_cq.h" | ||
27 | |||
28 | void vnic_cq_free(struct vnic_cq *cq) | ||
29 | { | ||
30 | vnic_dev_free_desc_ring(cq->vdev, &cq->ring); | ||
31 | |||
32 | cq->ctrl = NULL; | ||
33 | } | ||
34 | |||
35 | int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, | ||
36 | unsigned int desc_count, unsigned int desc_size) | ||
37 | { | ||
38 | int err; | ||
39 | |||
40 | cq->index = index; | ||
41 | cq->vdev = vdev; | ||
42 | |||
43 | cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); | ||
44 | if (!cq->ctrl) { | ||
45 | printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index); | ||
46 | return -EINVAL; | ||
47 | } | ||
48 | |||
49 | err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); | ||
50 | if (err) | ||
51 | return err; | ||
52 | |||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, | ||
57 | unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, | ||
58 | unsigned int cq_tail_color, unsigned int interrupt_enable, | ||
59 | unsigned int cq_entry_enable, unsigned int cq_message_enable, | ||
60 | unsigned int interrupt_offset, u64 cq_message_addr) | ||
61 | { | ||
62 | u64 paddr; | ||
63 | |||
64 | paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; | ||
65 | writeq(paddr, &cq->ctrl->ring_base); | ||
66 | iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); | ||
67 | iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable); | ||
68 | iowrite32(color_enable, &cq->ctrl->color_enable); | ||
69 | iowrite32(cq_head, &cq->ctrl->cq_head); | ||
70 | iowrite32(cq_tail, &cq->ctrl->cq_tail); | ||
71 | iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color); | ||
72 | iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable); | ||
73 | iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable); | ||
74 | iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable); | ||
75 | iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset); | ||
76 | writeq(cq_message_addr, &cq->ctrl->cq_message_addr); | ||
77 | } | ||
78 | |||
79 | void vnic_cq_clean(struct vnic_cq *cq) | ||
80 | { | ||
81 | cq->to_clean = 0; | ||
82 | cq->last_color = 0; | ||
83 | |||
84 | iowrite32(0, &cq->ctrl->cq_head); | ||
85 | iowrite32(0, &cq->ctrl->cq_tail); | ||
86 | iowrite32(1, &cq->ctrl->cq_tail_color); | ||
87 | |||
88 | vnic_dev_clear_desc_ring(&cq->ring); | ||
89 | } | ||
diff --git a/drivers/net/enic/vnic_cq.h b/drivers/net/enic/vnic_cq.h new file mode 100644 index 000000000000..114763cbc2f8 --- /dev/null +++ b/drivers/net/enic/vnic_cq.h | |||
@@ -0,0 +1,113 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _VNIC_CQ_H_ | ||
21 | #define _VNIC_CQ_H_ | ||
22 | |||
23 | #include "cq_desc.h" | ||
24 | #include "vnic_dev.h" | ||
25 | |||
26 | /* Completion queue control */ | ||
27 | struct vnic_cq_ctrl { | ||
28 | u64 ring_base; /* 0x00 */ | ||
29 | u32 ring_size; /* 0x08 */ | ||
30 | u32 pad0; | ||
31 | u32 flow_control_enable; /* 0x10 */ | ||
32 | u32 pad1; | ||
33 | u32 color_enable; /* 0x18 */ | ||
34 | u32 pad2; | ||
35 | u32 cq_head; /* 0x20 */ | ||
36 | u32 pad3; | ||
37 | u32 cq_tail; /* 0x28 */ | ||
38 | u32 pad4; | ||
39 | u32 cq_tail_color; /* 0x30 */ | ||
40 | u32 pad5; | ||
41 | u32 interrupt_enable; /* 0x38 */ | ||
42 | u32 pad6; | ||
43 | u32 cq_entry_enable; /* 0x40 */ | ||
44 | u32 pad7; | ||
45 | u32 cq_message_enable; /* 0x48 */ | ||
46 | u32 pad8; | ||
47 | u32 interrupt_offset; /* 0x50 */ | ||
48 | u32 pad9; | ||
49 | u64 cq_message_addr; /* 0x58 */ | ||
50 | u32 pad10; | ||
51 | }; | ||
52 | |||
53 | struct vnic_cq { | ||
54 | unsigned int index; | ||
55 | struct vnic_dev *vdev; | ||
56 | struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */ | ||
57 | struct vnic_dev_ring ring; | ||
58 | unsigned int to_clean; | ||
59 | unsigned int last_color; | ||
60 | }; | ||
61 | |||
62 | static inline unsigned int vnic_cq_service(struct vnic_cq *cq, | ||
63 | unsigned int work_to_do, | ||
64 | int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc, | ||
65 | u8 type, u16 q_number, u16 completed_index, void *opaque), | ||
66 | void *opaque) | ||
67 | { | ||
68 | struct cq_desc *cq_desc; | ||
69 | unsigned int work_done = 0; | ||
70 | u16 q_number, completed_index; | ||
71 | u8 type, color; | ||
72 | |||
73 | cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + | ||
74 | cq->ring.desc_size * cq->to_clean); | ||
75 | cq_desc_dec(cq_desc, &type, &color, | ||
76 | &q_number, &completed_index); | ||
77 | |||
78 | while (color != cq->last_color) { | ||
79 | |||
80 | if ((*q_service)(cq->vdev, cq_desc, type, | ||
81 | q_number, completed_index, opaque)) | ||
82 | break; | ||
83 | |||
84 | cq->to_clean++; | ||
85 | if (cq->to_clean == cq->ring.desc_count) { | ||
86 | cq->to_clean = 0; | ||
87 | cq->last_color = cq->last_color ? 0 : 1; | ||
88 | } | ||
89 | |||
90 | cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + | ||
91 | cq->ring.desc_size * cq->to_clean); | ||
92 | cq_desc_dec(cq_desc, &type, &color, | ||
93 | &q_number, &completed_index); | ||
94 | |||
95 | work_done++; | ||
96 | if (work_done >= work_to_do) | ||
97 | break; | ||
98 | } | ||
99 | |||
100 | return work_done; | ||
101 | } | ||
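vnic_cq_service() relies on a color bit the hardware writes into each completion entry: software toggles cq->last_color every time to_clean wraps, so an entry belongs to software exactly when its color differs from last_color. A minimal sketch of a q_service callback as this loop expects it (names illustrative):

```c
/* Return 0 to keep servicing completions, nonzero to stop early. */
static int example_q_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
			     u8 type, u16 q_number, u16 completed_index,
			     void *opaque)
{
	/* e.g. reclaim tx buffers on queue q_number up to completed_index */
	return 0;
}
```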
102 | |||
103 | void vnic_cq_free(struct vnic_cq *cq); | ||
104 | int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, | ||
105 | unsigned int desc_count, unsigned int desc_size); | ||
106 | void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, | ||
107 | unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, | ||
108 | unsigned int cq_tail_color, unsigned int interrupt_enable, | ||
109 | unsigned int cq_entry_enable, unsigned int message_enable, | ||
110 | unsigned int interrupt_offset, u64 message_addr); | ||
111 | void vnic_cq_clean(struct vnic_cq *cq); | ||
112 | |||
113 | #endif /* _VNIC_CQ_H_ */ | ||
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c new file mode 100644 index 000000000000..4d104f5c30f9 --- /dev/null +++ b/drivers/net/enic/vnic_dev.c | |||
@@ -0,0 +1,674 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/pci.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/if_ether.h> | ||
26 | |||
27 | #include "vnic_resource.h" | ||
28 | #include "vnic_devcmd.h" | ||
29 | #include "vnic_dev.h" | ||
30 | #include "vnic_stats.h" | ||
31 | |||
32 | struct vnic_res { | ||
33 | void __iomem *vaddr; | ||
34 | unsigned int count; | ||
35 | }; | ||
36 | |||
37 | struct vnic_dev { | ||
38 | void *priv; | ||
39 | struct pci_dev *pdev; | ||
40 | struct vnic_res res[RES_TYPE_MAX]; | ||
41 | enum vnic_dev_intr_mode intr_mode; | ||
42 | struct vnic_devcmd __iomem *devcmd; | ||
43 | struct vnic_devcmd_notify *notify; | ||
44 | struct vnic_devcmd_notify notify_copy; | ||
45 | dma_addr_t notify_pa; | ||
46 | u32 *linkstatus; | ||
47 | dma_addr_t linkstatus_pa; | ||
48 | struct vnic_stats *stats; | ||
49 | dma_addr_t stats_pa; | ||
50 | struct vnic_devcmd_fw_info *fw_info; | ||
51 | dma_addr_t fw_info_pa; | ||
52 | }; | ||
53 | |||
54 | #define VNIC_MAX_RES_HDR_SIZE \ | ||
55 | (sizeof(struct vnic_resource_header) + \ | ||
56 | sizeof(struct vnic_resource) * RES_TYPE_MAX) | ||
57 | #define VNIC_RES_STRIDE 128 | ||
58 | |||
59 | void *vnic_dev_priv(struct vnic_dev *vdev) | ||
60 | { | ||
61 | return vdev->priv; | ||
62 | } | ||
63 | |||
64 | static int vnic_dev_discover_res(struct vnic_dev *vdev, | ||
65 | struct vnic_dev_bar *bar) | ||
66 | { | ||
67 | struct vnic_resource_header __iomem *rh; | ||
68 | struct vnic_resource __iomem *r; | ||
69 | u8 type; | ||
70 | |||
71 | if (bar->len < VNIC_MAX_RES_HDR_SIZE) { | ||
72 | printk(KERN_ERR "vNIC BAR0 res hdr length error\n"); | ||
73 | return -EINVAL; | ||
74 | } | ||
75 | |||
76 | rh = bar->vaddr; | ||
77 | if (!rh) { | ||
78 | printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n"); | ||
79 | return -EINVAL; | ||
80 | } | ||
81 | |||
82 | if (ioread32(&rh->magic) != VNIC_RES_MAGIC || | ||
83 | ioread32(&rh->version) != VNIC_RES_VERSION) { | ||
84 | printk(KERN_ERR "vNIC BAR0 res magic/version error " | ||
85 | "exp (%lx/%lx) curr (%x/%x)\n", | ||
86 | VNIC_RES_MAGIC, VNIC_RES_VERSION, | ||
87 | ioread32(&rh->magic), ioread32(&rh->version)); | ||
88 | return -EINVAL; | ||
89 | } | ||
90 | |||
91 | r = (struct vnic_resource __iomem *)(rh + 1); | ||
92 | |||
93 | while ((type = ioread8(&r->type)) != RES_TYPE_EOL) { | ||
94 | |||
95 | u8 bar_num = ioread8(&r->bar); | ||
96 | u32 bar_offset = ioread32(&r->bar_offset); | ||
97 | u32 count = ioread32(&r->count); | ||
98 | u32 len; | ||
99 | |||
100 | r++; | ||
101 | |||
102 | if (bar_num != 0) /* only mapping in BAR0 resources */ | ||
103 | continue; | ||
104 | |||
105 | switch (type) { | ||
106 | case RES_TYPE_WQ: | ||
107 | case RES_TYPE_RQ: | ||
108 | case RES_TYPE_CQ: | ||
109 | case RES_TYPE_INTR_CTRL: | ||
110 | /* each count is stride bytes long */ | ||
111 | len = count * VNIC_RES_STRIDE; | ||
112 | if (len + bar_offset > bar->len) { | ||
113 | printk(KERN_ERR "vNIC BAR0 resource %d " | ||
114 | "out-of-bounds, offset 0x%x + " | ||
115 | "size 0x%x > bar len 0x%lx\n", | ||
116 | type, bar_offset, | ||
117 | len, | ||
118 | bar->len); | ||
119 | return -EINVAL; | ||
120 | } | ||
121 | break; | ||
122 | case RES_TYPE_INTR_PBA_LEGACY: | ||
123 | case RES_TYPE_DEVCMD: | ||
124 | len = count; | ||
125 | break; | ||
126 | default: | ||
127 | continue; | ||
128 | } | ||
129 | |||
130 | vdev->res[type].count = count; | ||
131 | vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset; | ||
132 | } | ||
133 | |||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, | ||
138 | enum vnic_res_type type) | ||
139 | { | ||
140 | return vdev->res[type].count; | ||
141 | } | ||
142 | |||
143 | void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, | ||
144 | unsigned int index) | ||
145 | { | ||
146 | if (!vdev->res[type].vaddr) | ||
147 | return NULL; | ||
148 | |||
149 | switch (type) { | ||
150 | case RES_TYPE_WQ: | ||
151 | case RES_TYPE_RQ: | ||
152 | case RES_TYPE_CQ: | ||
153 | case RES_TYPE_INTR_CTRL: | ||
154 | return (char __iomem *)vdev->res[type].vaddr + | ||
155 | index * VNIC_RES_STRIDE; | ||
156 | default: | ||
157 | return (char __iomem *)vdev->res[type].vaddr; | ||
158 | } | ||
159 | } | ||
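Per-queue control regions are laid out at a fixed VNIC_RES_STRIDE (128-byte) pitch, so for example WQ[2]'s registers start at vdev->res[RES_TYPE_WQ].vaddr + 2 * 128; singleton resources such as the devcmd region fall through to the default case and are returned from offset 0 regardless of index.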
160 | |||
161 | unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, | ||
162 | unsigned int desc_count, unsigned int desc_size) | ||
163 | { | ||
164 | /* The base address of the desc rings must be 512 byte aligned. | ||
165 | * Descriptor count is aligned to groups of 32 descriptors. A | ||
166 | * count of 0 means the maximum 4096 descriptors. Descriptor | ||
167 | * size is aligned to 16 bytes. | ||
168 | */ | ||
169 | |||
170 | unsigned int count_align = 32; | ||
171 | unsigned int desc_align = 16; | ||
172 | |||
173 | ring->base_align = 512; | ||
174 | |||
175 | if (desc_count == 0) | ||
176 | desc_count = 4096; | ||
177 | |||
178 | ring->desc_count = ALIGN(desc_count, count_align); | ||
179 | |||
180 | ring->desc_size = ALIGN(desc_size, desc_align); | ||
181 | |||
182 | ring->size = ring->desc_count * ring->desc_size; | ||
183 | ring->size_unaligned = ring->size + ring->base_align; | ||
184 | |||
185 | return ring->size_unaligned; | ||
186 | } | ||
187 | |||
188 | void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) | ||
189 | { | ||
190 | memset(ring->descs, 0, ring->size); | ||
191 | } | ||
192 | |||
193 | int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, | ||
194 | unsigned int desc_count, unsigned int desc_size) | ||
195 | { | ||
196 | vnic_dev_desc_ring_size(ring, desc_count, desc_size); | ||
197 | |||
198 | ring->descs_unaligned = pci_alloc_consistent(vdev->pdev, | ||
199 | ring->size_unaligned, | ||
200 | &ring->base_addr_unaligned); | ||
201 | |||
202 | if (!ring->descs_unaligned) { | ||
203 | printk(KERN_ERR | ||
204 | "Failed to allocate ring (size=%d), aborting\n", | ||
205 | (int)ring->size); | ||
206 | return -ENOMEM; | ||
207 | } | ||
208 | |||
209 | ring->base_addr = ALIGN(ring->base_addr_unaligned, | ||
210 | ring->base_align); | ||
211 | ring->descs = (u8 *)ring->descs_unaligned + | ||
212 | (ring->base_addr - ring->base_addr_unaligned); | ||
213 | |||
214 | vnic_dev_clear_desc_ring(ring); | ||
215 | |||
216 | ring->desc_avail = ring->desc_count - 1; | ||
217 | |||
218 | return 0; | ||
219 | } | ||
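The sizing and alignment dance above is easiest to see with concrete numbers; an illustrative trace:

```c
/* desc_count = 100, desc_size = 16:
 *
 *   desc_count -> ALIGN(100, 32) = 128
 *   desc_size  -> ALIGN(16, 16)  = 16
 *   size            = 128 * 16   = 2048 bytes
 *   size_unaligned  = 2048 + 512 = 2560 bytes
 *
 * The extra base_align (512) bytes let base_addr be rounded up to the
 * next 512-byte boundary; ->descs is advanced by the same delta so the
 * CPU pointer and the DMA address stay in step. */
```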
220 | |||
221 | void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring) | ||
222 | { | ||
223 | if (ring->descs) { | ||
224 | pci_free_consistent(vdev->pdev, | ||
225 | ring->size_unaligned, | ||
226 | ring->descs_unaligned, | ||
227 | ring->base_addr_unaligned); | ||
228 | ring->descs = NULL; | ||
229 | } | ||
230 | } | ||
231 | |||
232 | int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, | ||
233 | u64 *a0, u64 *a1, int wait) | ||
234 | { | ||
235 | struct vnic_devcmd __iomem *devcmd = vdev->devcmd; | ||
236 | int delay; | ||
237 | u32 status; | ||
238 | int dev_cmd_err[] = { | ||
239 | /* convert from fw's version of error.h to host's version */ | ||
240 | 0, /* ERR_SUCCESS */ | ||
241 | EINVAL, /* ERR_EINVAL */ | ||
242 | EFAULT, /* ERR_EFAULT */ | ||
243 | EPERM, /* ERR_EPERM */ | ||
244 | EBUSY, /* ERR_EBUSY */ | ||
245 | }; | ||
246 | int err; | ||
247 | |||
248 | status = ioread32(&devcmd->status); | ||
249 | if (status & STAT_BUSY) { | ||
250 | printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd)); | ||
251 | return -EBUSY; | ||
252 | } | ||
253 | |||
254 | if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { | ||
255 | writeq(*a0, &devcmd->args[0]); | ||
256 | writeq(*a1, &devcmd->args[1]); | ||
257 | wmb(); | ||
258 | } | ||
259 | |||
260 | iowrite32(cmd, &devcmd->cmd); | ||
261 | |||
262 | if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) | ||
263 | return 0; | ||
264 | |||
265 | for (delay = 0; delay < wait; delay++) { | ||
266 | |||
267 | udelay(100); | ||
268 | |||
269 | status = ioread32(&devcmd->status); | ||
270 | if (!(status & STAT_BUSY)) { | ||
271 | |||
272 | if (status & STAT_ERROR) { | ||
273 | err = dev_cmd_err[(int)readq(&devcmd->args[0])]; | ||
274 | printk(KERN_ERR "Error %d devcmd %d\n", | ||
275 | err, _CMD_N(cmd)); | ||
276 | return -err; | ||
277 | } | ||
278 | |||
279 | if (_CMD_DIR(cmd) & _CMD_DIR_READ) { | ||
280 | rmb(); | ||
281 | *a0 = readq(&devcmd->args[0]); | ||
282 | *a1 = readq(&devcmd->args[1]); | ||
283 | } | ||
284 | |||
285 | return 0; | ||
286 | } | ||
287 | } | ||
288 | |||
289 | printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd)); | ||
290 | return -ETIMEDOUT; | ||
291 | } | ||
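With the wait value of 1000 that callers in this driver pass, the poll loop above spins for at most 1000 iterations of udelay(100), i.e. roughly 100 ms, before giving up with -ETIMEDOUT; firmware error codes are translated back to negative host errnos through the dev_cmd_err[] table.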
292 | |||
293 | int vnic_dev_fw_info(struct vnic_dev *vdev, | ||
294 | struct vnic_devcmd_fw_info **fw_info) | ||
295 | { | ||
296 | u64 a0, a1 = 0; | ||
297 | int wait = 1000; | ||
298 | int err = 0; | ||
299 | |||
300 | if (!vdev->fw_info) { | ||
301 | vdev->fw_info = pci_alloc_consistent(vdev->pdev, | ||
302 | sizeof(struct vnic_devcmd_fw_info), | ||
303 | &vdev->fw_info_pa); | ||
304 | if (!vdev->fw_info) | ||
305 | return -ENOMEM; | ||
306 | |||
307 | a0 = vdev->fw_info_pa; | ||
308 | |||
309 | /* only get fw_info once and cache it */ | ||
310 | err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait); | ||
311 | } | ||
312 | |||
313 | *fw_info = vdev->fw_info; | ||
314 | |||
315 | return err; | ||
316 | } | ||
317 | |||
318 | int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, | ||
319 | void *value) | ||
320 | { | ||
321 | u64 a0, a1; | ||
322 | int wait = 1000; | ||
323 | int err; | ||
324 | |||
325 | a0 = offset; | ||
326 | a1 = size; | ||
327 | |||
328 | err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait); | ||
329 | |||
330 | switch (size) { | ||
331 | case 1: *(u8 *)value = (u8)a0; break; | ||
332 | case 2: *(u16 *)value = (u16)a0; break; | ||
333 | case 4: *(u32 *)value = (u32)a0; break; | ||
334 | case 8: *(u64 *)value = a0; break; | ||
335 | default: BUG(); break; | ||
336 | } | ||
337 | |||
338 | return err; | ||
339 | } | ||
340 | |||
341 | int vnic_dev_stats_clear(struct vnic_dev *vdev) | ||
342 | { | ||
343 | u64 a0 = 0, a1 = 0; | ||
344 | int wait = 1000; | ||
345 | return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait); | ||
346 | } | ||
347 | |||
348 | int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) | ||
349 | { | ||
350 | u64 a0, a1; | ||
351 | int wait = 1000; | ||
352 | |||
353 | if (!vdev->stats) { | ||
354 | vdev->stats = pci_alloc_consistent(vdev->pdev, | ||
355 | sizeof(struct vnic_stats), &vdev->stats_pa); | ||
356 | if (!vdev->stats) | ||
357 | return -ENOMEM; | ||
358 | } | ||
359 | |||
360 | *stats = vdev->stats; | ||
361 | a0 = vdev->stats_pa; | ||
362 | a1 = sizeof(struct vnic_stats); | ||
363 | |||
364 | return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait); | ||
365 | } | ||
366 | |||
367 | int vnic_dev_close(struct vnic_dev *vdev) | ||
368 | { | ||
369 | u64 a0 = 0, a1 = 0; | ||
370 | int wait = 1000; | ||
371 | return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait); | ||
372 | } | ||
373 | |||
374 | int vnic_dev_enable(struct vnic_dev *vdev) | ||
375 | { | ||
376 | u64 a0 = 0, a1 = 0; | ||
377 | int wait = 1000; | ||
378 | return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); | ||
379 | } | ||
380 | |||
381 | int vnic_dev_disable(struct vnic_dev *vdev) | ||
382 | { | ||
383 | u64 a0 = 0, a1 = 0; | ||
384 | int wait = 1000; | ||
385 | return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait); | ||
386 | } | ||
387 | |||
388 | int vnic_dev_open(struct vnic_dev *vdev, int arg) | ||
389 | { | ||
390 | u64 a0 = (u32)arg, a1 = 0; | ||
391 | int wait = 1000; | ||
392 | return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait); | ||
393 | } | ||
394 | |||
395 | int vnic_dev_open_done(struct vnic_dev *vdev, int *done) | ||
396 | { | ||
397 | u64 a0 = 0, a1 = 0; | ||
398 | int wait = 1000; | ||
399 | int err; | ||
400 | |||
401 | *done = 0; | ||
402 | |||
403 | err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait); | ||
404 | if (err) | ||
405 | return err; | ||
406 | |||
407 | *done = (a0 == 0); | ||
408 | |||
409 | return 0; | ||
410 | } | ||
411 | |||
412 | int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg) | ||
413 | { | ||
414 | u64 a0 = (u32)arg, a1 = 0; | ||
415 | int wait = 1000; | ||
416 | return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait); | ||
417 | } | ||
418 | |||
419 | int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) | ||
420 | { | ||
421 | u64 a0 = 0, a1 = 0; | ||
422 | int wait = 1000; | ||
423 | int err; | ||
424 | |||
425 | *done = 0; | ||
426 | |||
427 | err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait); | ||
428 | if (err) | ||
429 | return err; | ||
430 | |||
431 | *done = (a0 == 0); | ||
432 | |||
433 | return 0; | ||
434 | } | ||
435 | |||
436 | int vnic_dev_hang_notify(struct vnic_dev *vdev) | ||
437 | { | ||
438 | u64 a0, a1; | ||
439 | int wait = 1000; | ||
440 | return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait); | ||
441 | } | ||
442 | |||
443 | int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) | ||
444 | { | ||
445 | u64 a0, a1; | ||
446 | int wait = 1000; | ||
447 | int err, i; | ||
448 | |||
449 | for (i = 0; i < ETH_ALEN; i++) | ||
450 | mac_addr[i] = 0; | ||
451 | |||
452 | err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait); | ||
453 | if (err) | ||
454 | return err; | ||
455 | |||
456 | for (i = 0; i < ETH_ALEN; i++) | ||
457 | mac_addr[i] = ((u8 *)&a0)[i]; | ||
458 | |||
459 | return 0; | ||
460 | } | ||
461 | |||
462 | void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, | ||
463 | int broadcast, int promisc, int allmulti) | ||
464 | { | ||
465 | u64 a0, a1 = 0; | ||
466 | int wait = 1000; | ||
467 | int err; | ||
468 | |||
469 | a0 = (directed ? CMD_PFILTER_DIRECTED : 0) | | ||
470 | (multicast ? CMD_PFILTER_MULTICAST : 0) | | ||
471 | (broadcast ? CMD_PFILTER_BROADCAST : 0) | | ||
472 | (promisc ? CMD_PFILTER_PROMISCUOUS : 0) | | ||
473 | (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0); | ||
474 | |||
475 | err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); | ||
476 | if (err) | ||
477 | printk(KERN_ERR "Can't set packet filter\n"); | ||
478 | } | ||
479 | |||
480 | void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) | ||
481 | { | ||
482 | u64 a0 = 0, a1 = 0; | ||
483 | int wait = 1000; | ||
484 | int err; | ||
485 | int i; | ||
486 | |||
487 | for (i = 0; i < ETH_ALEN; i++) | ||
488 | ((u8 *)&a0)[i] = addr[i]; | ||
489 | |||
490 | err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); | ||
491 | if (err) | ||
492 | printk(KERN_ERR | ||
493 | "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", | ||
494 | addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], | ||
495 | err); | ||
496 | } | ||
497 | |||
498 | void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) | ||
499 | { | ||
500 | u64 a0 = 0, a1 = 0; | ||
501 | int wait = 1000; | ||
502 | int err; | ||
503 | int i; | ||
504 | |||
505 | for (i = 0; i < ETH_ALEN; i++) | ||
506 | ((u8 *)&a0)[i] = addr[i]; | ||
507 | |||
508 | err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); | ||
509 | if (err) | ||
510 | printk(KERN_ERR | ||
511 | "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", | ||
512 | addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], | ||
513 | err); | ||
514 | } | ||
515 | |||
516 | int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) | ||
517 | { | ||
518 | u64 a0, a1; | ||
519 | int wait = 1000; | ||
520 | |||
521 | if (!vdev->notify) { | ||
522 | vdev->notify = pci_alloc_consistent(vdev->pdev, | ||
523 | sizeof(struct vnic_devcmd_notify), | ||
524 | &vdev->notify_pa); | ||
525 | if (!vdev->notify) | ||
526 | return -ENOMEM; | ||
527 | } | ||
528 | |||
529 | a0 = vdev->notify_pa; | ||
530 | a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL; | ||
531 | a1 += sizeof(struct vnic_devcmd_notify); | ||
532 | |||
533 | return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); | ||
534 | } | ||
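The a1 packing matches the CMD_NOTIFY layout documented in vnic_devcmd.h: the interrupt index lands in bits 32-47 and the notify buffer size in the low 32 bits. For example, intr = 5 yields a1 = 0x0000000500000000 + sizeof(struct vnic_devcmd_notify).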
535 | |||
536 | void vnic_dev_notify_unset(struct vnic_dev *vdev) | ||
537 | { | ||
538 | u64 a0, a1; | ||
539 | int wait = 1000; | ||
540 | |||
541 | a0 = 0; /* paddr = 0 to unset notify buffer */ | ||
542 | a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */ | ||
543 | a1 += sizeof(struct vnic_devcmd_notify); | ||
544 | |||
545 | vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); | ||
546 | } | ||
547 | |||
548 | static int vnic_dev_notify_ready(struct vnic_dev *vdev) | ||
549 | { | ||
550 | u32 *words; | ||
551 | unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4; | ||
552 | unsigned int i; | ||
553 | u32 csum; | ||
554 | |||
555 | if (!vdev->notify) | ||
556 | return 0; | ||
557 | |||
558 | do { | ||
559 | csum = 0; | ||
560 | memcpy(&vdev->notify_copy, vdev->notify, | ||
561 | sizeof(struct vnic_devcmd_notify)); | ||
562 | words = (u32 *)&vdev->notify_copy; | ||
563 | for (i = 1; i < nwords; i++) | ||
564 | csum += words[i]; | ||
565 | } while (csum != words[0]); | ||
566 | |||
567 | return 1; | ||
568 | } | ||
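The copy loop above is a lock-free consistency check, seqlock-like in spirit: the device side is expected to keep word 0 equal to the checksum of the remaining words, so the snapshot is retried until the sum over words[1..n) matches words[0], i.e. until a copy is obtained that was not being rewritten mid-read.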
569 | |||
570 | int vnic_dev_init(struct vnic_dev *vdev, int arg) | ||
571 | { | ||
572 | u64 a0 = (u32)arg, a1 = 0; | ||
573 | int wait = 1000; | ||
574 | return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); | ||
575 | } | ||
576 | |||
577 | int vnic_dev_link_status(struct vnic_dev *vdev) | ||
578 | { | ||
579 | if (vdev->linkstatus) | ||
580 | return *vdev->linkstatus; | ||
581 | |||
582 | if (!vnic_dev_notify_ready(vdev)) | ||
583 | return 0; | ||
584 | |||
585 | return vdev->notify_copy.link_state; | ||
586 | } | ||
587 | |||
588 | u32 vnic_dev_port_speed(struct vnic_dev *vdev) | ||
589 | { | ||
590 | if (!vnic_dev_notify_ready(vdev)) | ||
591 | return 0; | ||
592 | |||
593 | return vdev->notify_copy.port_speed; | ||
594 | } | ||
595 | |||
596 | u32 vnic_dev_msg_lvl(struct vnic_dev *vdev) | ||
597 | { | ||
598 | if (!vnic_dev_notify_ready(vdev)) | ||
599 | return 0; | ||
600 | |||
601 | return vdev->notify_copy.msglvl; | ||
602 | } | ||
603 | |||
604 | u32 vnic_dev_mtu(struct vnic_dev *vdev) | ||
605 | { | ||
606 | if (!vnic_dev_notify_ready(vdev)) | ||
607 | return 0; | ||
608 | |||
609 | return vdev->notify_copy.mtu; | ||
610 | } | ||
611 | |||
612 | void vnic_dev_set_intr_mode(struct vnic_dev *vdev, | ||
613 | enum vnic_dev_intr_mode intr_mode) | ||
614 | { | ||
615 | vdev->intr_mode = intr_mode; | ||
616 | } | ||
617 | |||
618 | enum vnic_dev_intr_mode vnic_dev_get_intr_mode( | ||
619 | struct vnic_dev *vdev) | ||
620 | { | ||
621 | return vdev->intr_mode; | ||
622 | } | ||
623 | |||
624 | void vnic_dev_unregister(struct vnic_dev *vdev) | ||
625 | { | ||
626 | if (vdev) { | ||
627 | if (vdev->notify) | ||
628 | pci_free_consistent(vdev->pdev, | ||
629 | sizeof(struct vnic_devcmd_notify), | ||
630 | vdev->notify, | ||
631 | vdev->notify_pa); | ||
632 | if (vdev->linkstatus) | ||
633 | pci_free_consistent(vdev->pdev, | ||
634 | sizeof(u32), | ||
635 | vdev->linkstatus, | ||
636 | vdev->linkstatus_pa); | ||
637 | if (vdev->stats) | ||
638 | pci_free_consistent(vdev->pdev, | ||
639 | sizeof(struct vnic_stats), | ||
640 | vdev->stats, vdev->stats_pa); | ||
641 | if (vdev->fw_info) | ||
642 | pci_free_consistent(vdev->pdev, | ||
643 | sizeof(struct vnic_devcmd_fw_info), | ||
644 | vdev->fw_info, vdev->fw_info_pa); | ||
645 | kfree(vdev); | ||
646 | } | ||
647 | } | ||
648 | |||
649 | struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, | ||
650 | void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar) | ||
651 | { | ||
652 | if (!vdev) { | ||
653 | vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC); | ||
654 | if (!vdev) | ||
655 | return NULL; | ||
656 | } | ||
657 | |||
658 | vdev->priv = priv; | ||
659 | vdev->pdev = pdev; | ||
660 | |||
661 | if (vnic_dev_discover_res(vdev, bar)) | ||
662 | goto err_out; | ||
663 | |||
664 | vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); | ||
665 | if (!vdev->devcmd) | ||
666 | goto err_out; | ||
667 | |||
668 | return vdev; | ||
669 | |||
670 | err_out: | ||
671 | vnic_dev_unregister(vdev); | ||
672 | return NULL; | ||
673 | } | ||
674 | |||
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h new file mode 100644 index 000000000000..2dcffd3a24bd --- /dev/null +++ b/drivers/net/enic/vnic_dev.h | |||
@@ -0,0 +1,106 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _VNIC_DEV_H_ | ||
21 | #define _VNIC_DEV_H_ | ||
22 | |||
23 | #include "vnic_resource.h" | ||
24 | #include "vnic_devcmd.h" | ||
25 | |||
26 | #ifndef VNIC_PADDR_TARGET | ||
27 | #define VNIC_PADDR_TARGET 0x0000000000000000ULL | ||
28 | #endif | ||
29 | |||
30 | enum vnic_dev_intr_mode { | ||
31 | VNIC_DEV_INTR_MODE_UNKNOWN, | ||
32 | VNIC_DEV_INTR_MODE_INTX, | ||
33 | VNIC_DEV_INTR_MODE_MSI, | ||
34 | VNIC_DEV_INTR_MODE_MSIX, | ||
35 | }; | ||
36 | |||
37 | struct vnic_dev_bar { | ||
38 | void __iomem *vaddr; | ||
39 | dma_addr_t bus_addr; | ||
40 | unsigned long len; | ||
41 | }; | ||
42 | |||
43 | struct vnic_dev_ring { | ||
44 | void *descs; | ||
45 | size_t size; | ||
46 | dma_addr_t base_addr; | ||
47 | size_t base_align; | ||
48 | void *descs_unaligned; | ||
49 | size_t size_unaligned; | ||
50 | dma_addr_t base_addr_unaligned; | ||
51 | unsigned int desc_size; | ||
52 | unsigned int desc_count; | ||
53 | unsigned int desc_avail; | ||
54 | }; | ||
55 | |||
56 | struct vnic_dev; | ||
57 | struct vnic_stats; | ||
58 | |||
59 | void *vnic_dev_priv(struct vnic_dev *vdev); | ||
60 | unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, | ||
61 | enum vnic_res_type type); | ||
62 | void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, | ||
63 | unsigned int index); | ||
64 | unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, | ||
65 | unsigned int desc_count, unsigned int desc_size); | ||
66 | void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring); | ||
67 | int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, | ||
68 | unsigned int desc_count, unsigned int desc_size); | ||
69 | void vnic_dev_free_desc_ring(struct vnic_dev *vdev, | ||
70 | struct vnic_dev_ring *ring); | ||
71 | int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, | ||
72 | u64 *a0, u64 *a1, int wait); | ||
73 | int vnic_dev_fw_info(struct vnic_dev *vdev, | ||
74 | struct vnic_devcmd_fw_info **fw_info); | ||
75 | int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, | ||
76 | void *value); | ||
77 | int vnic_dev_stats_clear(struct vnic_dev *vdev); | ||
78 | int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); | ||
79 | int vnic_dev_hang_notify(struct vnic_dev *vdev); | ||
80 | void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, | ||
81 | int broadcast, int promisc, int allmulti); | ||
82 | void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); | ||
83 | void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); | ||
84 | int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); | ||
85 | int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); | ||
86 | void vnic_dev_notify_unset(struct vnic_dev *vdev); | ||
87 | int vnic_dev_link_status(struct vnic_dev *vdev); | ||
88 | u32 vnic_dev_port_speed(struct vnic_dev *vdev); | ||
89 | u32 vnic_dev_msg_lvl(struct vnic_dev *vdev); | ||
90 | u32 vnic_dev_mtu(struct vnic_dev *vdev); | ||
91 | int vnic_dev_close(struct vnic_dev *vdev); | ||
92 | int vnic_dev_enable(struct vnic_dev *vdev); | ||
93 | int vnic_dev_disable(struct vnic_dev *vdev); | ||
94 | int vnic_dev_open(struct vnic_dev *vdev, int arg); | ||
95 | int vnic_dev_open_done(struct vnic_dev *vdev, int *done); | ||
96 | int vnic_dev_init(struct vnic_dev *vdev, int arg); | ||
97 | int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); | ||
98 | int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); | ||
99 | void vnic_dev_set_intr_mode(struct vnic_dev *vdev, | ||
100 | enum vnic_dev_intr_mode intr_mode); | ||
101 | enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev); | ||
102 | void vnic_dev_unregister(struct vnic_dev *vdev); | ||
103 | struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, | ||
104 | void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar); | ||
105 | |||
106 | #endif /* _VNIC_DEV_H_ */ | ||
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h new file mode 100644 index 000000000000..d8617a3373b1 --- /dev/null +++ b/drivers/net/enic/vnic_devcmd.h | |||
@@ -0,0 +1,282 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _VNIC_DEVCMD_H_ | ||
21 | #define _VNIC_DEVCMD_H_ | ||
22 | |||
23 | #define _CMD_NBITS 14 | ||
24 | #define _CMD_VTYPEBITS 10 | ||
25 | #define _CMD_FLAGSBITS 6 | ||
26 | #define _CMD_DIRBITS 2 | ||
27 | |||
28 | #define _CMD_NMASK ((1 << _CMD_NBITS)-1) | ||
29 | #define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1) | ||
30 | #define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1) | ||
31 | #define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1) | ||
32 | |||
33 | #define _CMD_NSHIFT 0 | ||
34 | #define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS) | ||
35 | #define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS) | ||
36 | #define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS) | ||
37 | |||
38 | /* | ||
39 | * Direction bits (from host perspective). | ||
40 | */ | ||
41 | #define _CMD_DIR_NONE 0U | ||
42 | #define _CMD_DIR_WRITE 1U | ||
43 | #define _CMD_DIR_READ 2U | ||
44 | #define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ) | ||
45 | |||
46 | /* | ||
47 | * Flag bits. | ||
48 | */ | ||
49 | #define _CMD_FLAGS_NONE 0U | ||
50 | #define _CMD_FLAGS_NOWAIT 1U | ||
51 | |||
52 | /* | ||
53 | * vNIC type bits. | ||
54 | */ | ||
55 | #define _CMD_VTYPE_NONE 0U | ||
56 | #define _CMD_VTYPE_ENET 1U | ||
57 | #define _CMD_VTYPE_FC 2U | ||
58 | #define _CMD_VTYPE_SCSI 4U | ||
59 | #define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI) | ||
60 | |||
61 | /* | ||
62 | * Used to create cmds. | ||
63 | */ | ||
64 | #define _CMDCF(dir, flags, vtype, nr) \ | ||
65 | (((dir) << _CMD_DIRSHIFT) | \ | ||
66 | ((flags) << _CMD_FLAGSSHIFT) | \ | ||
67 | ((vtype) << _CMD_VTYPESHIFT) | \ | ||
68 | ((nr) << _CMD_NSHIFT)) | ||
69 | #define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr) | ||
70 | #define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr) | ||
71 | |||
72 | /* | ||
73 | * Used to decode cmds. | ||
74 | */ | ||
75 | #define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK) | ||
76 | #define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK) | ||
77 | #define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK) | ||
78 | #define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK) | ||
79 | |||
80 | enum vnic_devcmd_cmd { | ||
81 | CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0), | ||
82 | |||
83 | /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */ | ||
84 | CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1), | ||
85 | |||
86 | /* dev-specific block member: | ||
87 | * in: (u16)a0=offset,(u8)a1=size | ||
88 | * out: a0=value */ | ||
89 | CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2), | ||
90 | |||
91 | /* stats clear */ | ||
92 | CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3), | ||
93 | |||
94 | /* stats dump in mem: (u64)a0=paddr to stats area, | ||
95 | * (u16)a1=sizeof stats area */ | ||
96 | CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4), | ||
97 | |||
98 | /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */ | ||
99 | CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7), | ||
100 | |||
101 | /* hang detection notification */ | ||
102 | CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8), | ||
103 | |||
104 | /* MAC address in (u48)a0 */ | ||
105 | CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ, | ||
106 | _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9), | ||
107 | |||
108 | /* disable/enable promisc mode: (u8)a0=0/1 */ | ||
109 | /***** XXX DEPRECATED *****/ | ||
110 | CMD_PROMISC_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10), | ||
111 | |||
112 | /* disable/enable all-multi mode: (u8)a0=0/1 */ | ||
113 | /***** XXX DEPRECATED *****/ | ||
114 | CMD_ALLMULTI_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11), | ||
115 | |||
116 | /* add addr from (u48)a0 */ | ||
117 | CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE, | ||
118 | _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12), | ||
119 | |||
120 | /* del addr from (u48)a0 */ | ||
121 | CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE, | ||
122 | _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13), | ||
123 | |||
124 | /* add VLAN id in (u16)a0 */ | ||
125 | CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14), | ||
126 | |||
127 | /* del VLAN id in (u16)a0 */ | ||
128 | CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15), | ||
129 | |||
130 | /* nic_cfg in (u32)a0 */ | ||
131 | CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16), | ||
132 | |||
133 | /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */ | ||
134 | CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17), | ||
135 | |||
136 | /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */ | ||
137 | CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18), | ||
138 | |||
139 | /* initiate softreset */ | ||
140 | CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19), | ||
141 | |||
142 | /* softreset status: | ||
143 | * out: a0=0 reset complete, a0=1 reset in progress */ | ||
144 | CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20), | ||
145 | |||
146 | /* set struct vnic_devcmd_notify buffer in mem: | ||
147 | * in: | ||
148 | * (u64)a0=paddr to notify (set paddr=0 to unset) | ||
149 | * (u32)(a1 & 0x00000000ffffffff)=sizeof(struct vnic_devcmd_notify) | ||
150 | * (u16)((a1 >> 32) & 0x0000ffff)=intr num (-1 for no intr) | ||
151 | * out: | ||
152 | * (u32)a1 = effective size | ||
153 | */ | ||
154 | CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21), | ||
155 | |||
156 | /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct, | ||
157 | * (u8)a1=PXENV_UNDI_xxx */ | ||
158 | CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22), | ||
159 | |||
160 | /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */ | ||
161 | CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23), | ||
162 | |||
163 | /* open status: | ||
164 | * out: a0=0 open complete, a0=1 open in progress */ | ||
165 | CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24), | ||
166 | |||
167 | /* close vnic */ | ||
168 | CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25), | ||
169 | |||
170 | /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */ | ||
171 | CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26), | ||
172 | |||
173 | /* variant of CMD_INIT, with provisioning info | ||
174 | * (u64)a0=paddr of vnic_devcmd_provinfo | ||
175 | * (u32)a1=sizeof provision info */ | ||
176 | CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27), | ||
177 | |||
178 | /* enable virtual link */ | ||
179 | CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), | ||
180 | |||
181 | /* disable virtual link */ | ||
182 | CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29), | ||
183 | |||
184 | /* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */ | ||
185 | CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30), | ||
186 | |||
187 | /* init status: | ||
188 | * out: a0=0 init complete, a0=1 init in progress | ||
189 | * if a0=0, a1=errno */ | ||
190 | CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31), | ||
191 | |||
192 | /* INT13 API: (u64)a0=paddr to vnic_int13_params struct | ||
193 | * (u8)a1=INT13_CMD_xxx */ | ||
194 | CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32), | ||
195 | |||
196 | /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */ | ||
197 | CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33), | ||
198 | |||
199 | /* undo initialize of virtual link */ | ||
200 | CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), | ||
201 | }; | ||
202 | |||
203 | /* flags for CMD_OPEN */ | ||
204 | #define CMD_OPENF_OPROM 0x1 /* open coming from option rom */ | ||
205 | |||
206 | /* flags for CMD_INIT */ | ||
207 | #define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */ | ||
208 | |||
209 | /* flags for CMD_PACKET_FILTER */ | ||
210 | #define CMD_PFILTER_DIRECTED 0x01 | ||
211 | #define CMD_PFILTER_MULTICAST 0x02 | ||
212 | #define CMD_PFILTER_BROADCAST 0x04 | ||
213 | #define CMD_PFILTER_PROMISCUOUS 0x08 | ||
214 | #define CMD_PFILTER_ALL_MULTICAST 0x10 | ||
215 | |||
216 | enum vnic_devcmd_status { | ||
217 | STAT_NONE = 0, | ||
218 | STAT_BUSY = 1 << 0, /* cmd in progress */ | ||
219 | STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */ | ||
220 | }; | ||
221 | |||
222 | enum vnic_devcmd_error { | ||
223 | ERR_SUCCESS = 0, | ||
224 | ERR_EINVAL = 1, | ||
225 | ERR_EFAULT = 2, | ||
226 | ERR_EPERM = 3, | ||
227 | ERR_EBUSY = 4, | ||
228 | ERR_ECMDUNKNOWN = 5, | ||
229 | ERR_EBADSTATE = 6, | ||
230 | ERR_ENOMEM = 7, | ||
231 | ERR_ETIMEDOUT = 8, | ||
232 | ERR_ELINKDOWN = 9, | ||
233 | }; | ||
234 | |||
235 | struct vnic_devcmd_fw_info { | ||
236 | char fw_version[32]; | ||
237 | char fw_build[32]; | ||
238 | char hw_version[32]; | ||
239 | char hw_serial_number[32]; | ||
240 | }; | ||
241 | |||
242 | struct vnic_devcmd_notify { | ||
243 | u32 csum; /* checksum over following words */ | ||
244 | |||
245 | u32 link_state; /* link up == 1 */ | ||
246 | u32 port_speed; /* effective port speed (rate limit) */ | ||
247 | u32 mtu; /* MTU */ | ||
248 | u32 msglvl; /* requested driver msg lvl */ | ||
249 | u32 uif; /* uplink interface */ | ||
250 | u32 status; /* status bits (see VNIC_STF_*) */ | ||
251 | u32 error; /* error code (see ERR_*) for first ERR */ | ||
252 | }; | ||
253 | #define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */ | ||
254 | |||
255 | struct vnic_devcmd_provinfo { | ||
256 | u8 oui[3]; | ||
257 | u8 type; | ||
258 | u8 data[0]; | ||
259 | }; | ||
260 | |||
261 | /* | ||
262 | * Writing cmd register causes STAT_BUSY to get set in status register. | ||
263 | * When cmd completes, STAT_BUSY will be cleared. | ||
264 | * | ||
265 | * If cmd completed successfully STAT_ERROR will be clear | ||
266 | * and args registers contain cmd-specific results. | ||
267 | * | ||
268 | * If cmd error, STAT_ERROR will be set and args[0] contains error code. | ||
269 | * | ||
270 | * status register is read-only. While STAT_BUSY is set, | ||
271 | * all other register contents are read-only. | ||
272 | */ | ||
273 | |||
274 | /* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */ | ||
275 | #define VNIC_DEVCMD_NARGS 15 | ||
276 | struct vnic_devcmd { | ||
277 | u32 status; /* RO */ | ||
278 | u32 cmd; /* RW */ | ||
279 | u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */ | ||
280 | }; | ||
281 | |||
282 | #endif /* _VNIC_DEVCMD_H_ */ | ||
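The comment block above fully specifies the devcmd handshake: writing the cmd register sets STAT_BUSY, the host polls status until STAT_BUSY clears, and STAT_ERROR plus args[0] report failures. A minimal sketch of that loop follows, assuming devcmd is an ioremap'd pointer to the RES_TYPE_DEVCMD region, only args[0]/args[1] are in play, and readq/writeq are available; the driver's real implementation lives in vnic_dev.c, outside this hunk.

static int devcmd_issue(struct vnic_devcmd __iomem *devcmd,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait_us)
{
	u32 status;
	int i;

	writeq(*a0, &devcmd->args[0]);
	writeq(*a1, &devcmd->args[1]);
	iowrite32(cmd, &devcmd->cmd);		/* HW sets STAT_BUSY */

	if (_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)
		return 0;			/* fire-and-forget cmd */

	for (i = 0; i < wait_us; i++) {
		udelay(1);
		status = ioread32(&devcmd->status);
		if (status & STAT_BUSY)
			continue;		/* still executing */
		if (status & STAT_ERROR)
			return -(int)readq(&devcmd->args[0]); /* ERR_* */
		if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
			*a0 = readq(&devcmd->args[0]);
			*a1 = readq(&devcmd->args[1]);
		}
		return 0;
	}

	return -ETIMEDOUT;
}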
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h new file mode 100644 index 000000000000..6332ac9391b8 --- /dev/null +++ b/drivers/net/enic/vnic_enet.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _VNIC_ENIC_H_ | ||
21 | #define _VNIC_ENIC_H_ | ||
22 | |||
23 | /* Device-specific region: enet configuration */ | ||
24 | struct vnic_enet_config { | ||
25 | u32 flags; | ||
26 | u32 wq_desc_count; | ||
27 | u32 rq_desc_count; | ||
28 | u16 mtu; | ||
29 | u16 intr_timer; | ||
30 | u8 intr_timer_type; | ||
31 | u8 intr_mode; | ||
32 | char devname[16]; | ||
33 | }; | ||
34 | |||
35 | #define VENETF_TSO 0x1 /* TSO enabled */ | ||
36 | #define VENETF_LRO 0x2 /* LRO enabled */ | ||
37 | #define VENETF_RXCSUM 0x4 /* RX csum enabled */ | ||
38 | #define VENETF_TXCSUM 0x8 /* TX csum enabled */ | ||
39 | #define VENETF_RSS 0x10 /* RSS enabled */ | ||
40 | #define VENETF_RSSHASH_IPV4 0x20 /* Hash on IPv4 fields */ | ||
41 | #define VENETF_RSSHASH_TCPIPV4 0x40 /* Hash on TCP + IPv4 fields */ | ||
42 | #define VENETF_RSSHASH_IPV6 0x80 /* Hash on IPv6 fields */ | ||
43 | #define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */ | ||
44 | #define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */ | ||
45 | #define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */ | ||
46 | |||
47 | #endif /* _VNIC_ENIC_H_ */ | ||
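The VENETF_* bits describe what the vNIC was provisioned to do, so a driver would normally gate its netdev feature flags on them. A hedged sketch, assuming config has already been read out of the device-specific region (via CMD_DEV_SPEC); the function name is illustrative:

static void example_apply_enet_config(struct net_device *netdev,
	const struct vnic_enet_config *config)
{
	/* advertise only what the vNIC was provisioned with */
	if (config->flags & VENETF_TSO)
		netdev->features |= NETIF_F_TSO;
	if (config->flags & VENETF_TXCSUM)
		netdev->features |= NETIF_F_IP_CSUM;

	netdev->mtu = config->mtu;
}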
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c new file mode 100644 index 000000000000..ddc38f8f4656 --- /dev/null +++ b/drivers/net/enic/vnic_intr.c | |||
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/pci.h> | ||
24 | #include <linux/delay.h> | ||
25 | |||
26 | #include "vnic_dev.h" | ||
27 | #include "vnic_intr.h" | ||
28 | |||
29 | void vnic_intr_free(struct vnic_intr *intr) | ||
30 | { | ||
31 | intr->ctrl = NULL; | ||
32 | } | ||
33 | |||
34 | int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, | ||
35 | unsigned int index) | ||
36 | { | ||
37 | intr->index = index; | ||
38 | intr->vdev = vdev; | ||
39 | |||
40 | intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); | ||
41 | if (!intr->ctrl) { | ||
42 | printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n", | ||
43 | index); | ||
44 | return -EINVAL; | ||
45 | } | ||
46 | |||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, | ||
51 | unsigned int coalescing_type, unsigned int mask_on_assertion) | ||
52 | { | ||
53 | iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer); | ||
54 | iowrite32(coalescing_type, &intr->ctrl->coalescing_type); | ||
55 | iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion); | ||
56 | iowrite32(0, &intr->ctrl->int_credits); | ||
57 | } | ||
58 | |||
59 | void vnic_intr_clean(struct vnic_intr *intr) | ||
60 | { | ||
61 | iowrite32(0, &intr->ctrl->int_credits); | ||
62 | } | ||
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h new file mode 100644 index 000000000000..ccc408116af8 --- /dev/null +++ b/drivers/net/enic/vnic_intr.h | |||
@@ -0,0 +1,92 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _VNIC_INTR_H_ | ||
21 | #define _VNIC_INTR_H_ | ||
22 | |||
23 | #include <linux/pci.h> | ||
24 | |||
25 | #include "vnic_dev.h" | ||
26 | |||
27 | #define VNIC_INTR_TIMER_MAX 0xffff | ||
28 | |||
29 | #define VNIC_INTR_TIMER_TYPE_ABS 0 | ||
30 | #define VNIC_INTR_TIMER_TYPE_QUIET 1 | ||
31 | |||
32 | /* Interrupt control */ | ||
33 | struct vnic_intr_ctrl { | ||
34 | u32 coalescing_timer; /* 0x00 */ | ||
35 | u32 pad0; | ||
36 | u32 coalescing_value; /* 0x08 */ | ||
37 | u32 pad1; | ||
38 | u32 coalescing_type; /* 0x10 */ | ||
39 | u32 pad2; | ||
40 | u32 mask_on_assertion; /* 0x18 */ | ||
41 | u32 pad3; | ||
42 | u32 mask; /* 0x20 */ | ||
43 | u32 pad4; | ||
44 | u32 int_credits; /* 0x28 */ | ||
45 | u32 pad5; | ||
46 | u32 int_credit_return; /* 0x30 */ | ||
47 | u32 pad6; | ||
48 | }; | ||
49 | |||
50 | struct vnic_intr { | ||
51 | unsigned int index; | ||
52 | struct vnic_dev *vdev; | ||
53 | struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */ | ||
54 | }; | ||
55 | |||
56 | static inline void vnic_intr_unmask(struct vnic_intr *intr) | ||
57 | { | ||
58 | iowrite32(0, &intr->ctrl->mask); | ||
59 | } | ||
60 | |||
61 | static inline void vnic_intr_mask(struct vnic_intr *intr) | ||
62 | { | ||
63 | iowrite32(1, &intr->ctrl->mask); | ||
64 | } | ||
65 | |||
66 | static inline void vnic_intr_return_credits(struct vnic_intr *intr, | ||
67 | unsigned int credits, int unmask, int reset_timer) | ||
68 | { | ||
69 | #define VNIC_INTR_UNMASK_SHIFT 16 | ||
70 | #define VNIC_INTR_RESET_TIMER_SHIFT 17 | ||
71 | |||
72 | u32 int_credit_return = (credits & 0xffff) | | ||
73 | (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) | | ||
74 | (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0); | ||
75 | |||
76 | iowrite32(int_credit_return, &intr->ctrl->int_credit_return); | ||
77 | } | ||
78 | |||
79 | static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba) | ||
80 | { | ||
81 | /* get and ack interrupt in one read (clear-and-ack-on-read) */ | ||
82 | return ioread32(legacy_pba); | ||
83 | } | ||
84 | |||
85 | void vnic_intr_free(struct vnic_intr *intr); | ||
86 | int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, | ||
87 | unsigned int index); | ||
88 | void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, | ||
89 | unsigned int coalescing_type, unsigned int mask_on_assertion); | ||
90 | void vnic_intr_clean(struct vnic_intr *intr); | ||
91 | |||
92 | #endif /* _VNIC_INTR_H_ */ | ||
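The mask and credit-return helpers above are built around a NAPI-style cycle: mask in the hard interrupt, process completions in the poll routine, then return the credits with unmask and timer-reset set. A sketch under those assumptions; scheduling the actual poll routine is elided and the function names are illustrative:

static irqreturn_t example_isr(int irq, void *data)
{
	struct vnic_intr *intr = data;

	vnic_intr_mask(intr);	/* quiesce until the poll finishes */
	/* ... schedule the NAPI poll routine here ... */

	return IRQ_HANDLED;
}

static void example_poll_done(struct vnic_intr *intr, unsigned int work_done)
{
	/* credit back the completions we consumed, re-enable the
	 * interrupt, and restart the coalescing timer */
	vnic_intr_return_credits(intr, work_done, 1, 1);
}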
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h new file mode 100644 index 000000000000..dadf26fae69a --- /dev/null +++ b/drivers/net/enic/vnic_nic.h | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _VNIC_NIC_H_ | ||
21 | #define _VNIC_NIC_H_ | ||
22 | |||
23 | #define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL | ||
24 | #define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0 | ||
25 | #define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8) | ||
26 | #define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL | ||
27 | #define NIC_CFG_RSS_HASH_TYPE_SHIFT 8 | ||
28 | #define NIC_CFG_RSS_HASH_BITS (7UL << 16) | ||
29 | #define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL | ||
30 | #define NIC_CFG_RSS_HASH_BITS_SHIFT 16 | ||
31 | #define NIC_CFG_RSS_BASE_CPU (7UL << 19) | ||
32 | #define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL | ||
33 | #define NIC_CFG_RSS_BASE_CPU_SHIFT 19 | ||
34 | #define NIC_CFG_RSS_ENABLE (1UL << 22) | ||
35 | #define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL | ||
36 | #define NIC_CFG_RSS_ENABLE_SHIFT 22 | ||
37 | #define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23) | ||
38 | #define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL | ||
39 | #define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23 | ||
40 | #define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24) | ||
41 | #define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL | ||
42 | #define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24 | ||
43 | |||
44 | static inline void vnic_set_nic_cfg(u32 *nic_cfg, | ||
45 | u8 rss_default_cpu, u8 rss_hash_type, | ||
46 | u8 rss_hash_bits, u8 rss_base_cpu, | ||
47 | u8 rss_enable, u8 tso_ipid_split_en, | ||
48 | u8 ig_vlan_strip_en) | ||
49 | { | ||
50 | *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) | | ||
51 | ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD) | ||
52 | << NIC_CFG_RSS_HASH_TYPE_SHIFT) | | ||
53 | ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD) | ||
54 | << NIC_CFG_RSS_HASH_BITS_SHIFT) | | ||
55 | ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD) | ||
56 | << NIC_CFG_RSS_BASE_CPU_SHIFT) | | ||
57 | ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD) | ||
58 | << NIC_CFG_RSS_ENABLE_SHIFT) | | ||
59 | ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD) | ||
60 | << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) | | ||
61 | ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD) | ||
62 | << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT); | ||
63 | } | ||
64 | |||
65 | #endif /* _VNIC_NIC_H_ */ | ||
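vnic_set_nic_cfg() packs all of the fields above into the single u32 that CMD_NIC_CFG carries in a0. A sketch of composing it for an RSS-enabled vNIC; the numeric values, and the device-side encodings of rss_hash_type and rss_hash_bits, are assumptions of this example:

static u32 example_build_nic_cfg(void)
{
	u32 nic_cfg;

	vnic_set_nic_cfg(&nic_cfg,
		0,	/* rss_default_cpu: queue for unhashed traffic */
		0x03,	/* rss_hash_type: assumed IPv4 | TCP/IPv4 */
		7,	/* rss_hash_bits: assumed 2^7-entry CPU table */
		0,	/* rss_base_cpu */
		1,	/* rss_enable */
		1,	/* tso_ipid_split_en */
		1);	/* ig_vlan_strip_en */

	return nic_cfg;	/* sent via devcmd: CMD_NIC_CFG, (u32)a0 */
}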
diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/enic/vnic_resource.h new file mode 100644 index 000000000000..144d2812f081 --- /dev/null +++ b/drivers/net/enic/vnic_resource.h | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _VNIC_RESOURCE_H_ | ||
21 | #define _VNIC_RESOURCE_H_ | ||
22 | |||
23 | #define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */ | ||
24 | #define VNIC_RES_VERSION 0x00000000L | ||
25 | |||
26 | /* vNIC resource types */ | ||
27 | enum vnic_res_type { | ||
28 | RES_TYPE_EOL, /* End-of-list */ | ||
29 | RES_TYPE_WQ, /* Work queues */ | ||
30 | RES_TYPE_RQ, /* Receive queues */ | ||
31 | RES_TYPE_CQ, /* Completion queues */ | ||
32 | RES_TYPE_RSVD1, | ||
33 | RES_TYPE_NIC_CFG, /* Enet NIC config registers */ | ||
34 | RES_TYPE_RSVD2, | ||
35 | RES_TYPE_RSVD3, | ||
36 | RES_TYPE_RSVD4, | ||
37 | RES_TYPE_RSVD5, | ||
38 | RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */ | ||
39 | RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */ | ||
40 | RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */ | ||
41 | RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status, read-to-clear */ | ||
42 | RES_TYPE_RSVD6, | ||
43 | RES_TYPE_RSVD7, | ||
44 | RES_TYPE_DEVCMD, /* Device command region */ | ||
45 | RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */ | ||
46 | |||
47 | RES_TYPE_MAX, /* Count of resource types */ | ||
48 | }; | ||
49 | |||
50 | struct vnic_resource_header { | ||
51 | u32 magic; | ||
52 | u32 version; | ||
53 | }; | ||
54 | |||
55 | struct vnic_resource { | ||
56 | u8 type; | ||
57 | u8 bar; | ||
58 | u8 pad[2]; | ||
59 | u32 bar_offset; | ||
60 | u32 count; | ||
61 | }; | ||
62 | |||
63 | #endif /* _VNIC_RESOURCE_H_ */ | ||
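Resource discovery amounts to validating the header and walking the vnic_resource entries until RES_TYPE_EOL. A sketch, assuming the table sits at offset 0 of the mapped BAR; the real discovery code is in vnic_dev.c, outside this diff:

static void example_walk_resources(void __iomem *bar)
{
	struct vnic_resource_header __iomem *rh = bar;
	struct vnic_resource __iomem *r =
		(struct vnic_resource __iomem *)(rh + 1);
	u8 type;

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION)
		return;		/* not a vNIC BAR */

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		/* each entry names a type, a BAR, an offset into
		 * that BAR, and how many instances live there */
		pr_debug("res type %u: bar %u offset 0x%x count %u\n",
			type, ioread8(&r->bar),
			ioread32(&r->bar_offset), ioread32(&r->count));
		r++;
	}
}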
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c new file mode 100644 index 000000000000..9365e63e821a --- /dev/null +++ b/drivers/net/enic/vnic_rq.c | |||
@@ -0,0 +1,199 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/pci.h> | ||
24 | #include <linux/delay.h> | ||
25 | |||
26 | #include "vnic_dev.h" | ||
27 | #include "vnic_rq.h" | ||
28 | |||
29 | static int vnic_rq_alloc_bufs(struct vnic_rq *rq) | ||
30 | { | ||
31 | struct vnic_rq_buf *buf; | ||
32 | struct vnic_dev *vdev; | ||
33 | unsigned int i, j, count = rq->ring.desc_count; | ||
34 | unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count); | ||
35 | |||
36 | vdev = rq->vdev; | ||
37 | |||
38 | for (i = 0; i < blks; i++) { | ||
39 | rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); | ||
40 | if (!rq->bufs[i]) { | ||
41 | printk(KERN_ERR "Failed to alloc rq_bufs\n"); | ||
42 | return -ENOMEM; | ||
43 | } | ||
44 | } | ||
45 | |||
46 | for (i = 0; i < blks; i++) { | ||
47 | buf = rq->bufs[i]; | ||
48 | for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) { | ||
49 | buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j; | ||
50 | buf->desc = (u8 *)rq->ring.descs + | ||
51 | rq->ring.desc_size * buf->index; | ||
52 | if (buf->index + 1 == count) { | ||
53 | buf->next = rq->bufs[0]; | ||
54 | break; | ||
55 | } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) { | ||
56 | buf->next = rq->bufs[i + 1]; | ||
57 | } else { | ||
58 | buf->next = buf + 1; | ||
59 | buf++; | ||
60 | } | ||
61 | } | ||
62 | } | ||
63 | |||
64 | rq->to_use = rq->to_clean = rq->bufs[0]; | ||
65 | rq->buf_index = 0; | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | void vnic_rq_free(struct vnic_rq *rq) | ||
71 | { | ||
72 | struct vnic_dev *vdev; | ||
73 | unsigned int i; | ||
74 | |||
75 | vdev = rq->vdev; | ||
76 | |||
77 | vnic_dev_free_desc_ring(vdev, &rq->ring); | ||
78 | |||
79 | for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) { | ||
80 | kfree(rq->bufs[i]); | ||
81 | rq->bufs[i] = NULL; | ||
82 | } | ||
83 | |||
84 | rq->ctrl = NULL; | ||
85 | } | ||
86 | |||
87 | int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, | ||
88 | unsigned int desc_count, unsigned int desc_size) | ||
89 | { | ||
90 | int err; | ||
91 | |||
92 | rq->index = index; | ||
93 | rq->vdev = vdev; | ||
94 | |||
95 | rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index); | ||
96 | if (!rq->ctrl) { | ||
97 | printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index); | ||
98 | return -EINVAL; | ||
99 | } | ||
100 | |||
101 | vnic_rq_disable(rq); | ||
102 | |||
103 | err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size); | ||
104 | if (err) | ||
105 | return err; | ||
106 | |||
107 | err = vnic_rq_alloc_bufs(rq); | ||
108 | if (err) { | ||
109 | vnic_rq_free(rq); | ||
110 | return err; | ||
111 | } | ||
112 | |||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, | ||
117 | unsigned int error_interrupt_enable, | ||
118 | unsigned int error_interrupt_offset) | ||
119 | { | ||
120 | u64 paddr; | ||
121 | u32 fetch_index; | ||
122 | |||
123 | paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET; | ||
124 | writeq(paddr, &rq->ctrl->ring_base); | ||
125 | iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size); | ||
126 | iowrite32(cq_index, &rq->ctrl->cq_index); | ||
127 | iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable); | ||
128 | iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset); | ||
129 | iowrite32(0, &rq->ctrl->dropped_packet_count); | ||
130 | iowrite32(0, &rq->ctrl->error_status); | ||
131 | |||
132 | /* Use current fetch_index as the ring starting point */ | ||
133 | fetch_index = ioread32(&rq->ctrl->fetch_index); | ||
134 | rq->to_use = rq->to_clean = | ||
135 | &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES] | ||
136 | [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES]; | ||
137 | iowrite32(fetch_index, &rq->ctrl->posted_index); | ||
138 | |||
139 | rq->buf_index = 0; | ||
140 | } | ||
141 | |||
142 | unsigned int vnic_rq_error_status(struct vnic_rq *rq) | ||
143 | { | ||
144 | return ioread32(&rq->ctrl->error_status); | ||
145 | } | ||
146 | |||
147 | void vnic_rq_enable(struct vnic_rq *rq) | ||
148 | { | ||
149 | iowrite32(1, &rq->ctrl->enable); | ||
150 | } | ||
151 | |||
152 | int vnic_rq_disable(struct vnic_rq *rq) | ||
153 | { | ||
154 | unsigned int wait; | ||
155 | |||
156 | iowrite32(0, &rq->ctrl->enable); | ||
157 | |||
158 | /* Wait for HW to ACK disable request */ | ||
159 | for (wait = 0; wait < 100; wait++) { | ||
160 | if (!(ioread32(&rq->ctrl->running))) | ||
161 | return 0; | ||
162 | udelay(1); | ||
163 | } | ||
164 | |||
165 | printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index); | ||
166 | |||
167 | return -ETIMEDOUT; | ||
168 | } | ||
169 | |||
170 | void vnic_rq_clean(struct vnic_rq *rq, | ||
171 | void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)) | ||
172 | { | ||
173 | struct vnic_rq_buf *buf; | ||
174 | u32 fetch_index; | ||
175 | |||
176 | BUG_ON(ioread32(&rq->ctrl->enable)); | ||
177 | |||
178 | buf = rq->to_clean; | ||
179 | |||
180 | while (vnic_rq_desc_used(rq) > 0) { | ||
181 | |||
182 | (*buf_clean)(rq, buf); | ||
183 | |||
184 | buf = rq->to_clean = buf->next; | ||
185 | rq->ring.desc_avail++; | ||
186 | } | ||
187 | |||
188 | /* Use current fetch_index as the ring starting point */ | ||
189 | fetch_index = ioread32(&rq->ctrl->fetch_index); | ||
190 | rq->to_use = rq->to_clean = | ||
191 | &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES] | ||
192 | [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES]; | ||
193 | iowrite32(fetch_index, &rq->ctrl->posted_index); | ||
194 | |||
195 | rq->buf_index = 0; | ||
196 | |||
197 | vnic_dev_clear_desc_ring(&rq->ring); | ||
198 | } | ||
199 | |||
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h new file mode 100644 index 000000000000..82bfca67cc4d --- /dev/null +++ b/drivers/net/enic/vnic_rq.h | |||
@@ -0,0 +1,204 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _VNIC_RQ_H_ | ||
21 | #define _VNIC_RQ_H_ | ||
22 | |||
23 | #include <linux/pci.h> | ||
24 | |||
25 | #include "vnic_dev.h" | ||
26 | #include "vnic_cq.h" | ||
27 | |||
28 | /* Receive queue control */ | ||
29 | struct vnic_rq_ctrl { | ||
30 | u64 ring_base; /* 0x00 */ | ||
31 | u32 ring_size; /* 0x08 */ | ||
32 | u32 pad0; | ||
33 | u32 posted_index; /* 0x10 */ | ||
34 | u32 pad1; | ||
35 | u32 cq_index; /* 0x18 */ | ||
36 | u32 pad2; | ||
37 | u32 enable; /* 0x20 */ | ||
38 | u32 pad3; | ||
39 | u32 running; /* 0x28 */ | ||
40 | u32 pad4; | ||
41 | u32 fetch_index; /* 0x30 */ | ||
42 | u32 pad5; | ||
43 | u32 error_interrupt_enable; /* 0x38 */ | ||
44 | u32 pad6; | ||
45 | u32 error_interrupt_offset; /* 0x40 */ | ||
46 | u32 pad7; | ||
47 | u32 error_status; /* 0x48 */ | ||
48 | u32 pad8; | ||
49 | u32 dropped_packet_count; /* 0x50 */ | ||
50 | u32 pad9; | ||
51 | u32 dropped_packet_count_rc; /* 0x58 */ | ||
52 | u32 pad10; | ||
53 | }; | ||
54 | |||
55 | /* Break the vnic_rq_buf allocations into blocks of 64 entries */ | ||
56 | #define VNIC_RQ_BUF_BLK_ENTRIES 64 | ||
57 | #define VNIC_RQ_BUF_BLK_SZ \ | ||
58 | (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf)) | ||
59 | #define VNIC_RQ_BUF_BLKS_NEEDED(entries) \ | ||
60 | DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES) | ||
61 | #define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096) | ||
62 | |||
63 | struct vnic_rq_buf { | ||
64 | struct vnic_rq_buf *next; | ||
65 | dma_addr_t dma_addr; | ||
66 | void *os_buf; | ||
67 | unsigned int os_buf_index; | ||
68 | unsigned int len; | ||
69 | unsigned int index; | ||
70 | void *desc; | ||
71 | }; | ||
72 | |||
73 | struct vnic_rq { | ||
74 | unsigned int index; | ||
75 | struct vnic_dev *vdev; | ||
76 | struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */ | ||
77 | struct vnic_dev_ring ring; | ||
78 | struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX]; | ||
79 | struct vnic_rq_buf *to_use; | ||
80 | struct vnic_rq_buf *to_clean; | ||
81 | void *os_buf_head; | ||
82 | unsigned int buf_index; | ||
83 | unsigned int pkts_outstanding; | ||
84 | }; | ||
85 | |||
86 | static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) | ||
87 | { | ||
88 | /* how many does SW own? */ | ||
89 | return rq->ring.desc_avail; | ||
90 | } | ||
91 | |||
92 | static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) | ||
93 | { | ||
94 | /* how many does HW own? */ | ||
95 | return rq->ring.desc_count - rq->ring.desc_avail - 1; | ||
96 | } | ||
97 | |||
98 | static inline void *vnic_rq_next_desc(struct vnic_rq *rq) | ||
99 | { | ||
100 | return rq->to_use->desc; | ||
101 | } | ||
102 | |||
103 | static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) | ||
104 | { | ||
105 | return rq->to_use->index; | ||
106 | } | ||
107 | |||
108 | static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) | ||
109 | { | ||
110 | return rq->buf_index++; | ||
111 | } | ||
112 | |||
113 | static inline void vnic_rq_post(struct vnic_rq *rq, | ||
114 | void *os_buf, unsigned int os_buf_index, | ||
115 | dma_addr_t dma_addr, unsigned int len) | ||
116 | { | ||
117 | struct vnic_rq_buf *buf = rq->to_use; | ||
118 | |||
119 | buf->os_buf = os_buf; | ||
120 | buf->os_buf_index = os_buf_index; | ||
121 | buf->dma_addr = dma_addr; | ||
122 | buf->len = len; | ||
123 | |||
124 | buf = buf->next; | ||
125 | rq->to_use = buf; | ||
126 | rq->ring.desc_avail--; | ||
127 | |||
128 | /* Move the posted_index every nth descriptor | ||
129 | */ | ||
130 | |||
131 | #ifndef VNIC_RQ_RETURN_RATE | ||
132 | #define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */ | ||
133 | #endif | ||
134 | |||
135 | if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) | ||
136 | iowrite32(buf->index, &rq->ctrl->posted_index); | ||
137 | } | ||
138 | |||
139 | static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) | ||
140 | { | ||
141 | rq->ring.desc_avail += count; | ||
142 | } | ||
143 | |||
144 | enum desc_return_options { | ||
145 | VNIC_RQ_RETURN_DESC, | ||
146 | VNIC_RQ_DEFER_RETURN_DESC, | ||
147 | }; | ||
148 | |||
149 | static inline void vnic_rq_service(struct vnic_rq *rq, | ||
150 | struct cq_desc *cq_desc, u16 completed_index, | ||
151 | int desc_return, void (*buf_service)(struct vnic_rq *rq, | ||
152 | struct cq_desc *cq_desc, struct vnic_rq_buf *buf, | ||
153 | int skipped, void *opaque), void *opaque) | ||
154 | { | ||
155 | struct vnic_rq_buf *buf; | ||
156 | int skipped; | ||
157 | |||
158 | buf = rq->to_clean; | ||
159 | while (1) { | ||
160 | |||
161 | skipped = (buf->index != completed_index); | ||
162 | |||
163 | (*buf_service)(rq, cq_desc, buf, skipped, opaque); | ||
164 | |||
165 | if (desc_return == VNIC_RQ_RETURN_DESC) | ||
166 | rq->ring.desc_avail++; | ||
167 | |||
168 | rq->to_clean = buf->next; | ||
169 | |||
170 | if (!skipped) | ||
171 | break; | ||
172 | |||
173 | buf = rq->to_clean; | ||
174 | } | ||
175 | } | ||
176 | |||
177 | static inline int vnic_rq_fill(struct vnic_rq *rq, | ||
178 | int (*buf_fill)(struct vnic_rq *rq)) | ||
179 | { | ||
180 | int err; | ||
181 | |||
182 | while (vnic_rq_desc_avail(rq) > 1) { | ||
183 | |||
184 | err = (*buf_fill)(rq); | ||
185 | if (err) | ||
186 | return err; | ||
187 | } | ||
188 | |||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | void vnic_rq_free(struct vnic_rq *rq); | ||
193 | int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, | ||
194 | unsigned int desc_count, unsigned int desc_size); | ||
195 | void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, | ||
196 | unsigned int error_interrupt_enable, | ||
197 | unsigned int error_interrupt_offset); | ||
198 | unsigned int vnic_rq_error_status(struct vnic_rq *rq); | ||
199 | void vnic_rq_enable(struct vnic_rq *rq); | ||
200 | int vnic_rq_disable(struct vnic_rq *rq); | ||
201 | void vnic_rq_clean(struct vnic_rq *rq, | ||
202 | void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)); | ||
203 | |||
204 | #endif /* _VNIC_RQ_H_ */ | ||
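Tying the pieces together, a refill path allocates an OS buffer, maps it, and posts it; vnic_rq_fill() repeats a callback of this shape until fewer than two descriptors remain. A sketch only: the real callback recovers the pci_dev from its adapter context rather than taking it as a parameter, and the buffer length here is an assumption:

static int example_post_rx_buf(struct vnic_rq *rq, struct pci_dev *pdev)
{
	unsigned int len = ETH_FRAME_LEN + 4;	/* assumed: 1514 + VLAN */
	struct sk_buff *skb = dev_alloc_skb(len);
	dma_addr_t dma;

	if (!skb)
		return -ENOMEM;

	dma = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);

	/* hand one buffer to HW; note vnic_rq_post() batches the
	 * posted_index doorbell every VNIC_RQ_RETURN_RATE buffers */
	vnic_rq_post(rq, skb, vnic_rq_next_buf_index(rq), dma, len);

	return 0;
}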
diff --git a/drivers/net/enic/vnic_rss.h b/drivers/net/enic/vnic_rss.h new file mode 100644 index 000000000000..e325d65d7c34 --- /dev/null +++ b/drivers/net/enic/vnic_rss.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | */ | ||
5 | |||
6 | #ifndef _VNIC_RSS_H_ | ||
7 | #define _VNIC_RSS_H_ | ||
8 | |||
9 | /* RSS key array */ | ||
10 | union vnic_rss_key { | ||
11 | struct { | ||
12 | u8 b[10]; | ||
13 | u8 b_pad[6]; | ||
14 | } key[4]; | ||
15 | u64 raw[8]; | ||
16 | }; | ||
17 | |||
18 | /* RSS cpu array */ | ||
19 | union vnic_rss_cpu { | ||
20 | struct { | ||
21 | u8 b[4]; | ||
22 | u8 b_pad[4]; | ||
23 | } cpu[32]; | ||
24 | u64 raw[32]; | ||
25 | }; | ||
26 | |||
27 | void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key); | ||
28 | void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu); | ||
29 | void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key); | ||
30 | void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu); | ||
31 | |||
32 | #endif /* _VNIC_RSS_H_ */ | ||
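The padded layout above means a flat 40-byte RSS key is stored as four 10-byte stripes, each padded out to 16 bytes (64 bytes total, matching raw[8]). A sketch of the setter's likely shape; the real helpers live in vnic_rss.c, which is not part of this diff:

static void example_set_rss_key(union vnic_rss_key *rss_key, const u8 *key)
{
	int i;

	/* copy 10 key bytes into each 16-byte stripe */
	for (i = 0; i < 4; i++)
		memcpy(rss_key->key[i].b, &key[i * 10], 10);
}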
diff --git a/drivers/net/enic/vnic_stats.h b/drivers/net/enic/vnic_stats.h new file mode 100644 index 000000000000..9ff9614d89b1 --- /dev/null +++ b/drivers/net/enic/vnic_stats.h | |||
@@ -0,0 +1,70 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _VNIC_STATS_H_ | ||
21 | #define _VNIC_STATS_H_ | ||
22 | |||
23 | /* Tx statistics */ | ||
24 | struct vnic_tx_stats { | ||
25 | u64 tx_frames_ok; | ||
26 | u64 tx_unicast_frames_ok; | ||
27 | u64 tx_multicast_frames_ok; | ||
28 | u64 tx_broadcast_frames_ok; | ||
29 | u64 tx_bytes_ok; | ||
30 | u64 tx_unicast_bytes_ok; | ||
31 | u64 tx_multicast_bytes_ok; | ||
32 | u64 tx_broadcast_bytes_ok; | ||
33 | u64 tx_drops; | ||
34 | u64 tx_errors; | ||
35 | u64 tx_tso; | ||
36 | u64 rsvd[16]; | ||
37 | }; | ||
38 | |||
39 | /* Rx statistics */ | ||
40 | struct vnic_rx_stats { | ||
41 | u64 rx_frames_ok; | ||
42 | u64 rx_frames_total; | ||
43 | u64 rx_unicast_frames_ok; | ||
44 | u64 rx_multicast_frames_ok; | ||
45 | u64 rx_broadcast_frames_ok; | ||
46 | u64 rx_bytes_ok; | ||
47 | u64 rx_unicast_bytes_ok; | ||
48 | u64 rx_multicast_bytes_ok; | ||
49 | u64 rx_broadcast_bytes_ok; | ||
50 | u64 rx_drop; | ||
51 | u64 rx_no_bufs; | ||
52 | u64 rx_errors; | ||
53 | u64 rx_rss; | ||
54 | u64 rx_crc_errors; | ||
55 | u64 rx_frames_64; | ||
56 | u64 rx_frames_127; | ||
57 | u64 rx_frames_255; | ||
58 | u64 rx_frames_511; | ||
59 | u64 rx_frames_1023; | ||
60 | u64 rx_frames_1518; | ||
61 | u64 rx_frames_to_max; | ||
62 | u64 rsvd[16]; | ||
63 | }; | ||
64 | |||
65 | struct vnic_stats { | ||
66 | struct vnic_tx_stats tx; | ||
67 | struct vnic_rx_stats rx; | ||
68 | }; | ||
69 | |||
70 | #endif /* _VNIC_STATS_H_ */ | ||
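These counters are maintained by firmware and dumped to host memory via CMD_STATS_DUMP, after which the driver folds them into the stack's generic counters. A sketch of that mapping, assuming vs points at a freshly dumped, DMA-coherent buffer:

static void example_fill_stats(struct net_device_stats *ns,
	const struct vnic_stats *vs)
{
	ns->tx_packets    = vs->tx.tx_frames_ok;
	ns->tx_bytes      = vs->tx.tx_bytes_ok;
	ns->tx_errors     = vs->tx.tx_errors;
	ns->tx_dropped    = vs->tx.tx_drops;
	ns->rx_packets    = vs->rx.rx_frames_ok;
	ns->rx_bytes      = vs->rx.rx_bytes_ok;
	ns->rx_errors     = vs->rx.rx_errors;
	ns->rx_crc_errors = vs->rx.rx_crc_errors;
	ns->rx_dropped    = vs->rx.rx_no_bufs;
}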
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c new file mode 100644 index 000000000000..a576d04708ef --- /dev/null +++ b/drivers/net/enic/vnic_wq.c | |||
@@ -0,0 +1,184 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/pci.h> | ||
24 | #include <linux/delay.h> | ||
25 | |||
26 | #include "vnic_dev.h" | ||
27 | #include "vnic_wq.h" | ||
28 | |||
29 | static int vnic_wq_alloc_bufs(struct vnic_wq *wq) | ||
30 | { | ||
31 | struct vnic_wq_buf *buf; | ||
32 | struct vnic_dev *vdev; | ||
33 | unsigned int i, j, count = wq->ring.desc_count; | ||
34 | unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); | ||
35 | |||
36 | vdev = wq->vdev; | ||
37 | |||
38 | for (i = 0; i < blks; i++) { | ||
39 | wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); | ||
40 | if (!wq->bufs[i]) { | ||
41 | printk(KERN_ERR "Failed to alloc wq_bufs\n"); | ||
42 | return -ENOMEM; | ||
43 | } | ||
44 | } | ||
45 | |||
46 | for (i = 0; i < blks; i++) { | ||
47 | buf = wq->bufs[i]; | ||
48 | for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) { | ||
49 | buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j; | ||
50 | buf->desc = (u8 *)wq->ring.descs + | ||
51 | wq->ring.desc_size * buf->index; | ||
52 | if (buf->index + 1 == count) { | ||
53 | buf->next = wq->bufs[0]; | ||
54 | break; | ||
55 | } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) { | ||
56 | buf->next = wq->bufs[i + 1]; | ||
57 | } else { | ||
58 | buf->next = buf + 1; | ||
59 | buf++; | ||
60 | } | ||
61 | } | ||
62 | } | ||
63 | |||
64 | wq->to_use = wq->to_clean = wq->bufs[0]; | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | void vnic_wq_free(struct vnic_wq *wq) | ||
70 | { | ||
71 | struct vnic_dev *vdev; | ||
72 | unsigned int i; | ||
73 | |||
74 | vdev = wq->vdev; | ||
75 | |||
76 | vnic_dev_free_desc_ring(vdev, &wq->ring); | ||
77 | |||
78 | for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) { | ||
79 | kfree(wq->bufs[i]); | ||
80 | wq->bufs[i] = NULL; | ||
81 | } | ||
82 | |||
83 | wq->ctrl = NULL; | ||
84 | } | ||
85 | |||
86 | int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, | ||
87 | unsigned int desc_count, unsigned int desc_size) | ||
88 | { | ||
89 | int err; | ||
90 | |||
91 | wq->index = index; | ||
92 | wq->vdev = vdev; | ||
93 | |||
94 | wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); | ||
95 | if (!wq->ctrl) { | ||
96 | printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index); | ||
97 | return -EINVAL; | ||
98 | } | ||
99 | |||
100 | vnic_wq_disable(wq); | ||
101 | |||
102 | err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); | ||
103 | if (err) | ||
104 | return err; | ||
105 | |||
106 | err = vnic_wq_alloc_bufs(wq); | ||
107 | if (err) { | ||
108 | vnic_wq_free(wq); | ||
109 | return err; | ||
110 | } | ||
111 | |||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, | ||
116 | unsigned int error_interrupt_enable, | ||
117 | unsigned int error_interrupt_offset) | ||
118 | { | ||
119 | u64 paddr; | ||
120 | |||
121 | paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; | ||
122 | writeq(paddr, &wq->ctrl->ring_base); | ||
123 | iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); | ||
124 | iowrite32(0, &wq->ctrl->fetch_index); | ||
125 | iowrite32(0, &wq->ctrl->posted_index); | ||
126 | iowrite32(cq_index, &wq->ctrl->cq_index); | ||
127 | iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); | ||
128 | iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); | ||
129 | iowrite32(0, &wq->ctrl->error_status); | ||
130 | } | ||
131 | |||
132 | unsigned int vnic_wq_error_status(struct vnic_wq *wq) | ||
133 | { | ||
134 | return ioread32(&wq->ctrl->error_status); | ||
135 | } | ||
136 | |||
137 | void vnic_wq_enable(struct vnic_wq *wq) | ||
138 | { | ||
139 | iowrite32(1, &wq->ctrl->enable); | ||
140 | } | ||
141 | |||
142 | int vnic_wq_disable(struct vnic_wq *wq) | ||
143 | { | ||
144 | unsigned int wait; | ||
145 | |||
146 | iowrite32(0, &wq->ctrl->enable); | ||
147 | |||
148 | /* Wait for HW to ACK disable request */ | ||
149 | for (wait = 0; wait < 100; wait++) { | ||
150 | if (!(ioread32(&wq->ctrl->running))) | ||
151 | return 0; | ||
152 | udelay(1); | ||
153 | } | ||
154 | |||
155 | printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index); | ||
156 | |||
157 | return -ETIMEDOUT; | ||
158 | } | ||
159 | |||
160 | void vnic_wq_clean(struct vnic_wq *wq, | ||
161 | void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) | ||
162 | { | ||
163 | struct vnic_wq_buf *buf; | ||
164 | |||
165 | BUG_ON(ioread32(&wq->ctrl->enable)); | ||
166 | |||
167 | buf = wq->to_clean; | ||
168 | |||
169 | while (vnic_wq_desc_used(wq) > 0) { | ||
170 | |||
171 | (*buf_clean)(wq, buf); | ||
172 | |||
173 | buf = wq->to_clean = buf->next; | ||
174 | wq->ring.desc_avail++; | ||
175 | } | ||
176 | |||
177 | wq->to_use = wq->to_clean = wq->bufs[0]; | ||
178 | |||
179 | iowrite32(0, &wq->ctrl->fetch_index); | ||
180 | iowrite32(0, &wq->ctrl->posted_index); | ||
181 | iowrite32(0, &wq->ctrl->error_status); | ||
182 | |||
183 | vnic_dev_clear_desc_ring(&wq->ring); | ||
184 | } | ||
diff --git a/drivers/net/enic/vnic_wq.h b/drivers/net/enic/vnic_wq.h new file mode 100644 index 000000000000..7081828d8a42 --- /dev/null +++ b/drivers/net/enic/vnic_wq.h | |||
@@ -0,0 +1,154 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _VNIC_WQ_H_ | ||
21 | #define _VNIC_WQ_H_ | ||
22 | |||
23 | #include <linux/pci.h> | ||
24 | |||
25 | #include "vnic_dev.h" | ||
26 | #include "vnic_cq.h" | ||
27 | |||
28 | /* Work queue control */ | ||
29 | struct vnic_wq_ctrl { | ||
30 | u64 ring_base; /* 0x00 */ | ||
31 | u32 ring_size; /* 0x08 */ | ||
32 | u32 pad0; | ||
33 | u32 posted_index; /* 0x10 */ | ||
34 | u32 pad1; | ||
35 | u32 cq_index; /* 0x18 */ | ||
36 | u32 pad2; | ||
37 | u32 enable; /* 0x20 */ | ||
38 | u32 pad3; | ||
39 | u32 running; /* 0x28 */ | ||
40 | u32 pad4; | ||
41 | u32 fetch_index; /* 0x30 */ | ||
42 | u32 pad5; | ||
43 | u32 dca_value; /* 0x38 */ | ||
44 | u32 pad6; | ||
45 | u32 error_interrupt_enable; /* 0x40 */ | ||
46 | u32 pad7; | ||
47 | u32 error_interrupt_offset; /* 0x48 */ | ||
48 | u32 pad8; | ||
49 | u32 error_status; /* 0x50 */ | ||
50 | u32 pad9; | ||
51 | }; | ||
52 | |||
53 | struct vnic_wq_buf { | ||
54 | struct vnic_wq_buf *next; | ||
55 | dma_addr_t dma_addr; | ||
56 | void *os_buf; | ||
57 | unsigned int len; | ||
58 | unsigned int index; | ||
59 | int sop; | ||
60 | void *desc; | ||
61 | }; | ||
62 | |||
63 | /* Break the vnic_wq_buf allocations into blocks of 64 entries */ | ||
64 | #define VNIC_WQ_BUF_BLK_ENTRIES 64 | ||
65 | #define VNIC_WQ_BUF_BLK_SZ \ | ||
66 | (VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf)) | ||
67 | #define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ | ||
68 | DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES) | ||
69 | #define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096) | ||
70 | |||
71 | struct vnic_wq { | ||
72 | unsigned int index; | ||
73 | struct vnic_dev *vdev; | ||
74 | struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ | ||
75 | struct vnic_dev_ring ring; | ||
76 | struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX]; | ||
77 | struct vnic_wq_buf *to_use; | ||
78 | struct vnic_wq_buf *to_clean; | ||
79 | unsigned int pkts_outstanding; | ||
80 | }; | ||
81 | |||
82 | static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) | ||
83 | { | ||
84 | /* how many does SW own? */ | ||
85 | return wq->ring.desc_avail; | ||
86 | } | ||
87 | |||
88 | static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) | ||
89 | { | ||
90 | /* how many does HW own? */ | ||
91 | return wq->ring.desc_count - wq->ring.desc_avail - 1; | ||
92 | } | ||
93 | |||
94 | static inline void *vnic_wq_next_desc(struct vnic_wq *wq) | ||
95 | { | ||
96 | return wq->to_use->desc; | ||
97 | } | ||
98 | |||
99 | static inline void vnic_wq_post(struct vnic_wq *wq, | ||
100 | void *os_buf, dma_addr_t dma_addr, | ||
101 | unsigned int len, int sop, int eop) | ||
102 | { | ||
103 | struct vnic_wq_buf *buf = wq->to_use; | ||
104 | |||
105 | buf->sop = sop; | ||
106 | buf->os_buf = eop ? os_buf : NULL; | ||
107 | buf->dma_addr = dma_addr; | ||
108 | buf->len = len; | ||
109 | |||
110 | buf = buf->next; | ||
111 | if (eop) | ||
112 | iowrite32(buf->index, &wq->ctrl->posted_index); | ||
113 | wq->to_use = buf; | ||
114 | |||
115 | wq->ring.desc_avail--; | ||
116 | } | ||
117 | |||
118 | static inline void vnic_wq_service(struct vnic_wq *wq, | ||
119 | struct cq_desc *cq_desc, u16 completed_index, | ||
120 | void (*buf_service)(struct vnic_wq *wq, | ||
121 | struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), | ||
122 | void *opaque) | ||
123 | { | ||
124 | struct vnic_wq_buf *buf; | ||
125 | |||
126 | buf = wq->to_clean; | ||
127 | while (1) { | ||
128 | |||
129 | (*buf_service)(wq, cq_desc, buf, opaque); | ||
130 | |||
131 | wq->ring.desc_avail++; | ||
132 | |||
133 | wq->to_clean = buf->next; | ||
134 | |||
135 | if (buf->index == completed_index) | ||
136 | break; | ||
137 | |||
138 | buf = wq->to_clean; | ||
139 | } | ||
140 | } | ||
141 | |||
142 | void vnic_wq_free(struct vnic_wq *wq); | ||
143 | int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, | ||
144 | unsigned int desc_count, unsigned int desc_size); | ||
145 | void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, | ||
146 | unsigned int error_interrupt_enable, | ||
147 | unsigned int error_interrupt_offset); | ||
148 | unsigned int vnic_wq_error_status(struct vnic_wq *wq); | ||
149 | void vnic_wq_enable(struct vnic_wq *wq); | ||
150 | int vnic_wq_disable(struct vnic_wq *wq); | ||
151 | void vnic_wq_clean(struct vnic_wq *wq, | ||
152 | void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)); | ||
153 | |||
154 | #endif /* _VNIC_WQ_H_ */ | ||
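vnic_wq_post() stores the OS buffer pointer only on the eop fragment and rings the posted_index doorbell only when eop is set, so a multi-fragment packet goes out as one sop descriptor, any middle descriptors, and a final eop descriptor. A two-fragment sketch, assuming the DMA handles come from earlier pci_map_* calls:

static void example_post_two_frags(struct vnic_wq *wq, struct sk_buff *skb,
	dma_addr_t dma_hdr, unsigned int hdr_len,
	dma_addr_t dma_data, unsigned int data_len)
{
	vnic_wq_post(wq, skb, dma_hdr, hdr_len, 1 /* sop */, 0 /* eop */);
	vnic_wq_post(wq, skb, dma_data, data_len, 0 /* sop */, 1 /* eop */);
}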
diff --git a/drivers/net/enic/wq_enet_desc.h b/drivers/net/enic/wq_enet_desc.h new file mode 100644 index 000000000000..483596c2d8bf --- /dev/null +++ b/drivers/net/enic/wq_enet_desc.h | |||
@@ -0,0 +1,98 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | ||
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you may redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
16 | * SOFTWARE. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _WQ_ENET_DESC_H_ | ||
21 | #define _WQ_ENET_DESC_H_ | ||
22 | |||
23 | /* Ethernet work queue descriptor: 16B */ | ||
24 | struct wq_enet_desc { | ||
25 | __le64 address; | ||
26 | __le16 length; | ||
27 | __le16 mss_loopback; | ||
28 | __le16 header_length_flags; | ||
29 | __le16 vlan_tag; | ||
30 | }; | ||
31 | |||
32 | #define WQ_ENET_ADDR_BITS 64 | ||
33 | #define WQ_ENET_LEN_BITS 14 | ||
34 | #define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1) | ||
35 | #define WQ_ENET_MSS_BITS 14 | ||
36 | #define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1) | ||
37 | #define WQ_ENET_MSS_SHIFT 2 | ||
38 | #define WQ_ENET_LOOPBACK_SHIFT 1 | ||
39 | #define WQ_ENET_HDRLEN_BITS 10 | ||
40 | #define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1) | ||
41 | #define WQ_ENET_FLAGS_OM_BITS 2 | ||
42 | #define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1) | ||
43 | #define WQ_ENET_FLAGS_EOP_SHIFT 12 | ||
44 | #define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13 | ||
45 | #define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14 | ||
46 | #define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15 | ||
47 | |||
48 | #define WQ_ENET_OFFLOAD_MODE_CSUM 0 | ||
49 | #define WQ_ENET_OFFLOAD_MODE_RESERVED 1 | ||
50 | #define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2 | ||
51 | #define WQ_ENET_OFFLOAD_MODE_TSO 3 | ||
52 | |||
53 | static inline void wq_enet_desc_enc(struct wq_enet_desc *desc, | ||
54 | u64 address, u16 length, u16 mss, u16 header_length, | ||
55 | u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap, | ||
56 | u8 vlan_tag_insert, u16 vlan_tag, u8 loopback) | ||
57 | { | ||
58 | desc->address = cpu_to_le64(address); | ||
59 | desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK); | ||
60 | desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) << | ||
61 | WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT); | ||
62 | desc->header_length_flags = cpu_to_le16( | ||
63 | (header_length & WQ_ENET_HDRLEN_MASK) | | ||
64 | (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS | | ||
65 | (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT | | ||
66 | (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT | | ||
67 | (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT | | ||
68 | (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT); | ||
69 | desc->vlan_tag = cpu_to_le16(vlan_tag); | ||
70 | } | ||
71 | |||
72 | static inline void wq_enet_desc_dec(struct wq_enet_desc *desc, | ||
73 | u64 *address, u16 *length, u16 *mss, u16 *header_length, | ||
74 | u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap, | ||
75 | u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback) | ||
76 | { | ||
77 | *address = le64_to_cpu(desc->address); | ||
78 | *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK; | ||
79 | *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) & | ||
80 | WQ_ENET_MSS_MASK; | ||
81 | *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >> | ||
82 | WQ_ENET_LOOPBACK_SHIFT) & 1); | ||
83 | *header_length = le16_to_cpu(desc->header_length_flags) & | ||
84 | WQ_ENET_HDRLEN_MASK; | ||
85 | *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >> | ||
86 | WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK); | ||
87 | *eop = (u8)((le16_to_cpu(desc->header_length_flags) >> | ||
88 | WQ_ENET_FLAGS_EOP_SHIFT) & 1); | ||
89 | *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >> | ||
90 | WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1); | ||
91 | *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >> | ||
92 | WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1); | ||
93 | *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >> | ||
94 | WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1); | ||
95 | *vlan_tag = le16_to_cpu(desc->vlan_tag); | ||
96 | } | ||
97 | |||
98 | #endif /* _WQ_ENET_DESC_H_ */ | ||
diff --git a/drivers/net/jme.c b/drivers/net/jme.c new file mode 100644 index 000000000000..f292df557544 --- /dev/null +++ b/drivers/net/jme.c | |||
@@ -0,0 +1,3019 @@ | |||
1 | /* | ||
2 | * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver | ||
3 | * | ||
4 | * Copyright 2008 JMicron Technology Corporation | ||
5 | * http://www.jmicron.com/ | ||
6 | * | ||
7 | * Author: Guo-Fu Tseng <cooldavid@cooldavid.org> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; version 2 of the License. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/version.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/pci.h> | ||
28 | #include <linux/netdevice.h> | ||
29 | #include <linux/etherdevice.h> | ||
30 | #include <linux/ethtool.h> | ||
31 | #include <linux/mii.h> | ||
32 | #include <linux/crc32.h> | ||
33 | #include <linux/delay.h> | ||
34 | #include <linux/spinlock.h> | ||
35 | #include <linux/in.h> | ||
36 | #include <linux/ip.h> | ||
37 | #include <linux/ipv6.h> | ||
38 | #include <linux/tcp.h> | ||
39 | #include <linux/udp.h> | ||
40 | #include <linux/if_vlan.h> | ||
41 | #include "jme.h" | ||
42 | |||
43 | static int force_pseudohp = -1; | ||
44 | static int no_pseudohp = -1; | ||
45 | static int no_extplug = -1; | ||
46 | module_param(force_pseudohp, int, 0); | ||
47 | MODULE_PARM_DESC(force_pseudohp, | ||
48 | "Enable pseudo hot-plug feature manually by driver instead of BIOS."); | ||
49 | module_param(no_pseudohp, int, 0); | ||
50 | MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature."); | ||
51 | module_param(no_extplug, int, 0); | ||
52 | MODULE_PARM_DESC(no_extplug, | ||
53 | "Do not use external plug signal for pseudo hot-plug."); | ||
54 | |||
55 | static int | ||
56 | jme_mdio_read(struct net_device *netdev, int phy, int reg) | ||
57 | { | ||
58 | struct jme_adapter *jme = netdev_priv(netdev); | ||
59 | int i, val, again = (reg == MII_BMSR) ? 1 : 0; | ||
60 | |||
61 | read_again: | ||
62 | jwrite32(jme, JME_SMI, SMI_OP_REQ | | ||
63 | smi_phy_addr(phy) | | ||
64 | smi_reg_addr(reg)); | ||
65 | |||
66 | wmb(); | ||
67 | for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { | ||
68 | udelay(20); | ||
69 | val = jread32(jme, JME_SMI); | ||
70 | if ((val & SMI_OP_REQ) == 0) | ||
71 | break; | ||
72 | } | ||
73 | |||
74 | if (i == 0) { | ||
75 | jeprintk(jme->pdev, "phy(%d) read timeout : %d\n", phy, reg); | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | if (again--) | ||
80 | goto read_again; | ||
81 | |||
82 | return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT; | ||
83 | } | ||
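| /* | ||
| * jme_mdio_read() issues the MII_BMSR read twice on purpose: the BMSR | ||
| * link-status bit is latched-low (IEEE 802.3 clause 22), so the first | ||
| * read may report a stale link-down event; the second read returns | ||
| * the current status. | ||
| */ | ||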
84 | |||
85 | static void | ||
86 | jme_mdio_write(struct net_device *netdev, | ||
87 | int phy, int reg, int val) | ||
88 | { | ||
89 | struct jme_adapter *jme = netdev_priv(netdev); | ||
90 | int i; | ||
91 | |||
92 | jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ | | ||
93 | ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | | ||
94 | smi_phy_addr(phy) | smi_reg_addr(reg)); | ||
95 | |||
96 | wmb(); | ||
97 | for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { | ||
98 | udelay(20); | ||
99 | if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0) | ||
100 | break; | ||
101 | } | ||
102 | |||
103 | if (i == 0) | ||
104 | jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg); | ||
105 | |||
106 | return; | ||
107 | } | ||
108 | |||
109 | static inline void | ||
110 | jme_reset_phy_processor(struct jme_adapter *jme) | ||
111 | { | ||
112 | u32 val; | ||
113 | |||
114 | jme_mdio_write(jme->dev, | ||
115 | jme->mii_if.phy_id, | ||
116 | MII_ADVERTISE, ADVERTISE_ALL | | ||
117 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); | ||
118 | |||
119 | if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) | ||
120 | jme_mdio_write(jme->dev, | ||
121 | jme->mii_if.phy_id, | ||
122 | MII_CTRL1000, | ||
123 | ADVERTISE_1000FULL | ADVERTISE_1000HALF); | ||
124 | |||
125 | val = jme_mdio_read(jme->dev, | ||
126 | jme->mii_if.phy_id, | ||
127 | MII_BMCR); | ||
128 | |||
129 | jme_mdio_write(jme->dev, | ||
130 | jme->mii_if.phy_id, | ||
131 | MII_BMCR, val | BMCR_RESET); | ||
132 | |||
133 | return; | ||
134 | } | ||
135 | |||
136 | static void | ||
137 | jme_setup_wakeup_frame(struct jme_adapter *jme, | ||
138 | u32 *mask, u32 crc, int fnr) | ||
139 | { | ||
140 | int i; | ||
141 | |||
142 | /* | ||
143 | * Setup CRC pattern | ||
144 | */ | ||
145 | jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL)); | ||
146 | wmb(); | ||
147 | jwrite32(jme, JME_WFODP, crc); | ||
148 | wmb(); | ||
149 | |||
150 | /* | ||
151 | * Setup Mask | ||
152 | */ | ||
153 | for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) { | ||
154 | jwrite32(jme, JME_WFOI, | ||
155 | ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) | | ||
156 | (fnr & WFOI_FRAME_SEL)); | ||
157 | wmb(); | ||
158 | jwrite32(jme, JME_WFODP, mask[i]); | ||
159 | wmb(); | ||
160 | } | ||
161 | } | ||
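| /* | ||
| * WFOI/WFODP act as an indirect register pair: WFOI selects the | ||
| * target (the CRC word or mask dword i of wakeup frame fnr) and | ||
| * WFODP supplies the data; the wmb() between each pair of writes | ||
| * preserves the select-then-data ordering. | ||
| */ | ||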
162 | |||
163 | static inline void | ||
164 | jme_reset_mac_processor(struct jme_adapter *jme) | ||
165 | { | ||
166 | u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; | ||
167 | u32 crc = 0xCDCDCDCD; | ||
168 | u32 gpreg0; | ||
169 | int i; | ||
170 | |||
171 | jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST); | ||
172 | udelay(2); | ||
173 | jwrite32(jme, JME_GHC, jme->reg_ghc); | ||
174 | |||
175 | jwrite32(jme, JME_RXDBA_LO, 0x00000000); | ||
176 | jwrite32(jme, JME_RXDBA_HI, 0x00000000); | ||
177 | jwrite32(jme, JME_RXQDC, 0x00000000); | ||
178 | jwrite32(jme, JME_RXNDA, 0x00000000); | ||
179 | jwrite32(jme, JME_TXDBA_LO, 0x00000000); | ||
180 | jwrite32(jme, JME_TXDBA_HI, 0x00000000); | ||
181 | jwrite32(jme, JME_TXQDC, 0x00000000); | ||
182 | jwrite32(jme, JME_TXNDA, 0x00000000); | ||
183 | |||
184 | jwrite32(jme, JME_RXMCHT_LO, 0x00000000); | ||
185 | jwrite32(jme, JME_RXMCHT_HI, 0x00000000); | ||
186 | for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i) | ||
187 | jme_setup_wakeup_frame(jme, mask, crc, i); | ||
188 | if (jme->fpgaver) | ||
189 | gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL; | ||
190 | else | ||
191 | gpreg0 = GPREG0_DEFAULT; | ||
192 | jwrite32(jme, JME_GPREG0, gpreg0); | ||
193 | jwrite32(jme, JME_GPREG1, 0); | ||
194 | } | ||
195 | |||
196 | static inline void | ||
197 | jme_reset_ghc_speed(struct jme_adapter *jme) | ||
198 | { | ||
199 | jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX); | ||
200 | jwrite32(jme, JME_GHC, jme->reg_ghc); | ||
201 | } | ||
202 | |||
203 | static inline void | ||
204 | jme_clear_pm(struct jme_adapter *jme) | ||
205 | { | ||
206 | jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); | ||
207 | pci_set_power_state(jme->pdev, PCI_D0); | ||
208 | pci_enable_wake(jme->pdev, PCI_D0, false); | ||
209 | } | ||
210 | |||
211 | static int | ||
212 | jme_reload_eeprom(struct jme_adapter *jme) | ||
213 | { | ||
214 | u32 val; | ||
215 | int i; | ||
216 | |||
217 | val = jread32(jme, JME_SMBCSR); | ||
218 | |||
219 | if (val & SMBCSR_EEPROMD) { | ||
220 | val |= SMBCSR_CNACK; | ||
221 | jwrite32(jme, JME_SMBCSR, val); | ||
222 | val |= SMBCSR_RELOAD; | ||
223 | jwrite32(jme, JME_SMBCSR, val); | ||
224 | mdelay(12); | ||
225 | |||
226 | for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) { | ||
227 | mdelay(1); | ||
228 | if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0) | ||
229 | break; | ||
230 | } | ||
231 | |||
232 | if (i == 0) { | ||
233 | jeprintk(jme->pdev, "eeprom reload timeout\n"); | ||
234 | return -EIO; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | static void | ||
242 | jme_load_macaddr(struct net_device *netdev) | ||
243 | { | ||
244 | struct jme_adapter *jme = netdev_priv(netdev); | ||
245 | unsigned char macaddr[6]; | ||
246 | u32 val; | ||
247 | |||
248 | spin_lock_bh(&jme->macaddr_lock); | ||
249 | val = jread32(jme, JME_RXUMA_LO); | ||
250 | macaddr[0] = (val >> 0) & 0xFF; | ||
251 | macaddr[1] = (val >> 8) & 0xFF; | ||
252 | macaddr[2] = (val >> 16) & 0xFF; | ||
253 | macaddr[3] = (val >> 24) & 0xFF; | ||
254 | val = jread32(jme, JME_RXUMA_HI); | ||
255 | macaddr[4] = (val >> 0) & 0xFF; | ||
256 | macaddr[5] = (val >> 8) & 0xFF; | ||
257 | memcpy(netdev->dev_addr, macaddr, 6); | ||
258 | spin_unlock_bh(&jme->macaddr_lock); | ||
259 | } | ||
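| /* | ||
| * Worked example of the register packing above (illustrative): for | ||
| * MAC 00:1B:2C:3D:4E:5F the hardware holds RXUMA_LO = 0x3D2C1B00 and | ||
| * RXUMA_HI = 0x00005F4E, and the byte extraction reassembles the | ||
| * address in network order. | ||
| */ | ||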
260 | |||
261 | static inline void | ||
262 | jme_set_rx_pcc(struct jme_adapter *jme, int p) | ||
263 | { | ||
264 | switch (p) { | ||
265 | case PCC_OFF: | ||
266 | jwrite32(jme, JME_PCCRX0, | ||
267 | ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | | ||
268 | ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK)); | ||
269 | break; | ||
270 | case PCC_P1: | ||
271 | jwrite32(jme, JME_PCCRX0, | ||
272 | ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | | ||
273 | ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK)); | ||
274 | break; | ||
275 | case PCC_P2: | ||
276 | jwrite32(jme, JME_PCCRX0, | ||
277 | ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | | ||
278 | ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK)); | ||
279 | break; | ||
280 | case PCC_P3: | ||
281 | jwrite32(jme, JME_PCCRX0, | ||
282 | ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | | ||
283 | ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK)); | ||
284 | break; | ||
285 | default: | ||
286 | break; | ||
287 | } | ||
288 | wmb(); | ||
289 | |||
290 | if (!(test_bit(JME_FLAG_POLL, &jme->flags))) | ||
291 | msg_rx_status(jme, "Switched to PCC_P%d\n", p); | ||
292 | } | ||
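| /* | ||
| * Each PCC_Px profile programs a timeout/packet-count pair into | ||
| * PCCRX0; judging from the register fields, the hardware coalesces RX | ||
| * interrupts until either threshold trips. PCC_OFF is used for NAPI | ||
| * polling mode and PCC_P1 for interrupt mode (see jme_polling_mode() | ||
| * and jme_interrupt_mode() below). | ||
| */ | ||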
293 | |||
294 | static void | ||
295 | jme_start_irq(struct jme_adapter *jme) | ||
296 | { | ||
297 | register struct dynpcc_info *dpi = &(jme->dpi); | ||
298 | |||
299 | jme_set_rx_pcc(jme, PCC_P1); | ||
300 | dpi->cur = PCC_P1; | ||
301 | dpi->attempt = PCC_P1; | ||
302 | dpi->cnt = 0; | ||
303 | |||
304 | jwrite32(jme, JME_PCCTX, | ||
305 | ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) | | ||
306 | ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) | | ||
307 | PCCTXQ0_EN | ||
308 | ); | ||
309 | |||
310 | /* | ||
311 | * Enable Interrupts | ||
312 | */ | ||
313 | jwrite32(jme, JME_IENS, INTR_ENABLE); | ||
314 | } | ||
315 | |||
316 | static inline void | ||
317 | jme_stop_irq(struct jme_adapter *jme) | ||
318 | { | ||
319 | /* | ||
320 | * Disable Interrupts | ||
321 | */ | ||
322 | jwrite32f(jme, JME_IENC, INTR_ENABLE); | ||
323 | } | ||
324 | |||
325 | static inline void | ||
326 | jme_enable_shadow(struct jme_adapter *jme) | ||
327 | { | ||
328 | jwrite32(jme, | ||
329 | JME_SHBA_LO, | ||
330 | ((u32)jme->shadow_dma & ~((u32)0x1F)) | SHBA_POSTEN); | ||
331 | } | ||
332 | |||
333 | static inline void | ||
334 | jme_disable_shadow(struct jme_adapter *jme) | ||
335 | { | ||
336 | jwrite32(jme, JME_SHBA_LO, 0x0); | ||
337 | } | ||
338 | |||
339 | static u32 | ||
340 | jme_linkstat_from_phy(struct jme_adapter *jme) | ||
341 | { | ||
342 | u32 phylink, bmsr; | ||
343 | |||
344 | phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17); | ||
345 | bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR); | ||
346 | if (bmsr & BMSR_ANCOMP) | ||
347 | phylink |= PHY_LINK_AUTONEG_COMPLETE; | ||
348 | |||
349 | return phylink; | ||
350 | } | ||
351 | |||
352 | static inline void | ||
353 | jme_set_phyfifoa(struct jme_adapter *jme) | ||
354 | { | ||
355 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); | ||
356 | } | ||
357 | |||
358 | static inline void | ||
359 | jme_set_phyfifob(struct jme_adapter *jme) | ||
360 | { | ||
361 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); | ||
362 | } | ||
363 | |||
364 | static int | ||
365 | jme_check_link(struct net_device *netdev, int testonly) | ||
366 | { | ||
367 | struct jme_adapter *jme = netdev_priv(netdev); | ||
368 | u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr; | ||
369 | char linkmsg[64]; | ||
370 | int rc = 0; | ||
371 | |||
372 | linkmsg[0] = '\0'; | ||
373 | |||
374 | if (jme->fpgaver) | ||
375 | phylink = jme_linkstat_from_phy(jme); | ||
376 | else | ||
377 | phylink = jread32(jme, JME_PHY_LINK); | ||
378 | |||
379 | if (phylink & PHY_LINK_UP) { | ||
380 | if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) { | ||
381 | /* | ||
382 | * If auto-negotiation was not enabled, | ||
383 | * speed/duplex info must be obtained from the SMI registers | ||
384 | */ | ||
385 | phylink = PHY_LINK_UP; | ||
386 | |||
387 | bmcr = jme_mdio_read(jme->dev, | ||
388 | jme->mii_if.phy_id, | ||
389 | MII_BMCR); | ||
390 | |||
391 | phylink |= ((bmcr & BMCR_SPEED1000) && | ||
392 | (bmcr & BMCR_SPEED100) == 0) ? | ||
393 | PHY_LINK_SPEED_1000M : | ||
394 | (bmcr & BMCR_SPEED100) ? | ||
395 | PHY_LINK_SPEED_100M : | ||
396 | PHY_LINK_SPEED_10M; | ||
397 | |||
398 | phylink |= (bmcr & BMCR_FULLDPLX) ? | ||
399 | PHY_LINK_DUPLEX : 0; | ||
400 | |||
401 | strcat(linkmsg, "Forced: "); | ||
402 | } else { | ||
403 | /* | ||
404 | * Keep polling until speed/duplex resolution completes | ||
405 | */ | ||
406 | while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) && | ||
407 | --cnt) { | ||
408 | |||
409 | udelay(1); | ||
410 | |||
411 | if (jme->fpgaver) | ||
412 | phylink = jme_linkstat_from_phy(jme); | ||
413 | else | ||
414 | phylink = jread32(jme, JME_PHY_LINK); | ||
415 | } | ||
416 | if (!cnt) | ||
417 | jeprintk(jme->pdev, | ||
418 | "Waiting speed resolve timeout.\n"); | ||
419 | |||
420 | strcat(linkmsg, "ANed: "); | ||
421 | } | ||
422 | |||
423 | if (jme->phylink == phylink) { | ||
424 | rc = 1; | ||
425 | goto out; | ||
426 | } | ||
427 | if (testonly) | ||
428 | goto out; | ||
429 | |||
430 | jme->phylink = phylink; | ||
431 | |||
432 | ghc = jme->reg_ghc & ~(GHC_SPEED_10M | | ||
433 | GHC_SPEED_100M | | ||
434 | GHC_SPEED_1000M | | ||
435 | GHC_DPX); | ||
436 | switch (phylink & PHY_LINK_SPEED_MASK) { | ||
437 | case PHY_LINK_SPEED_10M: | ||
438 | ghc |= GHC_SPEED_10M; | ||
439 | strcat(linkmsg, "10 Mbps, "); | ||
440 | if (is_buggy250(jme->pdev->device, jme->chiprev)) | ||
441 | jme_set_phyfifoa(jme); | ||
442 | break; | ||
443 | case PHY_LINK_SPEED_100M: | ||
444 | ghc |= GHC_SPEED_100M; | ||
445 | strcat(linkmsg, "100 Mbps, "); | ||
446 | if (is_buggy250(jme->pdev->device, jme->chiprev)) | ||
447 | jme_set_phyfifob(jme); | ||
448 | break; | ||
449 | case PHY_LINK_SPEED_1000M: | ||
450 | ghc |= GHC_SPEED_1000M; | ||
451 | strcat(linkmsg, "1000 Mbps, "); | ||
452 | if (is_buggy250(jme->pdev->device, jme->chiprev)) | ||
453 | jme_set_phyfifoa(jme); | ||
454 | break; | ||
455 | default: | ||
456 | break; | ||
457 | } | ||
458 | ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0; | ||
459 | |||
460 | strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ? | ||
461 | "Full-Duplex, " : | ||
462 | "Half-Duplex, "); | ||
463 | |||
464 | if (phylink & PHY_LINK_MDI_STAT) | ||
465 | strcat(linkmsg, "MDI-X"); | ||
466 | else | ||
467 | strcat(linkmsg, "MDI"); | ||
468 | |||
469 | if (phylink & PHY_LINK_DUPLEX) { | ||
470 | jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); | ||
471 | } else { | ||
472 | jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | | ||
473 | TXMCS_BACKOFF | | ||
474 | TXMCS_CARRIERSENSE | | ||
475 | TXMCS_COLLISION); | ||
476 | jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN | | ||
477 | ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) | | ||
478 | TXTRHD_TXREN | | ||
479 | ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL)); | ||
480 | } | ||
481 | |||
482 | jme->reg_ghc = ghc; | ||
483 | jwrite32(jme, JME_GHC, ghc); | ||
484 | |||
485 | msg_link(jme, "Link is up at %s.\n", linkmsg); | ||
486 | netif_carrier_on(netdev); | ||
487 | } else { | ||
488 | if (testonly) | ||
489 | goto out; | ||
490 | |||
491 | msg_link(jme, "Link is down.\n"); | ||
492 | jme->phylink = 0; | ||
493 | netif_carrier_off(netdev); | ||
494 | } | ||
495 | |||
496 | out: | ||
497 | return rc; | ||
498 | } | ||
499 | |||
500 | static int | ||
501 | jme_setup_tx_resources(struct jme_adapter *jme) | ||
502 | { | ||
503 | struct jme_ring *txring = &(jme->txring[0]); | ||
504 | |||
505 | txring->alloc = dma_alloc_coherent(&(jme->pdev->dev), | ||
506 | TX_RING_ALLOC_SIZE(jme->tx_ring_size), | ||
507 | &(txring->dmaalloc), | ||
508 | GFP_ATOMIC); | ||
509 | |||
510 | if (!txring->alloc) { | ||
511 | txring->desc = NULL; | ||
512 | txring->dmaalloc = 0; | ||
513 | txring->dma = 0; | ||
514 | return -ENOMEM; | ||
515 | } | ||
516 | |||
517 | /* | ||
518 | * Align descriptors to a 16-byte boundary | ||
519 | */ | ||
520 | txring->desc = (void *)ALIGN((unsigned long)(txring->alloc), | ||
521 | RING_DESC_ALIGN); | ||
522 | txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN); | ||
523 | txring->next_to_use = 0; | ||
524 | atomic_set(&txring->next_to_clean, 0); | ||
525 | atomic_set(&txring->nr_free, jme->tx_ring_size); | ||
526 | |||
527 | /* | ||
528 | * Initialize Transmit Descriptors | ||
529 | */ | ||
530 | memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size)); | ||
531 | memset(txring->bufinf, 0, | ||
532 | sizeof(struct jme_buffer_info) * jme->tx_ring_size); | ||
533 | |||
534 | return 0; | ||
535 | } | ||
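| /* | ||
| * Alignment sketch, assuming TX_RING_ALLOC_SIZE() (defined in jme.h, | ||
| * not shown here) reserves RING_DESC_ALIGN bytes of slack: ALIGN(x, 16) | ||
| * rounds x up to the next multiple of 16, so both the CPU pointer and | ||
| * the DMA handle are rounded up inside the allocation. In practice | ||
| * dma_alloc_coherent() returns at least page-aligned memory, so both | ||
| * ALIGN() calls are usually no-ops. | ||
| */ | ||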
536 | |||
537 | static void | ||
538 | jme_free_tx_resources(struct jme_adapter *jme) | ||
539 | { | ||
540 | int i; | ||
541 | struct jme_ring *txring = &(jme->txring[0]); | ||
542 | struct jme_buffer_info *txbi = txring->bufinf; | ||
543 | |||
544 | if (txring->alloc) { | ||
545 | for (i = 0 ; i < jme->tx_ring_size ; ++i) { | ||
546 | txbi = txring->bufinf + i; | ||
547 | if (txbi->skb) { | ||
548 | dev_kfree_skb(txbi->skb); | ||
549 | txbi->skb = NULL; | ||
550 | } | ||
551 | txbi->mapping = 0; | ||
552 | txbi->len = 0; | ||
553 | txbi->nr_desc = 0; | ||
554 | txbi->start_xmit = 0; | ||
555 | } | ||
556 | |||
557 | dma_free_coherent(&(jme->pdev->dev), | ||
558 | TX_RING_ALLOC_SIZE(jme->tx_ring_size), | ||
559 | txring->alloc, | ||
560 | txring->dmaalloc); | ||
561 | |||
562 | txring->alloc = NULL; | ||
563 | txring->desc = NULL; | ||
564 | txring->dmaalloc = 0; | ||
565 | txring->dma = 0; | ||
566 | } | ||
567 | txring->next_to_use = 0; | ||
568 | atomic_set(&txring->next_to_clean, 0); | ||
569 | atomic_set(&txring->nr_free, 0); | ||
570 | |||
571 | } | ||
572 | |||
573 | static inline void | ||
574 | jme_enable_tx_engine(struct jme_adapter *jme) | ||
575 | { | ||
576 | /* | ||
577 | * Select Queue 0 | ||
578 | */ | ||
579 | jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0); | ||
580 | wmb(); | ||
581 | |||
582 | /* | ||
583 | * Setup TX Queue 0 DMA Base Address | ||
584 | */ | ||
585 | jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); | ||
586 | jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32); | ||
587 | jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); | ||
588 | |||
589 | /* | ||
590 | * Setup TX Descriptor Count | ||
591 | */ | ||
592 | jwrite32(jme, JME_TXQDC, jme->tx_ring_size); | ||
593 | |||
594 | /* | ||
595 | * Enable TX Engine | ||
596 | */ | ||
597 | wmb(); | ||
598 | jwrite32(jme, JME_TXCS, jme->reg_txcs | | ||
599 | TXCS_SELECT_QUEUE0 | | ||
600 | TXCS_ENABLE); | ||
601 | |||
602 | } | ||
603 | |||
604 | static inline void | ||
605 | jme_restart_tx_engine(struct jme_adapter *jme) | ||
606 | { | ||
607 | /* | ||
608 | * Restart TX Engine | ||
609 | */ | ||
610 | jwrite32(jme, JME_TXCS, jme->reg_txcs | | ||
611 | TXCS_SELECT_QUEUE0 | | ||
612 | TXCS_ENABLE); | ||
613 | } | ||
614 | |||
615 | static inline void | ||
616 | jme_disable_tx_engine(struct jme_adapter *jme) | ||
617 | { | ||
618 | int i; | ||
619 | u32 val; | ||
620 | |||
621 | /* | ||
622 | * Disable TX Engine | ||
623 | */ | ||
624 | jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0); | ||
625 | wmb(); | ||
626 | |||
627 | val = jread32(jme, JME_TXCS); | ||
628 | for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) { | ||
629 | mdelay(1); | ||
630 | val = jread32(jme, JME_TXCS); | ||
631 | rmb(); | ||
632 | } | ||
633 | |||
634 | if (!i) | ||
635 | jeprintk(jme->pdev, "Disable TX engine timeout.\n"); | ||
636 | } | ||
637 | |||
638 | static void | ||
639 | jme_set_clean_rxdesc(struct jme_adapter *jme, int i) | ||
640 | { | ||
641 | struct jme_ring *rxring = jme->rxring; | ||
642 | register struct rxdesc *rxdesc = rxring->desc; | ||
643 | struct jme_buffer_info *rxbi = rxring->bufinf; | ||
644 | rxdesc += i; | ||
645 | rxbi += i; | ||
646 | |||
647 | rxdesc->dw[0] = 0; | ||
648 | rxdesc->dw[1] = 0; | ||
649 | rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32); | ||
650 | rxdesc->desc1.bufaddrl = cpu_to_le32( | ||
651 | (__u64)rxbi->mapping & 0xFFFFFFFFUL); | ||
652 | rxdesc->desc1.datalen = cpu_to_le16(rxbi->len); | ||
653 | if (jme->dev->features & NETIF_F_HIGHDMA) | ||
654 | rxdesc->desc1.flags = RXFLAG_64BIT; | ||
655 | wmb(); | ||
656 | rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT; | ||
657 | } | ||
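| /* | ||
| * The wmb() above is the hand-off point: the buffer address and | ||
| * length must be globally visible before RXFLAG_OWN passes the | ||
| * descriptor to the NIC, otherwise the hardware could DMA into a | ||
| * half-initialized descriptor. The TX path uses the same | ||
| * own-bit-last pattern in jme_fill_first_tx_desc(). | ||
| */ | ||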
658 | |||
659 | static int | ||
660 | jme_make_new_rx_buf(struct jme_adapter *jme, int i) | ||
661 | { | ||
662 | struct jme_ring *rxring = &(jme->rxring[0]); | ||
663 | struct jme_buffer_info *rxbi = rxring->bufinf + i; | ||
664 | struct sk_buff *skb; | ||
665 | |||
666 | skb = netdev_alloc_skb(jme->dev, | ||
667 | jme->dev->mtu + RX_EXTRA_LEN); | ||
668 | if (unlikely(!skb)) | ||
669 | return -ENOMEM; | ||
670 | |||
671 | rxbi->skb = skb; | ||
672 | rxbi->len = skb_tailroom(skb); | ||
673 | rxbi->mapping = pci_map_page(jme->pdev, | ||
674 | virt_to_page(skb->data), | ||
675 | offset_in_page(skb->data), | ||
676 | rxbi->len, | ||
677 | PCI_DMA_FROMDEVICE); | ||
678 | |||
679 | return 0; | ||
680 | } | ||
681 | |||
682 | static void | ||
683 | jme_free_rx_buf(struct jme_adapter *jme, int i) | ||
684 | { | ||
685 | struct jme_ring *rxring = &(jme->rxring[0]); | ||
686 | struct jme_buffer_info *rxbi = rxring->bufinf; | ||
687 | rxbi += i; | ||
688 | |||
689 | if (rxbi->skb) { | ||
690 | pci_unmap_page(jme->pdev, | ||
691 | rxbi->mapping, | ||
692 | rxbi->len, | ||
693 | PCI_DMA_FROMDEVICE); | ||
694 | dev_kfree_skb(rxbi->skb); | ||
695 | rxbi->skb = NULL; | ||
696 | rxbi->mapping = 0; | ||
697 | rxbi->len = 0; | ||
698 | } | ||
699 | } | ||
700 | |||
701 | static void | ||
702 | jme_free_rx_resources(struct jme_adapter *jme) | ||
703 | { | ||
704 | int i; | ||
705 | struct jme_ring *rxring = &(jme->rxring[0]); | ||
706 | |||
707 | if (rxring->alloc) { | ||
708 | for (i = 0 ; i < jme->rx_ring_size ; ++i) | ||
709 | jme_free_rx_buf(jme, i); | ||
710 | |||
711 | dma_free_coherent(&(jme->pdev->dev), | ||
712 | RX_RING_ALLOC_SIZE(jme->rx_ring_size), | ||
713 | rxring->alloc, | ||
714 | rxring->dmaalloc); | ||
715 | rxring->alloc = NULL; | ||
716 | rxring->desc = NULL; | ||
717 | rxring->dmaalloc = 0; | ||
718 | rxring->dma = 0; | ||
719 | } | ||
720 | rxring->next_to_use = 0; | ||
721 | atomic_set(&rxring->next_to_clean, 0); | ||
722 | } | ||
723 | |||
724 | static int | ||
725 | jme_setup_rx_resources(struct jme_adapter *jme) | ||
726 | { | ||
727 | int i; | ||
728 | struct jme_ring *rxring = &(jme->rxring[0]); | ||
729 | |||
730 | rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev), | ||
731 | RX_RING_ALLOC_SIZE(jme->rx_ring_size), | ||
732 | &(rxring->dmaalloc), | ||
733 | GFP_ATOMIC); | ||
734 | if (!rxring->alloc) { | ||
735 | rxring->desc = NULL; | ||
736 | rxring->dmaalloc = 0; | ||
737 | rxring->dma = 0; | ||
738 | return -ENOMEM; | ||
739 | } | ||
740 | |||
741 | /* | ||
742 | * Align descriptors to a 16-byte boundary | ||
743 | */ | ||
744 | rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc), | ||
745 | RING_DESC_ALIGN); | ||
746 | rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN); | ||
747 | rxring->next_to_use = 0; | ||
748 | atomic_set(&rxring->next_to_clean, 0); | ||
749 | |||
750 | /* | ||
751 | * Initialize Receive Descriptors | ||
752 | */ | ||
753 | for (i = 0 ; i < jme->rx_ring_size ; ++i) { | ||
754 | if (unlikely(jme_make_new_rx_buf(jme, i))) { | ||
755 | jme_free_rx_resources(jme); | ||
756 | return -ENOMEM; | ||
757 | } | ||
758 | |||
759 | jme_set_clean_rxdesc(jme, i); | ||
760 | } | ||
761 | |||
762 | return 0; | ||
763 | } | ||
764 | |||
765 | static inline void | ||
766 | jme_enable_rx_engine(struct jme_adapter *jme) | ||
767 | { | ||
768 | /* | ||
769 | * Select Queue 0 | ||
770 | */ | ||
771 | jwrite32(jme, JME_RXCS, jme->reg_rxcs | | ||
772 | RXCS_QUEUESEL_Q0); | ||
773 | wmb(); | ||
774 | |||
775 | /* | ||
776 | * Setup RX DMA Base Address | ||
777 | */ | ||
778 | jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL); | ||
779 | jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32); | ||
780 | jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL); | ||
781 | |||
782 | /* | ||
783 | * Setup RX Descriptor Count | ||
784 | */ | ||
785 | jwrite32(jme, JME_RXQDC, jme->rx_ring_size); | ||
786 | |||
787 | /* | ||
788 | * Setup Unicast Filter | ||
789 | */ | ||
790 | jme_set_multi(jme->dev); | ||
791 | |||
792 | /* | ||
793 | * Enable RX Engine | ||
794 | */ | ||
795 | wmb(); | ||
796 | jwrite32(jme, JME_RXCS, jme->reg_rxcs | | ||
797 | RXCS_QUEUESEL_Q0 | | ||
798 | RXCS_ENABLE | | ||
799 | RXCS_QST); | ||
800 | } | ||
801 | |||
802 | static inline void | ||
803 | jme_restart_rx_engine(struct jme_adapter *jme) | ||
804 | { | ||
805 | /* | ||
806 | * Start RX Engine | ||
807 | */ | ||
808 | jwrite32(jme, JME_RXCS, jme->reg_rxcs | | ||
809 | RXCS_QUEUESEL_Q0 | | ||
810 | RXCS_ENABLE | | ||
811 | RXCS_QST); | ||
812 | } | ||
813 | |||
814 | static inline void | ||
815 | jme_disable_rx_engine(struct jme_adapter *jme) | ||
816 | { | ||
817 | int i; | ||
818 | u32 val; | ||
819 | |||
820 | /* | ||
821 | * Disable RX Engine | ||
822 | */ | ||
823 | jwrite32(jme, JME_RXCS, jme->reg_rxcs); | ||
824 | wmb(); | ||
825 | |||
826 | val = jread32(jme, JME_RXCS); | ||
827 | for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) { | ||
828 | mdelay(1); | ||
829 | val = jread32(jme, JME_RXCS); | ||
830 | rmb(); | ||
831 | } | ||
832 | |||
833 | if (!i) | ||
834 | jeprintk(jme->pdev, "Disable RX engine timeout.\n"); | ||
835 | |||
836 | } | ||
837 | |||
838 | static int | ||
839 | jme_rxsum_ok(struct jme_adapter *jme, u16 flags) | ||
840 | { | ||
841 | if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4))) | ||
842 | return false; | ||
843 | |||
844 | if (unlikely(!(flags & RXWBFLAG_MF) && | ||
845 | (flags & RXWBFLAG_TCPON) && !(flags & RXWBFLAG_TCPCS))) { | ||
846 | msg_rx_err(jme, "TCP Checksum error.\n"); | ||
847 | goto out_sumerr; | ||
848 | } | ||
849 | |||
850 | if (unlikely(!(flags & RXWBFLAG_MF) && | ||
851 | (flags & RXWBFLAG_UDPON) && !(flags & RXWBFLAG_UDPCS))) { | ||
852 | msg_rx_err(jme, "UDP Checksum error.\n"); | ||
853 | goto out_sumerr; | ||
854 | } | ||
855 | |||
856 | if (unlikely((flags & RXWBFLAG_IPV4) && !(flags & RXWBFLAG_IPCS))) { | ||
857 | msg_rx_err(jme, "IPv4 Checksum error.\n"); | ||
858 | goto out_sumerr; | ||
859 | } | ||
860 | |||
861 | return true; | ||
862 | |||
863 | out_sumerr: | ||
864 | return false; | ||
865 | } | ||
866 | |||
867 | static void | ||
868 | jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx) | ||
869 | { | ||
870 | struct jme_ring *rxring = &(jme->rxring[0]); | ||
871 | struct rxdesc *rxdesc = rxring->desc; | ||
872 | struct jme_buffer_info *rxbi = rxring->bufinf; | ||
873 | struct sk_buff *skb; | ||
874 | int framesize; | ||
875 | |||
876 | rxdesc += idx; | ||
877 | rxbi += idx; | ||
878 | |||
879 | skb = rxbi->skb; | ||
880 | pci_dma_sync_single_for_cpu(jme->pdev, | ||
881 | rxbi->mapping, | ||
882 | rxbi->len, | ||
883 | PCI_DMA_FROMDEVICE); | ||
884 | |||
885 | if (unlikely(jme_make_new_rx_buf(jme, idx))) { | ||
886 | pci_dma_sync_single_for_device(jme->pdev, | ||
887 | rxbi->mapping, | ||
888 | rxbi->len, | ||
889 | PCI_DMA_FROMDEVICE); | ||
890 | |||
891 | ++(NET_STAT(jme).rx_dropped); | ||
892 | } else { | ||
893 | framesize = le16_to_cpu(rxdesc->descwb.framesize) | ||
894 | - RX_PREPAD_SIZE; | ||
895 | |||
896 | skb_reserve(skb, RX_PREPAD_SIZE); | ||
897 | skb_put(skb, framesize); | ||
898 | skb->protocol = eth_type_trans(skb, jme->dev); | ||
899 | |||
900 | if (jme_rxsum_ok(jme, rxdesc->descwb.flags)) | ||
901 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
902 | else | ||
903 | skb->ip_summed = CHECKSUM_NONE; | ||
904 | |||
905 | if (rxdesc->descwb.flags & RXWBFLAG_TAGON) { | ||
906 | if (jme->vlgrp) { | ||
907 | jme->jme_vlan_rx(skb, jme->vlgrp, | ||
908 | le32_to_cpu(rxdesc->descwb.vlan)); | ||
909 | NET_STAT(jme).rx_bytes += 4; | ||
910 | } | ||
911 | } else { | ||
912 | jme->jme_rx(skb); | ||
913 | } | ||
914 | |||
915 | if ((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) == | ||
916 | RXWBFLAG_DEST_MUL) | ||
917 | ++(NET_STAT(jme).multicast); | ||
918 | |||
919 | jme->dev->last_rx = jiffies; | ||
920 | NET_STAT(jme).rx_bytes += framesize; | ||
921 | ++(NET_STAT(jme).rx_packets); | ||
922 | } | ||
923 | |||
924 | jme_set_clean_rxdesc(jme, idx); | ||
925 | |||
926 | } | ||
927 | |||
928 | static int | ||
929 | jme_process_receive(struct jme_adapter *jme, int limit) | ||
930 | { | ||
931 | struct jme_ring *rxring = &(jme->rxring[0]); | ||
932 | struct rxdesc *rxdesc = rxring->desc; | ||
933 | int i, j, ccnt, desccnt, mask = jme->rx_ring_mask; | ||
934 | |||
935 | if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning))) | ||
936 | goto out_inc; | ||
937 | |||
938 | if (unlikely(atomic_read(&jme->link_changing) != 1)) | ||
939 | goto out_inc; | ||
940 | |||
941 | if (unlikely(!netif_carrier_ok(jme->dev))) | ||
942 | goto out_inc; | ||
943 | |||
944 | i = atomic_read(&rxring->next_to_clean); | ||
945 | while (limit-- > 0) { | ||
946 | rxdesc = rxring->desc; | ||
947 | rxdesc += i; | ||
948 | |||
949 | if ((rxdesc->descwb.flags & RXWBFLAG_OWN) || | ||
950 | !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) | ||
951 | goto out; | ||
952 | |||
953 | desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; | ||
954 | |||
955 | if (unlikely(desccnt > 1 || | ||
956 | rxdesc->descwb.errstat & RXWBERR_ALLERR)) { | ||
957 | |||
958 | if (rxdesc->descwb.errstat & RXWBERR_CRCERR) | ||
959 | ++(NET_STAT(jme).rx_crc_errors); | ||
960 | else if (rxdesc->descwb.errstat & RXWBERR_OVERUN) | ||
961 | ++(NET_STAT(jme).rx_fifo_errors); | ||
962 | else | ||
963 | ++(NET_STAT(jme).rx_errors); | ||
964 | |||
965 | if (desccnt > 1) | ||
966 | limit -= desccnt - 1; | ||
967 | |||
968 | for (j = i, ccnt = desccnt ; ccnt-- ; ) { | ||
969 | jme_set_clean_rxdesc(jme, j); | ||
970 | j = (j + 1) & (mask); | ||
971 | } | ||
972 | |||
973 | } else { | ||
974 | jme_alloc_and_feed_skb(jme, i); | ||
975 | } | ||
976 | |||
977 | i = (i + desccnt) & (mask); | ||
978 | } | ||
979 | |||
980 | out: | ||
981 | atomic_set(&rxring->next_to_clean, i); | ||
982 | |||
983 | out_inc: | ||
984 | atomic_inc(&jme->rx_cleaning); | ||
985 | |||
986 | return limit > 0 ? limit : 0; | ||
987 | |||
988 | } | ||
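| /* | ||
| * Index arithmetic note: rx_ring_mask is presumably rx_ring_size - 1 | ||
| * for a power-of-two ring, so "(i + desccnt) & mask" wraps around the | ||
| * ring cheaply, e.g. with a 1024-entry ring (1022 + 3) & 1023 = 1. | ||
| */ | ||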
989 | |||
990 | static void | ||
991 | jme_attempt_pcc(struct dynpcc_info *dpi, int atmp) | ||
992 | { | ||
993 | if (likely(atmp == dpi->cur)) { | ||
994 | dpi->cnt = 0; | ||
995 | return; | ||
996 | } | ||
997 | |||
998 | if (dpi->attempt == atmp) { | ||
999 | ++(dpi->cnt); | ||
1000 | } else { | ||
1001 | dpi->attempt = atmp; | ||
1002 | dpi->cnt = 0; | ||
1003 | } | ||
1004 | |||
1005 | } | ||
1006 | |||
1007 | static void | ||
1008 | jme_dynamic_pcc(struct jme_adapter *jme) | ||
1009 | { | ||
1010 | register struct dynpcc_info *dpi = &(jme->dpi); | ||
1011 | |||
1012 | if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD) | ||
1013 | jme_attempt_pcc(dpi, PCC_P3); | ||
1014 | else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD | ||
1015 | || dpi->intr_cnt > PCC_INTR_THRESHOLD) | ||
1016 | jme_attempt_pcc(dpi, PCC_P2); | ||
1017 | else | ||
1018 | jme_attempt_pcc(dpi, PCC_P1); | ||
1019 | |||
1020 | if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) { | ||
1021 | if (dpi->attempt < dpi->cur) | ||
1022 | tasklet_schedule(&jme->rxclean_task); | ||
1023 | jme_set_rx_pcc(jme, dpi->attempt); | ||
1024 | dpi->cur = dpi->attempt; | ||
1025 | dpi->cnt = 0; | ||
1026 | } | ||
1027 | } | ||
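| /* | ||
| * jme_attempt_pcc()/jme_dynamic_pcc() implement simple hysteresis: a | ||
| * new coalescing level must be proposed for more than five consecutive | ||
| * sampling periods (dpi->cnt > 5) before it is applied, which keeps | ||
| * the PCC level from flapping on bursty traffic. A downgrade | ||
| * additionally schedules the rxclean tasklet to flush work queued at | ||
| * the old level. | ||
| */ | ||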
1028 | |||
1029 | static void | ||
1030 | jme_start_pcc_timer(struct jme_adapter *jme) | ||
1031 | { | ||
1032 | struct dynpcc_info *dpi = &(jme->dpi); | ||
1033 | dpi->last_bytes = NET_STAT(jme).rx_bytes; | ||
1034 | dpi->last_pkts = NET_STAT(jme).rx_packets; | ||
1035 | dpi->intr_cnt = 0; | ||
1036 | jwrite32(jme, JME_TMCSR, | ||
1037 | TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT)); | ||
1038 | } | ||
1039 | |||
1040 | static inline void | ||
1041 | jme_stop_pcc_timer(struct jme_adapter *jme) | ||
1042 | { | ||
1043 | jwrite32(jme, JME_TMCSR, 0); | ||
1044 | } | ||
1045 | |||
1046 | static void | ||
1047 | jme_shutdown_nic(struct jme_adapter *jme) | ||
1048 | { | ||
1049 | u32 phylink; | ||
1050 | |||
1051 | phylink = jme_linkstat_from_phy(jme); | ||
1052 | |||
1053 | if (!(phylink & PHY_LINK_UP)) { | ||
1054 | /* | ||
1055 | * Disable all interrupts before arming the shutdown timer | ||
1056 | */ | ||
1057 | jme_stop_irq(jme); | ||
1058 | jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE); | ||
1059 | } | ||
1060 | } | ||
1061 | |||
1062 | static void | ||
1063 | jme_pcc_tasklet(unsigned long arg) | ||
1064 | { | ||
1065 | struct jme_adapter *jme = (struct jme_adapter *)arg; | ||
1066 | struct net_device *netdev = jme->dev; | ||
1067 | |||
1068 | if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) { | ||
1069 | jme_shutdown_nic(jme); | ||
1070 | return; | ||
1071 | } | ||
1072 | |||
1073 | if (unlikely(!netif_carrier_ok(netdev) || | ||
1074 | (atomic_read(&jme->link_changing) != 1) | ||
1075 | )) { | ||
1076 | jme_stop_pcc_timer(jme); | ||
1077 | return; | ||
1078 | } | ||
1079 | |||
1080 | if (!(test_bit(JME_FLAG_POLL, &jme->flags))) | ||
1081 | jme_dynamic_pcc(jme); | ||
1082 | |||
1083 | jme_start_pcc_timer(jme); | ||
1084 | } | ||
1085 | |||
1086 | static inline void | ||
1087 | jme_polling_mode(struct jme_adapter *jme) | ||
1088 | { | ||
1089 | jme_set_rx_pcc(jme, PCC_OFF); | ||
1090 | } | ||
1091 | |||
1092 | static inline void | ||
1093 | jme_interrupt_mode(struct jme_adapter *jme) | ||
1094 | { | ||
1095 | jme_set_rx_pcc(jme, PCC_P1); | ||
1096 | } | ||
1097 | |||
1098 | static inline int | ||
1099 | jme_pseudo_hotplug_enabled(struct jme_adapter *jme) | ||
1100 | { | ||
1101 | u32 apmc; | ||
1102 | apmc = jread32(jme, JME_APMC); | ||
1103 | return apmc & JME_APMC_PSEUDO_HP_EN; | ||
1104 | } | ||
1105 | |||
1106 | static void | ||
1107 | jme_start_shutdown_timer(struct jme_adapter *jme) | ||
1108 | { | ||
1109 | u32 apmc; | ||
1110 | |||
1111 | apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN; | ||
1112 | apmc &= ~JME_APMC_EPIEN_CTRL; | ||
1113 | if (!no_extplug) { | ||
1114 | jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN); | ||
1115 | wmb(); | ||
1116 | } | ||
1117 | jwrite32f(jme, JME_APMC, apmc); | ||
1118 | |||
1119 | jwrite32f(jme, JME_TIMER2, 0); | ||
1120 | set_bit(JME_FLAG_SHUTDOWN, &jme->flags); | ||
1121 | jwrite32(jme, JME_TMCSR, | ||
1122 | TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT)); | ||
1123 | } | ||
1124 | |||
1125 | static void | ||
1126 | jme_stop_shutdown_timer(struct jme_adapter *jme) | ||
1127 | { | ||
1128 | u32 apmc; | ||
1129 | |||
1130 | jwrite32f(jme, JME_TMCSR, 0); | ||
1131 | jwrite32f(jme, JME_TIMER2, 0); | ||
1132 | clear_bit(JME_FLAG_SHUTDOWN, &jme->flags); | ||
1133 | |||
1134 | apmc = jread32(jme, JME_APMC); | ||
1135 | apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL); | ||
1136 | jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS); | ||
1137 | wmb(); | ||
1138 | jwrite32f(jme, JME_APMC, apmc); | ||
1139 | } | ||
1140 | |||
1141 | static void | ||
1142 | jme_link_change_tasklet(unsigned long arg) | ||
1143 | { | ||
1144 | struct jme_adapter *jme = (struct jme_adapter *)arg; | ||
1145 | struct net_device *netdev = jme->dev; | ||
1146 | int rc; | ||
1147 | |||
1148 | while (!atomic_dec_and_test(&jme->link_changing)) { | ||
1149 | atomic_inc(&jme->link_changing); | ||
1150 | msg_intr(jme, "Failed to get link change lock.\n"); | ||
1151 | while (atomic_read(&jme->link_changing) != 1) | ||
1152 | msg_intr(jme, "Waiting for link change lock.\n"); | ||
1153 | } | ||
1154 | |||
1155 | if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu) | ||
1156 | goto out; | ||
1157 | |||
1158 | jme->old_mtu = netdev->mtu; | ||
1159 | netif_stop_queue(netdev); | ||
1160 | if (jme_pseudo_hotplug_enabled(jme)) | ||
1161 | jme_stop_shutdown_timer(jme); | ||
1162 | |||
1163 | jme_stop_pcc_timer(jme); | ||
1164 | tasklet_disable(&jme->txclean_task); | ||
1165 | tasklet_disable(&jme->rxclean_task); | ||
1166 | tasklet_disable(&jme->rxempty_task); | ||
1167 | |||
1168 | if (netif_carrier_ok(netdev)) { | ||
1169 | jme_reset_ghc_speed(jme); | ||
1170 | jme_disable_rx_engine(jme); | ||
1171 | jme_disable_tx_engine(jme); | ||
1172 | jme_reset_mac_processor(jme); | ||
1173 | jme_free_rx_resources(jme); | ||
1174 | jme_free_tx_resources(jme); | ||
1175 | |||
1176 | if (test_bit(JME_FLAG_POLL, &jme->flags)) | ||
1177 | jme_polling_mode(jme); | ||
1178 | |||
1179 | netif_carrier_off(netdev); | ||
1180 | } | ||
1181 | |||
1182 | jme_check_link(netdev, 0); | ||
1183 | if (netif_carrier_ok(netdev)) { | ||
1184 | rc = jme_setup_rx_resources(jme); | ||
1185 | if (rc) { | ||
1186 | jeprintk(jme->pdev, "Failed to allocate RX resources" | ||
1187 | ", device stopped!\n"); | ||
1188 | goto out_enable_tasklet; | ||
1189 | } | ||
1190 | |||
1191 | rc = jme_setup_tx_resources(jme); | ||
1192 | if (rc) { | ||
1193 | jeprintk(jme->pdev, "Failed to allocate TX resources" | ||
1194 | ", device stopped!\n"); | ||
1195 | goto err_out_free_rx_resources; | ||
1196 | } | ||
1197 | |||
1198 | jme_enable_rx_engine(jme); | ||
1199 | jme_enable_tx_engine(jme); | ||
1200 | |||
1201 | netif_start_queue(netdev); | ||
1202 | |||
1203 | if (test_bit(JME_FLAG_POLL, &jme->flags)) | ||
1204 | jme_interrupt_mode(jme); | ||
1205 | |||
1206 | jme_start_pcc_timer(jme); | ||
1207 | } else if (jme_pseudo_hotplug_enabled(jme)) { | ||
1208 | jme_start_shutdown_timer(jme); | ||
1209 | } | ||
1210 | |||
1211 | goto out_enable_tasklet; | ||
1212 | |||
1213 | err_out_free_rx_resources: | ||
1214 | jme_free_rx_resources(jme); | ||
1215 | out_enable_tasklet: | ||
1216 | tasklet_enable(&jme->txclean_task); | ||
1217 | tasklet_hi_enable(&jme->rxclean_task); | ||
1218 | tasklet_hi_enable(&jme->rxempty_task); | ||
1219 | out: | ||
1220 | atomic_inc(&jme->link_changing); | ||
1221 | } | ||
1222 | |||
1223 | static void | ||
1224 | jme_rx_clean_tasklet(unsigned long arg) | ||
1225 | { | ||
1226 | struct jme_adapter *jme = (struct jme_adapter *)arg; | ||
1227 | struct dynpcc_info *dpi = &(jme->dpi); | ||
1228 | |||
1229 | jme_process_receive(jme, jme->rx_ring_size); | ||
1230 | ++(dpi->intr_cnt); | ||
1231 | |||
1232 | } | ||
1233 | |||
1234 | static int | ||
1235 | jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget)) | ||
1236 | { | ||
1237 | struct jme_adapter *jme = jme_napi_priv(holder); | ||
1238 | struct net_device *netdev = jme->dev; | ||
1239 | int rest; | ||
1240 | |||
1241 | rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget)); | ||
1242 | |||
1243 | while (atomic_read(&jme->rx_empty) > 0) { | ||
1244 | atomic_dec(&jme->rx_empty); | ||
1245 | ++(NET_STAT(jme).rx_dropped); | ||
1246 | jme_restart_rx_engine(jme); | ||
1247 | } | ||
1248 | atomic_inc(&jme->rx_empty); | ||
1249 | |||
1250 | if (rest) { | ||
1251 | JME_RX_COMPLETE(netdev, holder); | ||
1252 | jme_interrupt_mode(jme); | ||
1253 | } | ||
1254 | |||
1255 | JME_NAPI_WEIGHT_SET(budget, rest); | ||
1256 | return JME_NAPI_WEIGHT_VAL(budget) - rest; | ||
1257 | } | ||
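| /* | ||
| * NAPI accounting: jme_process_receive() returns the unused budget, | ||
| * so the value handed back to the core is budget - rest, i.e. the | ||
| * number of packets actually processed. A non-zero rest means the | ||
| * ring was drained, so polling completes and the device drops back to | ||
| * interrupt-driven coalescing (PCC_P1). | ||
| */ | ||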
1258 | |||
1259 | static void | ||
1260 | jme_rx_empty_tasklet(unsigned long arg) | ||
1261 | { | ||
1262 | struct jme_adapter *jme = (struct jme_adapter *)arg; | ||
1263 | |||
1264 | if (unlikely(atomic_read(&jme->link_changing) != 1)) | ||
1265 | return; | ||
1266 | |||
1267 | if (unlikely(!netif_carrier_ok(jme->dev))) | ||
1268 | return; | ||
1269 | |||
1270 | msg_rx_status(jme, "RX Queue Full!\n"); | ||
1271 | |||
1272 | jme_rx_clean_tasklet(arg); | ||
1273 | |||
1274 | while (atomic_read(&jme->rx_empty) > 0) { | ||
1275 | atomic_dec(&jme->rx_empty); | ||
1276 | ++(NET_STAT(jme).rx_dropped); | ||
1277 | jme_restart_rx_engine(jme); | ||
1278 | } | ||
1279 | atomic_inc(&jme->rx_empty); | ||
1280 | } | ||
1281 | |||
1282 | static void | ||
1283 | jme_wake_queue_if_stopped(struct jme_adapter *jme) | ||
1284 | { | ||
1285 | struct jme_ring *txring = jme->txring; | ||
1286 | |||
1287 | smp_wmb(); | ||
1288 | if (unlikely(netif_queue_stopped(jme->dev) && | ||
1289 | atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) { | ||
1290 | msg_tx_done(jme, "TX Queue Woken.\n"); | ||
1291 | netif_wake_queue(jme->dev); | ||
1292 | } | ||
1293 | |||
1294 | } | ||
1295 | |||
1296 | static void | ||
1297 | jme_tx_clean_tasklet(unsigned long arg) | ||
1298 | { | ||
1299 | struct jme_adapter *jme = (struct jme_adapter *)arg; | ||
1300 | struct jme_ring *txring = &(jme->txring[0]); | ||
1301 | struct txdesc *txdesc = txring->desc; | ||
1302 | struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi; | ||
1303 | int i, j, cnt = 0, max, err, mask; | ||
1304 | |||
1305 | tx_dbg(jme, "Entering txclean.\n"); | ||
1306 | |||
1307 | if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning))) | ||
1308 | goto out; | ||
1309 | |||
1310 | if (unlikely(atomic_read(&jme->link_changing) != 1)) | ||
1311 | goto out; | ||
1312 | |||
1313 | if (unlikely(!netif_carrier_ok(jme->dev))) | ||
1314 | goto out; | ||
1315 | |||
1316 | max = jme->tx_ring_size - atomic_read(&txring->nr_free); | ||
1317 | mask = jme->tx_ring_mask; | ||
1318 | |||
1319 | for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) { | ||
1320 | |||
1321 | ctxbi = txbi + i; | ||
1322 | |||
1323 | if (likely(ctxbi->skb && | ||
1324 | !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) { | ||
1325 | |||
1326 | tx_dbg(jme, "txclean: %d+%d@%lu\n", | ||
1327 | i, ctxbi->nr_desc, jiffies); | ||
1328 | |||
1329 | err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR; | ||
1330 | |||
1331 | for (j = 1 ; j < ctxbi->nr_desc ; ++j) { | ||
1332 | ttxbi = txbi + ((i + j) & (mask)); | ||
1333 | txdesc[(i + j) & (mask)].dw[0] = 0; | ||
1334 | |||
1335 | pci_unmap_page(jme->pdev, | ||
1336 | ttxbi->mapping, | ||
1337 | ttxbi->len, | ||
1338 | PCI_DMA_TODEVICE); | ||
1339 | |||
1340 | ttxbi->mapping = 0; | ||
1341 | ttxbi->len = 0; | ||
1342 | } | ||
1343 | |||
1344 | dev_kfree_skb(ctxbi->skb); | ||
1345 | |||
1346 | cnt += ctxbi->nr_desc; | ||
1347 | |||
1348 | if (unlikely(err)) { | ||
1349 | ++(NET_STAT(jme).tx_carrier_errors); | ||
1350 | } else { | ||
1351 | ++(NET_STAT(jme).tx_packets); | ||
1352 | NET_STAT(jme).tx_bytes += ctxbi->len; | ||
1353 | } | ||
1354 | |||
1355 | ctxbi->skb = NULL; | ||
1356 | ctxbi->len = 0; | ||
1357 | ctxbi->start_xmit = 0; | ||
1358 | |||
1359 | } else { | ||
1360 | break; | ||
1361 | } | ||
1362 | |||
1363 | i = (i + ctxbi->nr_desc) & mask; | ||
1364 | |||
1365 | ctxbi->nr_desc = 0; | ||
1366 | } | ||
1367 | |||
1368 | tx_dbg(jme, "txclean: done %d@%lu.\n", i, jiffies); | ||
1369 | atomic_set(&txring->next_to_clean, i); | ||
1370 | atomic_add(cnt, &txring->nr_free); | ||
1371 | |||
1372 | jme_wake_queue_if_stopped(jme); | ||
1373 | |||
1374 | out: | ||
1375 | atomic_inc(&jme->tx_cleaning); | ||
1376 | } | ||
1377 | |||
1378 | static void | ||
1379 | jme_intr_msi(struct jme_adapter *jme, u32 intrstat) | ||
1380 | { | ||
1381 | /* | ||
1382 | * Disable interrupt | ||
1383 | */ | ||
1384 | jwrite32f(jme, JME_IENC, INTR_ENABLE); | ||
1385 | |||
1386 | if (intrstat & (INTR_LINKCH | INTR_SWINTR)) { | ||
1387 | /* | ||
1388 | * A link change event is critical; | ||
1389 | * all other events are ignored | ||
1390 | */ | ||
1391 | jwrite32(jme, JME_IEVE, intrstat); | ||
1392 | tasklet_schedule(&jme->linkch_task); | ||
1393 | goto out_reenable; | ||
1394 | } | ||
1395 | |||
1396 | if (intrstat & INTR_TMINTR) { | ||
1397 | jwrite32(jme, JME_IEVE, INTR_TMINTR); | ||
1398 | tasklet_schedule(&jme->pcc_task); | ||
1399 | } | ||
1400 | |||
1401 | if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) { | ||
1402 | jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0); | ||
1403 | tasklet_schedule(&jme->txclean_task); | ||
1404 | } | ||
1405 | |||
1406 | if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) { | ||
1407 | jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO | | ||
1408 | INTR_PCCRX0 | | ||
1409 | INTR_RX0EMP)) | | ||
1410 | INTR_RX0); | ||
1411 | } | ||
1412 | |||
1413 | if (test_bit(JME_FLAG_POLL, &jme->flags)) { | ||
1414 | if (intrstat & INTR_RX0EMP) | ||
1415 | atomic_inc(&jme->rx_empty); | ||
1416 | |||
1417 | if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) { | ||
1418 | if (likely(JME_RX_SCHEDULE_PREP(jme))) { | ||
1419 | jme_polling_mode(jme); | ||
1420 | JME_RX_SCHEDULE(jme); | ||
1421 | } | ||
1422 | } | ||
1423 | } else { | ||
1424 | if (intrstat & INTR_RX0EMP) { | ||
1425 | atomic_inc(&jme->rx_empty); | ||
1426 | tasklet_hi_schedule(&jme->rxempty_task); | ||
1427 | } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) { | ||
1428 | tasklet_hi_schedule(&jme->rxclean_task); | ||
1429 | } | ||
1430 | } | ||
1431 | |||
1432 | out_reenable: | ||
1433 | /* | ||
1434 | * Re-enable interrupt | ||
1435 | */ | ||
1436 | jwrite32f(jme, JME_IENS, INTR_ENABLE); | ||
1437 | } | ||
1438 | |||
1439 | static irqreturn_t | ||
1440 | jme_intr(int irq, void *dev_id) | ||
1441 | { | ||
1442 | struct net_device *netdev = dev_id; | ||
1443 | struct jme_adapter *jme = netdev_priv(netdev); | ||
1444 | u32 intrstat; | ||
1445 | |||
1446 | intrstat = jread32(jme, JME_IEVE); | ||
1447 | |||
1448 | /* | ||
1449 | * Check if it's really an interrupt for us | ||
1450 | */ | ||
1451 | if (unlikely(intrstat == 0)) | ||
1452 | return IRQ_NONE; | ||
1453 | |||
1454 | /* | ||
1455 | * Check if the device still exists | ||
1456 | */ | ||
1457 | if (unlikely(intrstat == ~((typeof(intrstat))0))) | ||
1458 | return IRQ_NONE; | ||
1459 | |||
1460 | jme_intr_msi(jme, intrstat); | ||
1461 | |||
1462 | return IRQ_HANDLED; | ||
1463 | } | ||
1464 | |||
1465 | static irqreturn_t | ||
1466 | jme_msi(int irq, void *dev_id) | ||
1467 | { | ||
1468 | struct net_device *netdev = dev_id; | ||
1469 | struct jme_adapter *jme = netdev_priv(netdev); | ||
1470 | u32 intrstat; | ||
1471 | |||
1472 | pci_dma_sync_single_for_cpu(jme->pdev, | ||
1473 | jme->shadow_dma, | ||
1474 | sizeof(u32) * SHADOW_REG_NR, | ||
1475 | PCI_DMA_FROMDEVICE); | ||
1476 | intrstat = jme->shadow_regs[SHADOW_IEVE]; | ||
1477 | jme->shadow_regs[SHADOW_IEVE] = 0; | ||
1478 | |||
1479 | jme_intr_msi(jme, intrstat); | ||
1480 | |||
1481 | return IRQ_HANDLED; | ||
1482 | } | ||
1483 | |||
1484 | static void | ||
1485 | jme_reset_link(struct jme_adapter *jme) | ||
1486 | { | ||
1487 | jwrite32(jme, JME_TMCSR, TMCSR_SWIT); | ||
1488 | } | ||
1489 | |||
1490 | static void | ||
1491 | jme_restart_an(struct jme_adapter *jme) | ||
1492 | { | ||
1493 | u32 bmcr; | ||
1494 | |||
1495 | spin_lock_bh(&jme->phy_lock); | ||
1496 | bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); | ||
1497 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | ||
1498 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); | ||
1499 | spin_unlock_bh(&jme->phy_lock); | ||
1500 | } | ||
1501 | |||
1502 | static int | ||
1503 | jme_request_irq(struct jme_adapter *jme) | ||
1504 | { | ||
1505 | int rc; | ||
1506 | struct net_device *netdev = jme->dev; | ||
1507 | irq_handler_t handler = jme_intr; | ||
1508 | int irq_flags = IRQF_SHARED; | ||
1509 | |||
1510 | if (!pci_enable_msi(jme->pdev)) { | ||
1511 | set_bit(JME_FLAG_MSI, &jme->flags); | ||
1512 | handler = jme_msi; | ||
1513 | irq_flags = 0; | ||
1514 | } | ||
1515 | |||
1516 | rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name, | ||
1517 | netdev); | ||
1518 | if (rc) { | ||
1519 | jeprintk(jme->pdev, | ||
1520 | "Unable to request %s interrupt (return: %d)\n", | ||
1521 | test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx", | ||
1522 | rc); | ||
1523 | |||
1524 | if (test_bit(JME_FLAG_MSI, &jme->flags)) { | ||
1525 | pci_disable_msi(jme->pdev); | ||
1526 | clear_bit(JME_FLAG_MSI, &jme->flags); | ||
1527 | } | ||
1528 | } else { | ||
1529 | netdev->irq = jme->pdev->irq; | ||
1530 | } | ||
1531 | |||
1532 | return rc; | ||
1533 | } | ||
1534 | |||
1535 | static void | ||
1536 | jme_free_irq(struct jme_adapter *jme) | ||
1537 | { | ||
1538 | free_irq(jme->pdev->irq, jme->dev); | ||
1539 | if (test_bit(JME_FLAG_MSI, &jme->flags)) { | ||
1540 | pci_disable_msi(jme->pdev); | ||
1541 | clear_bit(JME_FLAG_MSI, &jme->flags); | ||
1542 | jme->dev->irq = jme->pdev->irq; | ||
1543 | } | ||
1544 | } | ||
1545 | |||
1546 | static int | ||
1547 | jme_open(struct net_device *netdev) | ||
1548 | { | ||
1549 | struct jme_adapter *jme = netdev_priv(netdev); | ||
1550 | int rc; | ||
1551 | |||
1552 | jme_clear_pm(jme); | ||
1553 | JME_NAPI_ENABLE(jme); | ||
1554 | |||
1555 | tasklet_enable(&jme->txclean_task); | ||
1556 | tasklet_hi_enable(&jme->rxclean_task); | ||
1557 | tasklet_hi_enable(&jme->rxempty_task); | ||
1558 | |||
1559 | rc = jme_request_irq(jme); | ||
1560 | if (rc) | ||
1561 | goto err_out; | ||
1562 | |||
1563 | jme_enable_shadow(jme); | ||
1564 | jme_start_irq(jme); | ||
1565 | |||
1566 | if (test_bit(JME_FLAG_SSET, &jme->flags)) | ||
1567 | jme_set_settings(netdev, &jme->old_ecmd); | ||
1568 | else | ||
1569 | jme_reset_phy_processor(jme); | ||
1570 | |||
1571 | jme_reset_link(jme); | ||
1572 | |||
1573 | return 0; | ||
1574 | |||
1575 | err_out: | ||
1576 | netif_stop_queue(netdev); | ||
1577 | netif_carrier_off(netdev); | ||
1578 | return rc; | ||
1579 | } | ||
1580 | |||
1581 | static void | ||
1582 | jme_set_100m_half(struct jme_adapter *jme) | ||
1583 | { | ||
1584 | u32 bmcr, tmp; | ||
1585 | |||
1586 | bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); | ||
1587 | tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | | ||
1588 | BMCR_SPEED1000 | BMCR_FULLDPLX); | ||
1589 | tmp |= BMCR_SPEED100; | ||
1590 | |||
1591 | if (bmcr != tmp) | ||
1592 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp); | ||
1593 | |||
1594 | if (jme->fpgaver) | ||
1595 | jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL); | ||
1596 | else | ||
1597 | jwrite32(jme, JME_GHC, GHC_SPEED_100M); | ||
1598 | } | ||
1599 | |||
1600 | #define JME_WAIT_LINK_TIME 2000 /* 2000ms */ | ||
1601 | static void | ||
1602 | jme_wait_link(struct jme_adapter *jme) | ||
1603 | { | ||
1604 | u32 phylink, to = JME_WAIT_LINK_TIME; | ||
1605 | |||
1606 | mdelay(1000); | ||
1607 | phylink = jme_linkstat_from_phy(jme); | ||
1608 | while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) { | ||
1609 | mdelay(10); | ||
1610 | phylink = jme_linkstat_from_phy(jme); | ||
1611 | } | ||
1612 | } | ||
1613 | |||
1614 | static inline void | ||
1615 | jme_phy_off(struct jme_adapter *jme) | ||
1616 | { | ||
1617 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN); | ||
1618 | } | ||
1619 | |||
1620 | static int | ||
1621 | jme_close(struct net_device *netdev) | ||
1622 | { | ||
1623 | struct jme_adapter *jme = netdev_priv(netdev); | ||
1624 | |||
1625 | netif_stop_queue(netdev); | ||
1626 | netif_carrier_off(netdev); | ||
1627 | |||
1628 | jme_stop_irq(jme); | ||
1629 | jme_disable_shadow(jme); | ||
1630 | jme_free_irq(jme); | ||
1631 | |||
1632 | JME_NAPI_DISABLE(jme); | ||
1633 | |||
1634 | tasklet_kill(&jme->linkch_task); | ||
1635 | tasklet_kill(&jme->txclean_task); | ||
1636 | tasklet_kill(&jme->rxclean_task); | ||
1637 | tasklet_kill(&jme->rxempty_task); | ||
1638 | |||
1639 | jme_reset_ghc_speed(jme); | ||
1640 | jme_disable_rx_engine(jme); | ||
1641 | jme_disable_tx_engine(jme); | ||
1642 | jme_reset_mac_processor(jme); | ||
1643 | jme_free_rx_resources(jme); | ||
1644 | jme_free_tx_resources(jme); | ||
1645 | jme->phylink = 0; | ||
1646 | jme_phy_off(jme); | ||
1647 | |||
1648 | return 0; | ||
1649 | } | ||
1650 | |||
1651 | static int | ||
1652 | jme_alloc_txdesc(struct jme_adapter *jme, | ||
1653 | struct sk_buff *skb) | ||
1654 | { | ||
1655 | struct jme_ring *txring = jme->txring; | ||
1656 | int idx, nr_alloc, mask = jme->tx_ring_mask; | ||
1657 | |||
1658 | idx = txring->next_to_use; | ||
1659 | nr_alloc = skb_shinfo(skb)->nr_frags + 2; | ||
1660 | |||
1661 | if (unlikely(atomic_read(&txring->nr_free) < nr_alloc)) | ||
1662 | return -1; | ||
1663 | |||
1664 | atomic_sub(nr_alloc, &txring->nr_free); | ||
1665 | |||
1666 | txring->next_to_use = (txring->next_to_use + nr_alloc) & mask; | ||
1667 | |||
1668 | return idx; | ||
1669 | } | ||
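| /* | ||
| * nr_frags + 2 reserves one slot for the command descriptor at idx | ||
| * (filled by jme_fill_first_tx_desc()), one for the linear head of | ||
| * the skb at idx + 1, and one per page fragment from idx + 2 onwards | ||
| * (see jme_map_tx_skb() below). | ||
| */ | ||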
1670 | |||
1671 | static void | ||
1672 | jme_fill_tx_map(struct pci_dev *pdev, | ||
1673 | struct txdesc *txdesc, | ||
1674 | struct jme_buffer_info *txbi, | ||
1675 | struct page *page, | ||
1676 | u32 page_offset, | ||
1677 | u32 len, | ||
1678 | u8 hidma) | ||
1679 | { | ||
1680 | dma_addr_t dmaaddr; | ||
1681 | |||
1682 | dmaaddr = pci_map_page(pdev, | ||
1683 | page, | ||
1684 | page_offset, | ||
1685 | len, | ||
1686 | PCI_DMA_TODEVICE); | ||
1687 | |||
1688 | pci_dma_sync_single_for_device(pdev, | ||
1689 | dmaaddr, | ||
1690 | len, | ||
1691 | PCI_DMA_TODEVICE); | ||
1692 | |||
1693 | txdesc->dw[0] = 0; | ||
1694 | txdesc->dw[1] = 0; | ||
1695 | txdesc->desc2.flags = TXFLAG_OWN; | ||
1696 | txdesc->desc2.flags |= (hidma) ? TXFLAG_64BIT : 0; | ||
1697 | txdesc->desc2.datalen = cpu_to_le16(len); | ||
1698 | txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32); | ||
1699 | txdesc->desc2.bufaddrl = cpu_to_le32( | ||
1700 | (__u64)dmaaddr & 0xFFFFFFFFUL); | ||
1701 | |||
1702 | txbi->mapping = dmaaddr; | ||
1703 | txbi->len = len; | ||
1704 | } | ||
1705 | |||
1706 | static void | ||
1707 | jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) | ||
1708 | { | ||
1709 | struct jme_ring *txring = jme->txring; | ||
1710 | struct txdesc *txdesc = txring->desc, *ctxdesc; | ||
1711 | struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; | ||
1712 | u8 hidma = jme->dev->features & NETIF_F_HIGHDMA; | ||
1713 | int i, nr_frags = skb_shinfo(skb)->nr_frags; | ||
1714 | int mask = jme->tx_ring_mask; | ||
1715 | struct skb_frag_struct *frag; | ||
1716 | u32 len; | ||
1717 | |||
1718 | for (i = 0 ; i < nr_frags ; ++i) { | ||
1719 | frag = &skb_shinfo(skb)->frags[i]; | ||
1720 | ctxdesc = txdesc + ((idx + i + 2) & (mask)); | ||
1721 | ctxbi = txbi + ((idx + i + 2) & (mask)); | ||
1722 | |||
1723 | jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page, | ||
1724 | frag->page_offset, frag->size, hidma); | ||
1725 | } | ||
1726 | |||
1727 | len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; | ||
1728 | ctxdesc = txdesc + ((idx + 1) & (mask)); | ||
1729 | ctxbi = txbi + ((idx + 1) & (mask)); | ||
1730 | jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), | ||
1731 | offset_in_page(skb->data), len, hidma); | ||
1732 | |||
1733 | } | ||
1734 | |||
1735 | static int | ||
1736 | jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb) | ||
1737 | { | ||
1738 | if (unlikely(skb_shinfo(skb)->gso_size && | ||
1739 | skb_header_cloned(skb) && | ||
1740 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) { | ||
1741 | dev_kfree_skb(skb); | ||
1742 | return -1; | ||
1743 | } | ||
1744 | |||
1745 | return 0; | ||
1746 | } | ||
1747 | |||
1748 | static int | ||
1749 | jme_tx_tso(struct sk_buff *skb, | ||
1750 | u16 *mss, u8 *flags) | ||
1751 | { | ||
1752 | *mss = skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT; | ||
1753 | if (*mss) { | ||
1754 | *flags |= TXFLAG_LSEN; | ||
1755 | |||
1756 | if (skb->protocol == htons(ETH_P_IP)) { | ||
1757 | struct iphdr *iph = ip_hdr(skb); | ||
1758 | |||
1759 | iph->check = 0; | ||
1760 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | ||
1761 | iph->daddr, 0, | ||
1762 | IPPROTO_TCP, | ||
1763 | 0); | ||
1764 | } else { | ||
1765 | struct ipv6hdr *ip6h = ipv6_hdr(skb); | ||
1766 | |||
1767 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr, | ||
1768 | &ip6h->daddr, 0, | ||
1769 | IPPROTO_TCP, | ||
1770 | 0); | ||
1771 | } | ||
1772 | |||
1773 | return 0; | ||
1774 | } | ||
1775 | |||
1776 | return 1; | ||
1777 | } | ||
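For TSO, the function above clears the IP header checksum and seeds the TCP checksum with the un-complemented pseudo-header sum over a zero length (hence the ~ on csum_tcpudp_magic()), so the hardware can add the real per-segment payload sums. A userspace sketch of that folded 16-bit one's-complement sum for IPv4, with example addresses (a real implementation sums the on-wire 16-bit words; host order is used here for brevity):

	#include <stdint.h>
	#include <stdio.h>

	/* One's-complement sum of the IPv4 pseudo header, as a TSO seed. */
	static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
				       uint16_t len, uint8_t proto)
	{
		uint32_t sum = 0;

		sum += saddr >> 16;
		sum += saddr & 0xFFFF;
		sum += daddr >> 16;
		sum += daddr & 0xFFFF;
		sum += proto;		/* protocol, zero-padded to 16 bits */
		sum += len;		/* 0 for the TSO seed used above */

		while (sum >> 16)	/* fold carries back into the low word */
			sum = (sum & 0xFFFF) + (sum >> 16);
		return (uint16_t)sum;
	}

	int main(void)
	{
		/* 192.0.2.1 -> 192.0.2.2, TCP (6), length 0 */
		printf("seed = 0x%04x\n",
		       pseudo_hdr_sum(0xC0000201, 0xC0000202, 0, 6));
		return 0;
	}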
1778 | |||
1779 | static void | ||
1780 | jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags) | ||
1781 | { | ||
1782 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
1783 | u8 ip_proto; | ||
1784 | |||
1785 | switch (skb->protocol) { | ||
1786 | case htons(ETH_P_IP): | ||
1787 | ip_proto = ip_hdr(skb)->protocol; | ||
1788 | break; | ||
1789 | case htons(ETH_P_IPV6): | ||
1790 | ip_proto = ipv6_hdr(skb)->nexthdr; | ||
1791 | break; | ||
1792 | default: | ||
1793 | ip_proto = 0; | ||
1794 | break; | ||
1795 | } | ||
1796 | |||
1797 | switch (ip_proto) { | ||
1798 | case IPPROTO_TCP: | ||
1799 | *flags |= TXFLAG_TCPCS; | ||
1800 | break; | ||
1801 | case IPPROTO_UDP: | ||
1802 | *flags |= TXFLAG_UDPCS; | ||
1803 | break; | ||
1804 | default: | ||
1805 | msg_tx_err(jme, "Unsupported upper-layer protocol.\n"); | ||

1806 | break; | ||
1807 | } | ||
1808 | } | ||
1809 | } | ||
1810 | |||
1811 | static inline void | ||
1812 | jme_tx_vlan(struct sk_buff *skb, u16 *vlan, u8 *flags) | ||
1813 | { | ||
1814 | if (vlan_tx_tag_present(skb)) { | ||
1815 | *flags |= TXFLAG_TAGON; | ||
1816 | *vlan = vlan_tx_tag_get(skb); | ||
1817 | } | ||
1818 | } | ||
1819 | |||
1820 | static int | ||
1821 | jme_fill_first_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx) | ||
1822 | { | ||
1823 | struct jme_ring *txring = jme->txring; | ||
1824 | struct txdesc *txdesc; | ||
1825 | struct jme_buffer_info *txbi; | ||
1826 | u8 flags; | ||
1827 | |||
1828 | txdesc = (struct txdesc *)txring->desc + idx; | ||
1829 | txbi = txring->bufinf + idx; | ||
1830 | |||
1831 | txdesc->dw[0] = 0; | ||
1832 | txdesc->dw[1] = 0; | ||
1833 | txdesc->dw[2] = 0; | ||
1834 | txdesc->dw[3] = 0; | ||
1835 | txdesc->desc1.pktsize = cpu_to_le16(skb->len); | ||
1836 | /* | ||
1837 | * Set the OWN bit last. | ||
1838 | * If the kernel fills descriptors faster than the NIC drains | ||
1839 | * them, the NIC might otherwise try to send this descriptor | ||
1840 | * before we tell it to start sending this TX queue. | ||
1841 | * All other fields are already filled in correctly. | ||
1842 | */ | ||
1843 | wmb(); | ||
1844 | flags = TXFLAG_OWN | TXFLAG_INT; | ||
1845 | /* | ||
1846 | * Set checksum flags only when not doing TSO | ||
1847 | */ | ||
1848 | if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags)) | ||
1849 | jme_tx_csum(jme, skb, &flags); | ||
1850 | jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags); | ||
1851 | txdesc->desc1.flags = flags; | ||
1852 | /* | ||
1853 | * Set the tx buffer info after telling the NIC to send, | ||
1854 | * for better tx_clean timing | ||
1855 | */ | ||
1856 | wmb(); | ||
1857 | txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2; | ||
1858 | txbi->skb = skb; | ||
1859 | txbi->len = skb->len; | ||
1860 | txbi->start_xmit = jiffies; | ||
1861 | if (!txbi->start_xmit) | ||
1862 | txbi->start_xmit = (0UL-1); | ||
1863 | |||
1864 | return 0; | ||
1865 | } | ||
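The two wmb() calls above implement a classic producer-side publish pattern: every descriptor word must be globally visible before the OWN bit is, and the bookkeeping consumed by tx_clean is only filled in afterwards. In portable userspace C11 the same handoff can be expressed with release/acquire atomics; a minimal sketch (names are illustrative):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define OWN 0x80

	struct desc {
		uint32_t data;		/* payload words, written first */
		atomic_uint flags;	/* OWN bit, published last */
	};

	static void producer_fill(struct desc *d, uint32_t v)
	{
		d->data = v;
		/* Release: everything written above is visible before OWN. */
		atomic_store_explicit(&d->flags, OWN, memory_order_release);
	}

	static int consumer_poll(struct desc *d, uint32_t *out)
	{
		/* Acquire: if we observe OWN, we also observe d->data. */
		if (atomic_load_explicit(&d->flags, memory_order_acquire) & OWN) {
			*out = d->data;
			return 1;
		}
		return 0;
	}

	int main(void)
	{
		struct desc d = { 0, 0 };
		uint32_t v;

		producer_fill(&d, 42);
		if (consumer_poll(&d, &v))
			printf("got %u\n", (unsigned)v);
		return 0;
	}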
1866 | |||
1867 | static void | ||
1868 | jme_stop_queue_if_full(struct jme_adapter *jme) | ||
1869 | { | ||
1870 | struct jme_ring *txring = jme->txring; | ||
1871 | struct jme_buffer_info *txbi = txring->bufinf; | ||
1872 | int idx = atomic_read(&txring->next_to_clean); | ||
1873 | |||
1874 | txbi += idx; | ||
1875 | |||
1876 | smp_wmb(); | ||
1877 | if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) { | ||
1878 | netif_stop_queue(jme->dev); | ||
1879 | msg_tx_queued(jme, "TX Queue Paused.\n"); | ||
1880 | smp_wmb(); | ||
1881 | if (atomic_read(&txring->nr_free) | ||
1882 | >= (jme->tx_wake_threshold)) { | ||
1883 | netif_wake_queue(jme->dev); | ||
1884 | msg_tx_queued(jme, "TX Queue Fast Woken.\n"); | ||
1885 | } | ||
1886 | } | ||
1887 | |||
1888 | if (unlikely(txbi->start_xmit && | ||
1889 | (jiffies - txbi->start_xmit) >= TX_TIMEOUT && | ||
1890 | txbi->skb)) { | ||
1891 | netif_stop_queue(jme->dev); | ||
1892 | msg_tx_queued(jme, "TX Queue Stopped %d@%lu.\n", idx, jiffies); | ||
1893 | } | ||
1894 | } | ||
1895 | |||
1896 | /* | ||
1897 | * This function is already protected by netif_tx_lock() | ||
1898 | */ | ||
1899 | |||
1900 | static int | ||
1901 | jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) | ||
1902 | { | ||
1903 | struct jme_adapter *jme = netdev_priv(netdev); | ||
1904 | int idx; | ||
1905 | |||
1906 | if (unlikely(jme_expand_header(jme, skb))) { | ||
1907 | ++(NET_STAT(jme).tx_dropped); | ||
1908 | return NETDEV_TX_OK; | ||
1909 | } | ||
1910 | |||
1911 | idx = jme_alloc_txdesc(jme, skb); | ||
1912 | |||
1913 | if (unlikely(idx < 0)) { | ||
1914 | netif_stop_queue(netdev); | ||
1915 | msg_tx_err(jme, "BUG! Tx ring full when queue awake!\n"); | ||
1916 | |||
1917 | return NETDEV_TX_BUSY; | ||
1918 | } | ||
1919 | |||
1920 | jme_map_tx_skb(jme, skb, idx); | ||
1921 | jme_fill_first_tx_desc(jme, skb, idx); | ||
1922 | |||
1923 | jwrite32(jme, JME_TXCS, jme->reg_txcs | | ||
1924 | TXCS_SELECT_QUEUE0 | | ||
1925 | TXCS_QUEUE0S | | ||
1926 | TXCS_ENABLE); | ||
1927 | netdev->trans_start = jiffies; | ||
1928 | |||
1929 | tx_dbg(jme, "xmit: %d+%d@%lu\n", idx, | ||
1930 | skb_shinfo(skb)->nr_frags + 2, | ||
1931 | jiffies); | ||
1932 | jme_stop_queue_if_full(jme); | ||
1933 | |||
1934 | return NETDEV_TX_OK; | ||
1935 | } | ||
1936 | |||
1937 | static int | ||
1938 | jme_set_macaddr(struct net_device *netdev, void *p) | ||
1939 | { | ||
1940 | struct jme_adapter *jme = netdev_priv(netdev); | ||
1941 | struct sockaddr *addr = p; | ||
1942 | u32 val; | ||
1943 | |||
1944 | if (netif_running(netdev)) | ||
1945 | return -EBUSY; | ||
1946 | |||
1947 | spin_lock_bh(&jme->macaddr_lock); | ||
1948 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
1949 | |||
1950 | val = (addr->sa_data[3] & 0xff) << 24 | | ||
1951 | (addr->sa_data[2] & 0xff) << 16 | | ||
1952 | (addr->sa_data[1] & 0xff) << 8 | | ||
1953 | (addr->sa_data[0] & 0xff); | ||
1954 | jwrite32(jme, JME_RXUMA_LO, val); | ||
1955 | val = (addr->sa_data[5] & 0xff) << 8 | | ||
1956 | (addr->sa_data[4] & 0xff); | ||
1957 | jwrite32(jme, JME_RXUMA_HI, val); | ||
1958 | spin_unlock_bh(&jme->macaddr_lock); | ||
1959 | |||
1960 | return 0; | ||
1961 | } | ||
1962 | |||
1963 | static void | ||
1964 | jme_set_multi(struct net_device *netdev) | ||
1965 | { | ||
1966 | struct jme_adapter *jme = netdev_priv(netdev); | ||
1967 | u32 mc_hash[2] = {}; | ||
1968 | int i; | ||
1969 | |||
1970 | spin_lock_bh(&jme->rxmcs_lock); | ||
1971 | |||
1972 | jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME; | ||
1973 | |||
1974 | if (netdev->flags & IFF_PROMISC) { | ||
1975 | jme->reg_rxmcs |= RXMCS_ALLFRAME; | ||
1976 | } else if (netdev->flags & IFF_ALLMULTI) { | ||
1977 | jme->reg_rxmcs |= RXMCS_ALLMULFRAME; | ||
1978 | } else if (netdev->flags & IFF_MULTICAST) { | ||
1979 | struct dev_mc_list *mclist; | ||
1980 | int bit_nr; | ||
1981 | |||
1982 | jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; | ||
1983 | for (i = 0, mclist = netdev->mc_list; | ||
1984 | mclist && i < netdev->mc_count; | ||
1985 | ++i, mclist = mclist->next) { | ||
1986 | |||
1987 | bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F; | ||
1988 | mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); | ||
1989 | } | ||
1990 | |||
1991 | jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]); | ||
1992 | jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]); | ||
1993 | } | ||
1994 | |||
1995 | wmb(); | ||
1996 | jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); | ||
1997 | |||
1998 | spin_unlock_bh(&jme->rxmcs_lock); | ||
1999 | } | ||
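The multicast filter above hashes each address into a 64-bit table: ether_crc() & 0x3F selects one of 64 bins, bit_nr >> 5 picks the 32-bit register half (RXMCHT_LO or RXMCHT_HI), and 1 << (bit_nr & 0x1F) the bit within it. A self-contained sketch of the same binning, with a bit-at-a-time Ethernet CRC that mirrors the kernel's classic ether_crc() helper (table-less variant shown here purely for illustration):

	#include <stdint.h>
	#include <stdio.h>

	/* Ethernet CRC-32, bits consumed LSB-first per byte, MSB-first shift. */
	static uint32_t ether_crc(int len, const uint8_t *data)
	{
		uint32_t crc = 0xFFFFFFFFu;

		while (--len >= 0) {
			uint8_t byte = *data++;
			for (int bit = 0; bit < 8; bit++, byte >>= 1) {
				uint32_t msb = crc >> 31;
				crc <<= 1;
				if (msb ^ (byte & 1))
					crc ^= 0x04C11DB7;
			}
		}
		return crc;
	}

	int main(void)
	{
		const uint8_t mcast[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };
		uint32_t mc_hash[2] = { 0, 0 };

		int bit_nr = ether_crc(6, mcast) & 0x3F;	/* 64 bins */
		mc_hash[bit_nr >> 5] |= 1u << (bit_nr & 0x1F);

		printf("bin %d -> RXMCHT_%s bit %d\n", bit_nr,
		       (bit_nr >> 5) ? "HI" : "LO", bit_nr & 0x1F);
		return 0;
	}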
2000 | |||
2001 | static int | ||
2002 | jme_change_mtu(struct net_device *netdev, int new_mtu) | ||
2003 | { | ||
2004 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2005 | |||
2006 | if (new_mtu == jme->old_mtu) | ||
2007 | return 0; | ||
2008 | |||
2009 | if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) || | ||
2010 | ((new_mtu) < IPV6_MIN_MTU)) | ||
2011 | return -EINVAL; | ||
2012 | |||
2013 | if (new_mtu > 4000) { | ||
2014 | jme->reg_rxcs &= ~RXCS_FIFOTHNP; | ||
2015 | jme->reg_rxcs |= RXCS_FIFOTHNP_64QW; | ||
2016 | jme_restart_rx_engine(jme); | ||
2017 | } else { | ||
2018 | jme->reg_rxcs &= ~RXCS_FIFOTHNP; | ||
2019 | jme->reg_rxcs |= RXCS_FIFOTHNP_128QW; | ||
2020 | jme_restart_rx_engine(jme); | ||
2021 | } | ||
2022 | |||
2023 | if (new_mtu > 1900) { | ||
2024 | netdev->features &= ~(NETIF_F_HW_CSUM | | ||
2025 | NETIF_F_TSO | | ||
2026 | NETIF_F_TSO6); | ||
2027 | } else { | ||
2028 | if (test_bit(JME_FLAG_TXCSUM, &jme->flags)) | ||
2029 | netdev->features |= NETIF_F_HW_CSUM; | ||
2030 | if (test_bit(JME_FLAG_TSO, &jme->flags)) | ||
2031 | netdev->features |= NETIF_F_TSO | NETIF_F_TSO6; | ||
2032 | } | ||
2033 | |||
2034 | netdev->mtu = new_mtu; | ||
2035 | jme_reset_link(jme); | ||
2036 | |||
2037 | return 0; | ||
2038 | } | ||
2039 | |||
2040 | static void | ||
2041 | jme_tx_timeout(struct net_device *netdev) | ||
2042 | { | ||
2043 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2044 | |||
2045 | jme->phylink = 0; | ||
2046 | jme_reset_phy_processor(jme); | ||
2047 | if (test_bit(JME_FLAG_SSET, &jme->flags)) | ||
2048 | jme_set_settings(netdev, &jme->old_ecmd); | ||
2049 | |||
2050 | /* | ||
2051 | * Force the link to reset again | ||
2052 | */ | ||
2053 | jme_reset_link(jme); | ||
2054 | } | ||
2055 | |||
2056 | static void | ||
2057 | jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | ||
2058 | { | ||
2059 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2060 | |||
2061 | jme->vlgrp = grp; | ||
2062 | } | ||
2063 | |||
2064 | static void | ||
2065 | jme_get_drvinfo(struct net_device *netdev, | ||
2066 | struct ethtool_drvinfo *info) | ||
2067 | { | ||
2068 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2069 | |||
2070 | strcpy(info->driver, DRV_NAME); | ||
2071 | strcpy(info->version, DRV_VERSION); | ||
2072 | strcpy(info->bus_info, pci_name(jme->pdev)); | ||
2073 | } | ||
2074 | |||
2075 | static int | ||
2076 | jme_get_regs_len(struct net_device *netdev) | ||
2077 | { | ||
2078 | return JME_REG_LEN; | ||
2079 | } | ||
2080 | |||
2081 | static void | ||
2082 | mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len) | ||
2083 | { | ||
2084 | int i; | ||
2085 | |||
2086 | for (i = 0 ; i < len ; i += 4) | ||
2087 | p[i >> 2] = jread32(jme, reg + i); | ||
2088 | } | ||
2089 | |||
2090 | static void | ||
2091 | mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr) | ||
2092 | { | ||
2093 | int i; | ||
2094 | u16 *p16 = (u16 *)p; | ||
2095 | |||
2096 | for (i = 0 ; i < reg_nr ; ++i) | ||
2097 | p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i); | ||
2098 | } | ||
2099 | |||
2100 | static void | ||
2101 | jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) | ||
2102 | { | ||
2103 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2104 | u32 *p32 = (u32 *)p; | ||
2105 | |||
2106 | memset(p, 0xFF, JME_REG_LEN); | ||
2107 | |||
2108 | regs->version = 1; | ||
2109 | mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN); | ||
2110 | |||
2111 | p32 += 0x100 >> 2; | ||
2112 | mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN); | ||
2113 | |||
2114 | p32 += 0x100 >> 2; | ||
2115 | mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN); | ||
2116 | |||
2117 | p32 += 0x100 >> 2; | ||
2118 | mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN); | ||
2119 | |||
2120 | p32 += 0x100 >> 2; | ||
2121 | mdio_memcpy(jme, p32, JME_PHY_REG_NR); | ||
2122 | } | ||
2123 | |||
2124 | static int | ||
2125 | jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) | ||
2126 | { | ||
2127 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2128 | |||
2129 | ecmd->tx_coalesce_usecs = PCC_TX_TO; | ||
2130 | ecmd->tx_max_coalesced_frames = PCC_TX_CNT; | ||
2131 | |||
2132 | if (test_bit(JME_FLAG_POLL, &jme->flags)) { | ||
2133 | ecmd->use_adaptive_rx_coalesce = false; | ||
2134 | ecmd->rx_coalesce_usecs = 0; | ||
2135 | ecmd->rx_max_coalesced_frames = 0; | ||
2136 | return 0; | ||
2137 | } | ||
2138 | |||
2139 | ecmd->use_adaptive_rx_coalesce = true; | ||
2140 | |||
2141 | switch (jme->dpi.cur) { | ||
2142 | case PCC_P1: | ||
2143 | ecmd->rx_coalesce_usecs = PCC_P1_TO; | ||
2144 | ecmd->rx_max_coalesced_frames = PCC_P1_CNT; | ||
2145 | break; | ||
2146 | case PCC_P2: | ||
2147 | ecmd->rx_coalesce_usecs = PCC_P2_TO; | ||
2148 | ecmd->rx_max_coalesced_frames = PCC_P2_CNT; | ||
2149 | break; | ||
2150 | case PCC_P3: | ||
2151 | ecmd->rx_coalesce_usecs = PCC_P3_TO; | ||
2152 | ecmd->rx_max_coalesced_frames = PCC_P3_CNT; | ||
2153 | break; | ||
2154 | default: | ||
2155 | break; | ||
2156 | } | ||
2157 | |||
2158 | return 0; | ||
2159 | } | ||
2160 | |||
2161 | static int | ||
2162 | jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) | ||
2163 | { | ||
2164 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2165 | struct dynpcc_info *dpi = &(jme->dpi); | ||
2166 | |||
2167 | if (netif_running(netdev)) | ||
2168 | return -EBUSY; | ||
2169 | |||
2170 | if (ecmd->use_adaptive_rx_coalesce | ||
2171 | && test_bit(JME_FLAG_POLL, &jme->flags)) { | ||
2172 | clear_bit(JME_FLAG_POLL, &jme->flags); | ||
2173 | jme->jme_rx = netif_rx; | ||
2174 | jme->jme_vlan_rx = vlan_hwaccel_rx; | ||
2175 | dpi->cur = PCC_P1; | ||
2176 | dpi->attempt = PCC_P1; | ||
2177 | dpi->cnt = 0; | ||
2178 | jme_set_rx_pcc(jme, PCC_P1); | ||
2179 | jme_interrupt_mode(jme); | ||
2180 | } else if (!(ecmd->use_adaptive_rx_coalesce) | ||
2181 | && !(test_bit(JME_FLAG_POLL, &jme->flags))) { | ||
2182 | set_bit(JME_FLAG_POLL, &jme->flags); | ||
2183 | jme->jme_rx = netif_receive_skb; | ||
2184 | jme->jme_vlan_rx = vlan_hwaccel_receive_skb; | ||
2185 | jme_interrupt_mode(jme); | ||
2186 | } | ||
2187 | |||
2188 | return 0; | ||
2189 | } | ||
2190 | |||
2191 | static void | ||
2192 | jme_get_pauseparam(struct net_device *netdev, | ||
2193 | struct ethtool_pauseparam *ecmd) | ||
2194 | { | ||
2195 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2196 | u32 val; | ||
2197 | |||
2198 | ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0; | ||
2199 | ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0; | ||
2200 | |||
2201 | spin_lock_bh(&jme->phy_lock); | ||
2202 | val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); | ||
2203 | spin_unlock_bh(&jme->phy_lock); | ||
2204 | |||
2205 | ecmd->autoneg = | ||
2206 | (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0; | ||
2207 | } | ||
2208 | |||
2209 | static int | ||
2210 | jme_set_pauseparam(struct net_device *netdev, | ||
2211 | struct ethtool_pauseparam *ecmd) | ||
2212 | { | ||
2213 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2214 | u32 val; | ||
2215 | |||
2216 | if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^ | ||
2217 | (ecmd->tx_pause != 0)) { | ||
2218 | |||
2219 | if (ecmd->tx_pause) | ||
2220 | jme->reg_txpfc |= TXPFC_PF_EN; | ||
2221 | else | ||
2222 | jme->reg_txpfc &= ~TXPFC_PF_EN; | ||
2223 | |||
2224 | jwrite32(jme, JME_TXPFC, jme->reg_txpfc); | ||
2225 | } | ||
2226 | |||
2227 | spin_lock_bh(&jme->rxmcs_lock); | ||
2228 | if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^ | ||
2229 | (ecmd->rx_pause != 0)) { | ||
2230 | |||
2231 | if (ecmd->rx_pause) | ||
2232 | jme->reg_rxmcs |= RXMCS_FLOWCTRL; | ||
2233 | else | ||
2234 | jme->reg_rxmcs &= ~RXMCS_FLOWCTRL; | ||
2235 | |||
2236 | jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); | ||
2237 | } | ||
2238 | spin_unlock_bh(&jme->rxmcs_lock); | ||
2239 | |||
2240 | spin_lock_bh(&jme->phy_lock); | ||
2241 | val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); | ||
2242 | if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^ | ||
2243 | (ecmd->autoneg != 0)) { | ||
2244 | |||
2245 | if (ecmd->autoneg) | ||
2246 | val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); | ||
2247 | else | ||
2248 | val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); | ||
2249 | |||
2250 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, | ||
2251 | MII_ADVERTISE, val); | ||
2252 | } | ||
2253 | spin_unlock_bh(&jme->phy_lock); | ||
2254 | |||
2255 | return 0; | ||
2256 | } | ||
2257 | |||
2258 | static void | ||
2259 | jme_get_wol(struct net_device *netdev, | ||
2260 | struct ethtool_wolinfo *wol) | ||
2261 | { | ||
2262 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2263 | |||
2264 | wol->supported = WAKE_MAGIC | WAKE_PHY; | ||
2265 | |||
2266 | wol->wolopts = 0; | ||
2267 | |||
2268 | if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) | ||
2269 | wol->wolopts |= WAKE_PHY; | ||
2270 | |||
2271 | if (jme->reg_pmcs & PMCS_MFEN) | ||
2272 | wol->wolopts |= WAKE_MAGIC; | ||
2273 | |||
2274 | } | ||
2275 | |||
2276 | static int | ||
2277 | jme_set_wol(struct net_device *netdev, | ||
2278 | struct ethtool_wolinfo *wol) | ||
2279 | { | ||
2280 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2281 | |||
2282 | if (wol->wolopts & (WAKE_MAGICSECURE | | ||
2283 | WAKE_UCAST | | ||
2284 | WAKE_MCAST | | ||
2285 | WAKE_BCAST | | ||
2286 | WAKE_ARP)) | ||
2287 | return -EOPNOTSUPP; | ||
2288 | |||
2289 | jme->reg_pmcs = 0; | ||
2290 | |||
2291 | if (wol->wolopts & WAKE_PHY) | ||
2292 | jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN; | ||
2293 | |||
2294 | if (wol->wolopts & WAKE_MAGIC) | ||
2295 | jme->reg_pmcs |= PMCS_MFEN; | ||
2296 | |||
2297 | jwrite32(jme, JME_PMCS, jme->reg_pmcs); | ||
2298 | |||
2299 | return 0; | ||
2300 | } | ||
2301 | |||
2302 | static int | ||
2303 | jme_get_settings(struct net_device *netdev, | ||
2304 | struct ethtool_cmd *ecmd) | ||
2305 | { | ||
2306 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2307 | int rc; | ||
2308 | |||
2309 | spin_lock_bh(&jme->phy_lock); | ||
2310 | rc = mii_ethtool_gset(&(jme->mii_if), ecmd); | ||
2311 | spin_unlock_bh(&jme->phy_lock); | ||
2312 | return rc; | ||
2313 | } | ||
2314 | |||
2315 | static int | ||
2316 | jme_set_settings(struct net_device *netdev, | ||
2317 | struct ethtool_cmd *ecmd) | ||
2318 | { | ||
2319 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2320 | int rc, fdc = 0; | ||
2321 | |||
2322 | if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE) | ||
2323 | return -EINVAL; | ||
2324 | |||
2325 | if (jme->mii_if.force_media && | ||
2326 | ecmd->autoneg != AUTONEG_ENABLE && | ||
2327 | (jme->mii_if.full_duplex != ecmd->duplex)) | ||
2328 | fdc = 1; | ||
2329 | |||
2330 | spin_lock_bh(&jme->phy_lock); | ||
2331 | rc = mii_ethtool_sset(&(jme->mii_if), ecmd); | ||
2332 | spin_unlock_bh(&jme->phy_lock); | ||
2333 | |||
2334 | if (!rc && fdc) | ||
2335 | jme_reset_link(jme); | ||
2336 | |||
2337 | if (!rc) { | ||
2338 | set_bit(JME_FLAG_SSET, &jme->flags); | ||
2339 | jme->old_ecmd = *ecmd; | ||
2340 | } | ||
2341 | |||
2342 | return rc; | ||
2343 | } | ||
2344 | |||
2345 | static u32 | ||
2346 | jme_get_link(struct net_device *netdev) | ||
2347 | { | ||
2348 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2349 | return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP; | ||
2350 | } | ||
2351 | |||
2352 | static u32 | ||
2353 | jme_get_msglevel(struct net_device *netdev) | ||
2354 | { | ||
2355 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2356 | return jme->msg_enable; | ||
2357 | } | ||
2358 | |||
2359 | static void | ||
2360 | jme_set_msglevel(struct net_device *netdev, u32 value) | ||
2361 | { | ||
2362 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2363 | jme->msg_enable = value; | ||
2364 | } | ||
2365 | |||
2366 | static u32 | ||
2367 | jme_get_rx_csum(struct net_device *netdev) | ||
2368 | { | ||
2369 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2370 | return jme->reg_rxmcs & RXMCS_CHECKSUM; | ||
2371 | } | ||
2372 | |||
2373 | static int | ||
2374 | jme_set_rx_csum(struct net_device *netdev, u32 on) | ||
2375 | { | ||
2376 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2377 | |||
2378 | spin_lock_bh(&jme->rxmcs_lock); | ||
2379 | if (on) | ||
2380 | jme->reg_rxmcs |= RXMCS_CHECKSUM; | ||
2381 | else | ||
2382 | jme->reg_rxmcs &= ~RXMCS_CHECKSUM; | ||
2383 | jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); | ||
2384 | spin_unlock_bh(&jme->rxmcs_lock); | ||
2385 | |||
2386 | return 0; | ||
2387 | } | ||
2388 | |||
2389 | static int | ||
2390 | jme_set_tx_csum(struct net_device *netdev, u32 on) | ||
2391 | { | ||
2392 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2393 | |||
2394 | if (on) { | ||
2395 | set_bit(JME_FLAG_TXCSUM, &jme->flags); | ||
2396 | if (netdev->mtu <= 1900) | ||
2397 | netdev->features |= NETIF_F_HW_CSUM; | ||
2398 | } else { | ||
2399 | clear_bit(JME_FLAG_TXCSUM, &jme->flags); | ||
2400 | netdev->features &= ~NETIF_F_HW_CSUM; | ||
2401 | } | ||
2402 | |||
2403 | return 0; | ||
2404 | } | ||
2405 | |||
2406 | static int | ||
2407 | jme_set_tso(struct net_device *netdev, u32 on) | ||
2408 | { | ||
2409 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2410 | |||
2411 | if (on) { | ||
2412 | set_bit(JME_FLAG_TSO, &jme->flags); | ||
2413 | if (netdev->mtu <= 1900) | ||
2414 | netdev->features |= NETIF_F_TSO | NETIF_F_TSO6; | ||
2415 | } else { | ||
2416 | clear_bit(JME_FLAG_TSO, &jme->flags); | ||
2417 | netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); | ||
2418 | } | ||
2419 | |||
2420 | return 0; | ||
2421 | } | ||
2422 | |||
2423 | static int | ||
2424 | jme_nway_reset(struct net_device *netdev) | ||
2425 | { | ||
2426 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2427 | jme_restart_an(jme); | ||
2428 | return 0; | ||
2429 | } | ||
2430 | |||
2431 | static u8 | ||
2432 | jme_smb_read(struct jme_adapter *jme, unsigned int addr) | ||
2433 | { | ||
2434 | u32 val; | ||
2435 | int to; | ||
2436 | |||
2437 | val = jread32(jme, JME_SMBCSR); | ||
2438 | to = JME_SMB_BUSY_TIMEOUT; | ||
2439 | while ((val & SMBCSR_BUSY) && --to) { | ||
2440 | msleep(1); | ||
2441 | val = jread32(jme, JME_SMBCSR); | ||
2442 | } | ||
2443 | if (!to) { | ||
2444 | msg_hw(jme, "SMB Bus Busy.\n"); | ||
2445 | return 0xFF; | ||
2446 | } | ||
2447 | |||
2448 | jwrite32(jme, JME_SMBINTF, | ||
2449 | ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) | | ||
2450 | SMBINTF_HWRWN_READ | | ||
2451 | SMBINTF_HWCMD); | ||
2452 | |||
2453 | val = jread32(jme, JME_SMBINTF); | ||
2454 | to = JME_SMB_BUSY_TIMEOUT; | ||
2455 | while ((val & SMBINTF_HWCMD) && --to) { | ||
2456 | msleep(1); | ||
2457 | val = jread32(jme, JME_SMBINTF); | ||
2458 | } | ||
2459 | if (!to) { | ||
2460 | msg_hw(jme, "SMB Bus Busy.\n"); | ||
2461 | return 0xFF; | ||
2462 | } | ||
2463 | |||
2464 | return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT; | ||
2465 | } | ||
2466 | |||
2467 | static void | ||
2468 | jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data) | ||
2469 | { | ||
2470 | u32 val; | ||
2471 | int to; | ||
2472 | |||
2473 | val = jread32(jme, JME_SMBCSR); | ||
2474 | to = JME_SMB_BUSY_TIMEOUT; | ||
2475 | while ((val & SMBCSR_BUSY) && --to) { | ||
2476 | msleep(1); | ||
2477 | val = jread32(jme, JME_SMBCSR); | ||
2478 | } | ||
2479 | if (!to) { | ||
2480 | msg_hw(jme, "SMB Bus Busy.\n"); | ||
2481 | return; | ||
2482 | } | ||
2483 | |||
2484 | jwrite32(jme, JME_SMBINTF, | ||
2485 | ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) | | ||
2486 | ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) | | ||
2487 | SMBINTF_HWRWN_WRITE | | ||
2488 | SMBINTF_HWCMD); | ||
2489 | |||
2490 | val = jread32(jme, JME_SMBINTF); | ||
2491 | to = JME_SMB_BUSY_TIMEOUT; | ||
2492 | while ((val & SMBINTF_HWCMD) && --to) { | ||
2493 | msleep(1); | ||
2494 | val = jread32(jme, JME_SMBINTF); | ||
2495 | } | ||
2496 | if (!to) { | ||
2497 | msg_hw(jme, "SMB Bus Busy.\n"); | ||
2498 | return; | ||
2499 | } | ||
2500 | |||
2501 | mdelay(2); | ||
2502 | } | ||
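Both SMBus helpers use the same poll-with-timeout idiom: sample the status register, sleep 1 ms between retries, and give up with a diagnostic once the budget is spent. The skeleton, extracted into a reusable userspace sketch (read_status() is a stand-in for jread32(); sleep_ms() assumes a POSIX nanosleep()):

	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	#define BUSY_TIMEOUT_MS 100
	#define STATUS_BUSY     0x01

	static uint32_t read_status(void)
	{
		return 0;		/* stub: pretend the bus is idle */
	}

	static void sleep_ms(int ms)
	{
		struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
		nanosleep(&ts, NULL);
	}

	/* Returns 0 once the busy bit clears, -1 on timeout. */
	static int wait_not_busy(void)
	{
		int to = BUSY_TIMEOUT_MS;

		while ((read_status() & STATUS_BUSY) && --to)
			sleep_ms(1);
		return to ? 0 : -1;
	}

	int main(void)
	{
		printf("bus %s\n", wait_not_busy() ? "stuck busy" : "idle");
		return 0;
	}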
2503 | |||
2504 | static int | ||
2505 | jme_get_eeprom_len(struct net_device *netdev) | ||
2506 | { | ||
2507 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2508 | u32 val; | ||
2509 | val = jread32(jme, JME_SMBCSR); | ||
2510 | return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0; | ||
2511 | } | ||
2512 | |||
2513 | static int | ||
2514 | jme_get_eeprom(struct net_device *netdev, | ||
2515 | struct ethtool_eeprom *eeprom, u8 *data) | ||
2516 | { | ||
2517 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2518 | int i, offset = eeprom->offset, len = eeprom->len; | ||
2519 | |||
2520 | /* | ||
2521 | * ethtool will check the boundary for us | ||
2522 | */ | ||
2523 | eeprom->magic = JME_EEPROM_MAGIC; | ||
2524 | for (i = 0 ; i < len ; ++i) | ||
2525 | data[i] = jme_smb_read(jme, i + offset); | ||
2526 | |||
2527 | return 0; | ||
2528 | } | ||
2529 | |||
2530 | static int | ||
2531 | jme_set_eeprom(struct net_device *netdev, | ||
2532 | struct ethtool_eeprom *eeprom, u8 *data) | ||
2533 | { | ||
2534 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2535 | int i, offset = eeprom->offset, len = eeprom->len; | ||
2536 | |||
2537 | if (eeprom->magic != JME_EEPROM_MAGIC) | ||
2538 | return -EINVAL; | ||
2539 | |||
2540 | /* | ||
2541 | * ethtool will check the boundary for us | ||
2542 | */ | ||
2543 | for (i = 0 ; i < len ; ++i) | ||
2544 | jme_smb_write(jme, i + offset, data[i]); | ||
2545 | |||
2546 | return 0; | ||
2547 | } | ||
2548 | |||
2549 | static const struct ethtool_ops jme_ethtool_ops = { | ||
2550 | .get_drvinfo = jme_get_drvinfo, | ||
2551 | .get_regs_len = jme_get_regs_len, | ||
2552 | .get_regs = jme_get_regs, | ||
2553 | .get_coalesce = jme_get_coalesce, | ||
2554 | .set_coalesce = jme_set_coalesce, | ||
2555 | .get_pauseparam = jme_get_pauseparam, | ||
2556 | .set_pauseparam = jme_set_pauseparam, | ||
2557 | .get_wol = jme_get_wol, | ||
2558 | .set_wol = jme_set_wol, | ||
2559 | .get_settings = jme_get_settings, | ||
2560 | .set_settings = jme_set_settings, | ||
2561 | .get_link = jme_get_link, | ||
2562 | .get_msglevel = jme_get_msglevel, | ||
2563 | .set_msglevel = jme_set_msglevel, | ||
2564 | .get_rx_csum = jme_get_rx_csum, | ||
2565 | .set_rx_csum = jme_set_rx_csum, | ||
2566 | .set_tx_csum = jme_set_tx_csum, | ||
2567 | .set_tso = jme_set_tso, | ||
2568 | .set_sg = ethtool_op_set_sg, | ||
2569 | .nway_reset = jme_nway_reset, | ||
2570 | .get_eeprom_len = jme_get_eeprom_len, | ||
2571 | .get_eeprom = jme_get_eeprom, | ||
2572 | .set_eeprom = jme_set_eeprom, | ||
2573 | }; | ||
2574 | |||
2575 | static int | ||
2576 | jme_pci_dma64(struct pci_dev *pdev) | ||
2577 | { | ||
2578 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) | ||
2579 | if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) | ||
2580 | return 1; | ||
2581 | |||
2582 | if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK)) | ||
2583 | if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK)) | ||
2584 | return 1; | ||
2585 | |||
2586 | if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) | ||
2587 | if (!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) | ||
2588 | return 0; | ||
2589 | |||
2590 | return -1; | ||
2591 | } | ||
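jme_pci_dma64() tries progressively narrower DMA masks (64-bit, then 40-bit, then 32-bit), and its return value encodes the outcome: 1 means DAC addressing is usable (so probe sets NETIF_F_HIGHDMA), 0 means 32-bit DMA only, and -1 means no usable mask at all. A sketch of the same tiered negotiation against a fake capability check (try_dma_mask() here is a stand-in for pci_set_dma_mask()):

	#include <stdio.h>

	/* Stand-in for the PCI API: pretend the platform supports
	 * at most 40-bit addressing; 0 means success, like the real call. */
	static int try_dma_mask(int bits)
	{
		return bits <= 40 ? 0 : -1;
	}

	/* 1 = use DAC/HIGHDMA, 0 = 32-bit DMA, -1 = failure. */
	static int negotiate_dma(void)
	{
		if (!try_dma_mask(64))
			return 1;
		if (!try_dma_mask(40))
			return 1;
		if (!try_dma_mask(32))
			return 0;
		return -1;
	}

	int main(void)
	{
		int using_dac = negotiate_dma();
		printf("using_dac = %d\n", using_dac);	/* prints 1 here */
		return 0;
	}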
2592 | |||
2593 | static inline void | ||
2594 | jme_phy_init(struct jme_adapter *jme) | ||
2595 | { | ||
2596 | u16 reg26; | ||
2597 | |||
2598 | reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26); | ||
2599 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000); | ||
2600 | } | ||
2601 | |||
2602 | static inline void | ||
2603 | jme_check_hw_ver(struct jme_adapter *jme) | ||
2604 | { | ||
2605 | u32 chipmode; | ||
2606 | |||
2607 | chipmode = jread32(jme, JME_CHIPMODE); | ||
2608 | |||
2609 | jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT; | ||
2610 | jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT; | ||
2611 | } | ||
2612 | |||
2613 | static int __devinit | ||
2614 | jme_init_one(struct pci_dev *pdev, | ||
2615 | const struct pci_device_id *ent) | ||
2616 | { | ||
2617 | int rc = 0, using_dac, i; | ||
2618 | struct net_device *netdev; | ||
2619 | struct jme_adapter *jme; | ||
2620 | u16 bmcr, bmsr; | ||
2621 | u32 apmc; | ||
2622 | |||
2623 | /* | ||
2624 | * set up PCI device basics | ||
2625 | */ | ||
2626 | rc = pci_enable_device(pdev); | ||
2627 | if (rc) { | ||
2628 | jeprintk(pdev, "Cannot enable PCI device.\n"); | ||
2629 | goto err_out; | ||
2630 | } | ||
2631 | |||
2632 | using_dac = jme_pci_dma64(pdev); | ||
2633 | if (using_dac < 0) { | ||
2634 | jeprintk(pdev, "Cannot set PCI DMA Mask.\n"); | ||
2635 | rc = -EIO; | ||
2636 | goto err_out_disable_pdev; | ||
2637 | } | ||
2638 | |||
2639 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | ||
2640 | jeprintk(pdev, "No PCI resource region found.\n"); | ||
2641 | rc = -ENOMEM; | ||
2642 | goto err_out_disable_pdev; | ||
2643 | } | ||
2644 | |||
2645 | rc = pci_request_regions(pdev, DRV_NAME); | ||
2646 | if (rc) { | ||
2647 | jeprintk(pdev, "Cannot obtain PCI resource region.\n"); | ||
2648 | goto err_out_disable_pdev; | ||
2649 | } | ||
2650 | |||
2651 | pci_set_master(pdev); | ||
2652 | |||
2653 | /* | ||
2654 | * alloc and init net device | ||
2655 | */ | ||
2656 | netdev = alloc_etherdev(sizeof(*jme)); | ||
2657 | if (!netdev) { | ||
2658 | jeprintk(pdev, "Cannot allocate netdev structure.\n"); | ||
2659 | rc = -ENOMEM; | ||
2660 | goto err_out_release_regions; | ||
2661 | } | ||
2662 | netdev->open = jme_open; | ||
2663 | netdev->stop = jme_close; | ||
2664 | netdev->hard_start_xmit = jme_start_xmit; | ||
2665 | netdev->set_mac_address = jme_set_macaddr; | ||
2666 | netdev->set_multicast_list = jme_set_multi; | ||
2667 | netdev->change_mtu = jme_change_mtu; | ||
2668 | netdev->ethtool_ops = &jme_ethtool_ops; | ||
2669 | netdev->tx_timeout = jme_tx_timeout; | ||
2670 | netdev->watchdog_timeo = TX_TIMEOUT; | ||
2671 | netdev->vlan_rx_register = jme_vlan_rx_register; | ||
2672 | NETDEV_GET_STATS(netdev, &jme_get_stats); | ||
2673 | netdev->features = NETIF_F_HW_CSUM | | ||
2674 | NETIF_F_SG | | ||
2675 | NETIF_F_TSO | | ||
2676 | NETIF_F_TSO6 | | ||
2677 | NETIF_F_HW_VLAN_TX | | ||
2678 | NETIF_F_HW_VLAN_RX; | ||
2679 | if (using_dac) | ||
2680 | netdev->features |= NETIF_F_HIGHDMA; | ||
2681 | |||
2682 | SET_NETDEV_DEV(netdev, &pdev->dev); | ||
2683 | pci_set_drvdata(pdev, netdev); | ||
2684 | |||
2685 | /* | ||
2686 | * init adapter info | ||
2687 | */ | ||
2688 | jme = netdev_priv(netdev); | ||
2689 | jme->pdev = pdev; | ||
2690 | jme->dev = netdev; | ||
2691 | jme->jme_rx = netif_rx; | ||
2692 | jme->jme_vlan_rx = vlan_hwaccel_rx; | ||
2693 | jme->old_mtu = netdev->mtu = 1500; | ||
2694 | jme->phylink = 0; | ||
2695 | jme->tx_ring_size = 1 << 10; | ||
2696 | jme->tx_ring_mask = jme->tx_ring_size - 1; | ||
2697 | jme->tx_wake_threshold = 1 << 9; | ||
2698 | jme->rx_ring_size = 1 << 9; | ||
2699 | jme->rx_ring_mask = jme->rx_ring_size - 1; | ||
2700 | jme->msg_enable = JME_DEF_MSG_ENABLE; | ||
2701 | jme->regs = ioremap(pci_resource_start(pdev, 0), | ||
2702 | pci_resource_len(pdev, 0)); | ||
2703 | if (!(jme->regs)) { | ||
2704 | jeprintk(pdev, "Mapping PCI resource region error.\n"); | ||
2705 | rc = -ENOMEM; | ||
2706 | goto err_out_free_netdev; | ||
2707 | } | ||
2708 | jme->shadow_regs = pci_alloc_consistent(pdev, | ||
2709 | sizeof(u32) * SHADOW_REG_NR, | ||
2710 | &(jme->shadow_dma)); | ||
2711 | if (!(jme->shadow_regs)) { | ||
2712 | jeprintk(pdev, "Allocating shadow register mapping error.\n"); | ||
2713 | rc = -ENOMEM; | ||
2714 | goto err_out_unmap; | ||
2715 | } | ||
2716 | |||
2717 | if (no_pseudohp) { | ||
2718 | apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN; | ||
2719 | jwrite32(jme, JME_APMC, apmc); | ||
2720 | } else if (force_pseudohp) { | ||
2721 | apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN; | ||
2722 | jwrite32(jme, JME_APMC, apmc); | ||
2723 | } | ||
2724 | |||
2725 | NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2) | ||
2726 | |||
2727 | spin_lock_init(&jme->phy_lock); | ||
2728 | spin_lock_init(&jme->macaddr_lock); | ||
2729 | spin_lock_init(&jme->rxmcs_lock); | ||
2730 | |||
2731 | atomic_set(&jme->link_changing, 1); | ||
2732 | atomic_set(&jme->rx_cleaning, 1); | ||
2733 | atomic_set(&jme->tx_cleaning, 1); | ||
2734 | atomic_set(&jme->rx_empty, 1); | ||
2735 | |||
2736 | tasklet_init(&jme->pcc_task, | ||
2737 | &jme_pcc_tasklet, | ||
2738 | (unsigned long) jme); | ||
2739 | tasklet_init(&jme->linkch_task, | ||
2740 | &jme_link_change_tasklet, | ||
2741 | (unsigned long) jme); | ||
2742 | tasklet_init(&jme->txclean_task, | ||
2743 | &jme_tx_clean_tasklet, | ||
2744 | (unsigned long) jme); | ||
2745 | tasklet_init(&jme->rxclean_task, | ||
2746 | &jme_rx_clean_tasklet, | ||
2747 | (unsigned long) jme); | ||
2748 | tasklet_init(&jme->rxempty_task, | ||
2749 | &jme_rx_empty_tasklet, | ||
2750 | (unsigned long) jme); | ||
2751 | tasklet_disable_nosync(&jme->txclean_task); | ||
2752 | tasklet_disable_nosync(&jme->rxclean_task); | ||
2753 | tasklet_disable_nosync(&jme->rxempty_task); | ||
2754 | jme->dpi.cur = PCC_P1; | ||
2755 | |||
2756 | jme->reg_ghc = 0; | ||
2757 | jme->reg_rxcs = RXCS_DEFAULT; | ||
2758 | jme->reg_rxmcs = RXMCS_DEFAULT; | ||
2759 | jme->reg_txpfc = 0; | ||
2760 | jme->reg_pmcs = PMCS_MFEN; | ||
2761 | set_bit(JME_FLAG_TXCSUM, &jme->flags); | ||
2762 | set_bit(JME_FLAG_TSO, &jme->flags); | ||
2763 | |||
2764 | /* | ||
2765 | * Get Max Read Req Size from PCI Config Space | ||
2766 | */ | ||
2767 | pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs); | ||
2768 | jme->mrrs &= PCI_DCSR_MRRS_MASK; | ||
2769 | switch (jme->mrrs) { | ||
2770 | case MRRS_128B: | ||
2771 | jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B; | ||
2772 | break; | ||
2773 | case MRRS_256B: | ||
2774 | jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B; | ||
2775 | break; | ||
2776 | default: | ||
2777 | jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B; | ||
2778 | break; | ||
2779 | } | ||
2780 | |||
2781 | /* | ||
2782 | * Must check before reset_mac_processor | ||
2783 | */ | ||
2784 | jme_check_hw_ver(jme); | ||
2785 | jme->mii_if.dev = netdev; | ||
2786 | if (jme->fpgaver) { | ||
2787 | jme->mii_if.phy_id = 0; | ||
2788 | for (i = 1 ; i < 32 ; ++i) { | ||
2789 | bmcr = jme_mdio_read(netdev, i, MII_BMCR); | ||
2790 | bmsr = jme_mdio_read(netdev, i, MII_BMSR); | ||
2791 | if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) { | ||
2792 | jme->mii_if.phy_id = i; | ||
2793 | break; | ||
2794 | } | ||
2795 | } | ||
2796 | |||
2797 | if (!jme->mii_if.phy_id) { | ||
2798 | rc = -EIO; | ||
2799 | jeprintk(pdev, "Can not find phy_id.\n"); | ||
2800 | goto err_out_free_shadow; | ||
2801 | } | ||
2802 | |||
2803 | jme->reg_ghc |= GHC_LINK_POLL; | ||
2804 | } else { | ||
2805 | jme->mii_if.phy_id = 1; | ||
2806 | } | ||
2807 | if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) | ||
2808 | jme->mii_if.supports_gmii = true; | ||
2809 | else | ||
2810 | jme->mii_if.supports_gmii = false; | ||
2811 | jme->mii_if.mdio_read = jme_mdio_read; | ||
2812 | jme->mii_if.mdio_write = jme_mdio_write; | ||
2813 | |||
2814 | jme_clear_pm(jme); | ||
2815 | jme_set_phyfifoa(jme); | ||
2816 | pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev); | ||
2817 | if (!jme->fpgaver) | ||
2818 | jme_phy_init(jme); | ||
2819 | jme_phy_off(jme); | ||
2820 | |||
2821 | /* | ||
2822 | * Reset MAC processor and reload EEPROM for MAC Address | ||
2823 | */ | ||
2824 | jme_reset_mac_processor(jme); | ||
2825 | rc = jme_reload_eeprom(jme); | ||
2826 | if (rc) { | ||
2827 | jeprintk(pdev, | ||
2828 | "Cannot reload EEPROM to read the MAC address.\n"); | ||
2829 | goto err_out_free_shadow; | ||
2830 | } | ||
2831 | jme_load_macaddr(netdev); | ||
2832 | |||
2833 | /* | ||
2834 | * Tell the stack that we are not ready to work until open() | ||
2835 | */ | ||
2836 | netif_carrier_off(netdev); | ||
2837 | netif_stop_queue(netdev); | ||
2838 | |||
2839 | /* | ||
2840 | * Register netdev | ||
2841 | */ | ||
2842 | rc = register_netdev(netdev); | ||
2843 | if (rc) { | ||
2844 | jeprintk(pdev, "Cannot register net device.\n"); | ||
2845 | goto err_out_free_shadow; | ||
2846 | } | ||
2847 | |||
2848 | msg_probe(jme, | ||
2849 | "JMC250 gigabit%s ver:%x rev:%x " | ||
2850 | "macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n", | ||
2851 | (jme->fpgaver != 0) ? " (FPGA)" : "", | ||
2852 | (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev, | ||
2853 | jme->rev, | ||
2854 | netdev->dev_addr[0], | ||
2855 | netdev->dev_addr[1], | ||
2856 | netdev->dev_addr[2], | ||
2857 | netdev->dev_addr[3], | ||
2858 | netdev->dev_addr[4], | ||
2859 | netdev->dev_addr[5]); | ||
2860 | |||
2861 | return 0; | ||
2862 | |||
2863 | err_out_free_shadow: | ||
2864 | pci_free_consistent(pdev, | ||
2865 | sizeof(u32) * SHADOW_REG_NR, | ||
2866 | jme->shadow_regs, | ||
2867 | jme->shadow_dma); | ||
2868 | err_out_unmap: | ||
2869 | iounmap(jme->regs); | ||
2870 | err_out_free_netdev: | ||
2871 | pci_set_drvdata(pdev, NULL); | ||
2872 | free_netdev(netdev); | ||
2873 | err_out_release_regions: | ||
2874 | pci_release_regions(pdev); | ||
2875 | err_out_disable_pdev: | ||
2876 | pci_disable_device(pdev); | ||
2877 | err_out: | ||
2878 | return rc; | ||
2879 | } | ||
2880 | |||
2881 | static void __devexit | ||
2882 | jme_remove_one(struct pci_dev *pdev) | ||
2883 | { | ||
2884 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2885 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2886 | |||
2887 | unregister_netdev(netdev); | ||
2888 | pci_free_consistent(pdev, | ||
2889 | sizeof(u32) * SHADOW_REG_NR, | ||
2890 | jme->shadow_regs, | ||
2891 | jme->shadow_dma); | ||
2892 | iounmap(jme->regs); | ||
2893 | pci_set_drvdata(pdev, NULL); | ||
2894 | free_netdev(netdev); | ||
2895 | pci_release_regions(pdev); | ||
2896 | pci_disable_device(pdev); | ||
2897 | |||
2898 | } | ||
2899 | |||
2900 | static int | ||
2901 | jme_suspend(struct pci_dev *pdev, pm_message_t state) | ||
2902 | { | ||
2903 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2904 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2905 | |||
2906 | atomic_dec(&jme->link_changing); | ||
2907 | |||
2908 | netif_device_detach(netdev); | ||
2909 | netif_stop_queue(netdev); | ||
2910 | jme_stop_irq(jme); | ||
2911 | |||
2912 | tasklet_disable(&jme->txclean_task); | ||
2913 | tasklet_disable(&jme->rxclean_task); | ||
2914 | tasklet_disable(&jme->rxempty_task); | ||
2915 | |||
2916 | jme_disable_shadow(jme); | ||
2917 | |||
2918 | if (netif_carrier_ok(netdev)) { | ||
2919 | if (test_bit(JME_FLAG_POLL, &jme->flags)) | ||
2920 | jme_polling_mode(jme); | ||
2921 | |||
2922 | jme_stop_pcc_timer(jme); | ||
2923 | jme_reset_ghc_speed(jme); | ||
2924 | jme_disable_rx_engine(jme); | ||
2925 | jme_disable_tx_engine(jme); | ||
2926 | jme_reset_mac_processor(jme); | ||
2927 | jme_free_rx_resources(jme); | ||
2928 | jme_free_tx_resources(jme); | ||
2929 | netif_carrier_off(netdev); | ||
2930 | jme->phylink = 0; | ||
2931 | } | ||
2932 | |||
2933 | tasklet_enable(&jme->txclean_task); | ||
2934 | tasklet_hi_enable(&jme->rxclean_task); | ||
2935 | tasklet_hi_enable(&jme->rxempty_task); | ||
2936 | |||
2937 | pci_save_state(pdev); | ||
2938 | if (jme->reg_pmcs) { | ||
2939 | jme_set_100m_half(jme); | ||
2940 | |||
2941 | if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) | ||
2942 | jme_wait_link(jme); | ||
2943 | |||
2944 | jwrite32(jme, JME_PMCS, jme->reg_pmcs); | ||
2945 | |||
2946 | pci_enable_wake(pdev, PCI_D3cold, true); | ||
2947 | } else { | ||
2948 | jme_phy_off(jme); | ||
2949 | } | ||
2950 | pci_set_power_state(pdev, PCI_D3cold); | ||
2951 | |||
2952 | return 0; | ||
2953 | } | ||
2954 | |||
2955 | static int | ||
2956 | jme_resume(struct pci_dev *pdev) | ||
2957 | { | ||
2958 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2959 | struct jme_adapter *jme = netdev_priv(netdev); | ||
2960 | |||
2961 | jme_clear_pm(jme); | ||
2962 | pci_restore_state(pdev); | ||
2963 | |||
2964 | if (test_bit(JME_FLAG_SSET, &jme->flags)) | ||
2965 | jme_set_settings(netdev, &jme->old_ecmd); | ||
2966 | else | ||
2967 | jme_reset_phy_processor(jme); | ||
2968 | |||
2969 | jme_enable_shadow(jme); | ||
2970 | jme_start_irq(jme); | ||
2971 | netif_device_attach(netdev); | ||
2972 | |||
2973 | atomic_inc(&jme->link_changing); | ||
2974 | |||
2975 | jme_reset_link(jme); | ||
2976 | |||
2977 | return 0; | ||
2978 | } | ||
2979 | |||
2980 | static struct pci_device_id jme_pci_tbl[] = { | ||
2981 | { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) }, | ||
2982 | { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) }, | ||
2983 | { } | ||
2984 | }; | ||
2985 | |||
2986 | static struct pci_driver jme_driver = { | ||
2987 | .name = DRV_NAME, | ||
2988 | .id_table = jme_pci_tbl, | ||
2989 | .probe = jme_init_one, | ||
2990 | .remove = __devexit_p(jme_remove_one), | ||
2991 | #ifdef CONFIG_PM | ||
2992 | .suspend = jme_suspend, | ||
2993 | .resume = jme_resume, | ||
2994 | #endif /* CONFIG_PM */ | ||
2995 | }; | ||
2996 | |||
2997 | static int __init | ||
2998 | jme_init_module(void) | ||
2999 | { | ||
3000 | printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet " | ||
3001 | "driver version %s\n", DRV_VERSION); | ||
3002 | return pci_register_driver(&jme_driver); | ||
3003 | } | ||
3004 | |||
3005 | static void __exit | ||
3006 | jme_cleanup_module(void) | ||
3007 | { | ||
3008 | pci_unregister_driver(&jme_driver); | ||
3009 | } | ||
3010 | |||
3011 | module_init(jme_init_module); | ||
3012 | module_exit(jme_cleanup_module); | ||
3013 | |||
3014 | MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>"); | ||
3015 | MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver"); | ||
3016 | MODULE_LICENSE("GPL"); | ||
3017 | MODULE_VERSION(DRV_VERSION); | ||
3018 | MODULE_DEVICE_TABLE(pci, jme_pci_tbl); | ||
3019 | |||
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
new file mode 100644
index 000000000000..b29688431a6d
--- /dev/null
+++ b/drivers/net/jme.h
@@ -0,0 +1,1199 @@ | |||
1 | /* | ||
2 | * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver | ||
3 | * | ||
4 | * Copyright 2008 JMicron Technology Corporation | ||
5 | * http://www.jmicron.com/ | ||
6 | * | ||
7 | * Author: Guo-Fu Tseng <cooldavid@cooldavid.org> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __JME_H_INCLUDED__ | ||
25 | #define __JME_H_INCLUDED__ | ||
26 | |||
27 | #define DRV_NAME "jme" | ||
28 | #define DRV_VERSION "1.0.2" | ||
29 | #define PFX DRV_NAME ": " | ||
30 | |||
31 | #define PCI_DEVICE_ID_JMICRON_JMC250 0x0250 | ||
32 | #define PCI_DEVICE_ID_JMICRON_JMC260 0x0260 | ||
33 | |||
34 | /* | ||
35 | * Message related definitions | ||
36 | */ | ||
37 | #define JME_DEF_MSG_ENABLE \ | ||
38 | (NETIF_MSG_PROBE | \ | ||
39 | NETIF_MSG_LINK | \ | ||
40 | NETIF_MSG_RX_ERR | \ | ||
41 | NETIF_MSG_TX_ERR | \ | ||
42 | NETIF_MSG_HW) | ||
43 | |||
44 | #define jeprintk(pdev, fmt, args...) \ | ||
45 | printk(KERN_ERR PFX fmt, ## args) | ||
46 | |||
47 | #ifdef TX_DEBUG | ||
48 | #define tx_dbg(priv, fmt, args...) \ | ||
49 | printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ## args) | ||
50 | #else | ||
51 | #define tx_dbg(priv, fmt, args...) | ||
52 | #endif | ||
53 | |||
54 | #define jme_msg(msglvl, type, priv, fmt, args...) \ | ||
55 | if (netif_msg_##type(priv)) \ | ||
56 | printk(msglvl "%s: " fmt, (priv)->dev->name, ## args) | ||
57 | |||
58 | #define msg_probe(priv, fmt, args...) \ | ||
59 | jme_msg(KERN_INFO, probe, priv, fmt, ## args) | ||
60 | |||
61 | #define msg_link(priv, fmt, args...) \ | ||
62 | jme_msg(KERN_INFO, link, priv, fmt, ## args) | ||
63 | |||
64 | #define msg_intr(priv, fmt, args...) \ | ||
65 | jme_msg(KERN_INFO, intr, priv, fmt, ## args) | ||
66 | |||
67 | #define msg_rx_err(priv, fmt, args...) \ | ||
68 | jme_msg(KERN_ERR, rx_err, priv, fmt, ## args) | ||
69 | |||
70 | #define msg_rx_status(priv, fmt, args...) \ | ||
71 | jme_msg(KERN_INFO, rx_status, priv, fmt, ## args) | ||
72 | |||
73 | #define msg_tx_err(priv, fmt, args...) \ | ||
74 | jme_msg(KERN_ERR, tx_err, priv, fmt, ## args) | ||
75 | |||
76 | #define msg_tx_done(priv, fmt, args...) \ | ||
77 | jme_msg(KERN_INFO, tx_done, priv, fmt, ## args) | ||
78 | |||
79 | #define msg_tx_queued(priv, fmt, args...) \ | ||
80 | jme_msg(KERN_INFO, tx_queued, priv, fmt, ## args) | ||
81 | |||
82 | #define msg_hw(priv, fmt, args...) \ | ||
83 | jme_msg(KERN_ERR, hw, priv, fmt, ## args) | ||
84 | |||
85 | /* | ||
86 | * Extra PCI Configuration space interface | ||
87 | */ | ||
88 | #define PCI_DCSR_MRRS 0x59 | ||
89 | #define PCI_DCSR_MRRS_MASK 0x70 | ||
90 | |||
91 | enum pci_dcsr_mrrs_vals { | ||
92 | MRRS_128B = 0x00, | ||
93 | MRRS_256B = 0x10, | ||
94 | MRRS_512B = 0x20, | ||
95 | MRRS_1024B = 0x30, | ||
96 | MRRS_2048B = 0x40, | ||
97 | MRRS_4096B = 0x50, | ||
98 | }; | ||
99 | |||
100 | #define PCI_SPI 0xB0 | ||
101 | |||
102 | enum pci_spi_bits { | ||
103 | SPI_EN = 0x10, | ||
104 | SPI_MISO = 0x08, | ||
105 | SPI_MOSI = 0x04, | ||
106 | SPI_SCLK = 0x02, | ||
107 | SPI_CS = 0x01, | ||
108 | }; | ||
109 | |||
110 | struct jme_spi_op { | ||
111 | void __user *uwbuf; | ||
112 | void __user *urbuf; | ||
113 | __u8 wn; /* Number of write actions */ | ||
114 | __u8 rn; /* Number of read actions */ | ||
115 | __u8 bitn; /* Number of bits per action */ | ||
116 | __u8 spd; /* Maximum acceptable speed of controller, in MHz. */ | ||
117 | __u8 mode; /* CPOL, CPHA, and Duplex mode of SPI */ | ||
118 | |||
119 | /* Internal use only */ | ||
120 | u8 *kwbuf; | ||
121 | u8 *krbuf; | ||
122 | u8 sr; | ||
123 | u16 halfclk; /* Half of clock cycle calculated from spd, in ns */ | ||
124 | }; | ||
125 | |||
126 | enum jme_spi_op_bits { | ||
127 | SPI_MODE_CPHA = 0x01, | ||
128 | SPI_MODE_CPOL = 0x02, | ||
129 | SPI_MODE_DUP = 0x80, | ||
130 | }; | ||
131 | |||
132 | #define HALF_US 500 /* 500 ns */ | ||
133 | #define JMESPIIOCTL SIOCDEVPRIVATE | ||
134 | |||
135 | /* | ||
136 | * Dynamic(adaptive)/Static PCC values | ||
137 | */ | ||
138 | enum dynamic_pcc_values { | ||
139 | PCC_OFF = 0, | ||
140 | PCC_P1 = 1, | ||
141 | PCC_P2 = 2, | ||
142 | PCC_P3 = 3, | ||
143 | |||
144 | PCC_OFF_TO = 0, | ||
145 | PCC_P1_TO = 1, | ||
146 | PCC_P2_TO = 64, | ||
147 | PCC_P3_TO = 128, | ||
148 | |||
149 | PCC_OFF_CNT = 0, | ||
150 | PCC_P1_CNT = 1, | ||
151 | PCC_P2_CNT = 16, | ||
152 | PCC_P3_CNT = 32, | ||
153 | }; | ||
154 | struct dynpcc_info { | ||
155 | unsigned long last_bytes; | ||
156 | unsigned long last_pkts; | ||
157 | unsigned long intr_cnt; | ||
158 | unsigned char cur; | ||
159 | unsigned char attempt; | ||
160 | unsigned char cnt; | ||
161 | }; | ||
162 | #define PCC_INTERVAL_US 100000 | ||
163 | #define PCC_INTERVAL (HZ / (1000000 / PCC_INTERVAL_US)) | ||
164 | #define PCC_P3_THRESHOLD (2 * 1024 * 1024) | ||
165 | #define PCC_P2_THRESHOLD 800 | ||
166 | #define PCC_INTR_THRESHOLD 800 | ||
167 | #define PCC_TX_TO 1000 | ||
168 | #define PCC_TX_CNT 8 | ||
169 | |||
170 | /* | ||
171 | * TX/RX Descriptors | ||
172 | * | ||
173 | * TX/RX ring descriptor count must be a multiple of 16 and <= 1024 | ||
174 | */ | ||
175 | #define RING_DESC_ALIGN 16 /* Descriptor alignment */ | ||
176 | #define TX_DESC_SIZE 16 | ||
177 | #define TX_RING_NR 8 | ||
178 | #define TX_RING_ALLOC_SIZE(s) ((s * TX_DESC_SIZE) + RING_DESC_ALIGN) | ||
179 | |||
180 | struct txdesc { | ||
181 | union { | ||
182 | __u8 all[16]; | ||
183 | __le32 dw[4]; | ||
184 | struct { | ||
185 | /* DW0 */ | ||
186 | __le16 vlan; | ||
187 | __u8 rsv1; | ||
188 | __u8 flags; | ||
189 | |||
190 | /* DW1 */ | ||
191 | __le16 datalen; | ||
192 | __le16 mss; | ||
193 | |||
194 | /* DW2 */ | ||
195 | __le16 pktsize; | ||
196 | __le16 rsv2; | ||
197 | |||
198 | /* DW3 */ | ||
199 | __le32 bufaddr; | ||
200 | } desc1; | ||
201 | struct { | ||
202 | /* DW0 */ | ||
203 | __le16 rsv1; | ||
204 | __u8 rsv2; | ||
205 | __u8 flags; | ||
206 | |||
207 | /* DW1 */ | ||
208 | __le16 datalen; | ||
209 | __le16 rsv3; | ||
210 | |||
211 | /* DW2 */ | ||
212 | __le32 bufaddrh; | ||
213 | |||
214 | /* DW3 */ | ||
215 | __le32 bufaddrl; | ||
216 | } desc2; | ||
217 | struct { | ||
218 | /* DW0 */ | ||
219 | __u8 ehdrsz; | ||
220 | __u8 rsv1; | ||
221 | __u8 rsv2; | ||
222 | __u8 flags; | ||
223 | |||
224 | /* DW1 */ | ||
225 | __le16 trycnt; | ||
226 | __le16 segcnt; | ||
227 | |||
228 | /* DW2 */ | ||
229 | __le16 pktsz; | ||
230 | __le16 rsv3; | ||
231 | |||
232 | /* DW3 */ | ||
233 | __le32 bufaddrl; | ||
234 | } descwb; | ||
235 | }; | ||
236 | }; | ||
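Every view of this descriptor union must occupy exactly 16 bytes (TX_DESC_SIZE) with the flags byte at a fixed offset in DW0. A compile-time sanity check along these lines (C11 _Static_assert, trimmed to the desc1 view, with host-endian uint types as stand-ins for the __le types) catches layout drift:

	#include <stddef.h>
	#include <stdint.h>

	struct txdesc_view {		/* mirrors the desc1 view above */
		uint16_t vlan;
		uint8_t  rsv1;
		uint8_t  flags;
		uint16_t datalen;
		uint16_t mss;
		uint16_t pktsize;
		uint16_t rsv2;
		uint32_t bufaddr;
	};

	_Static_assert(sizeof(struct txdesc_view) == 16,
		       "descriptor must stay 16 bytes");
	_Static_assert(offsetof(struct txdesc_view, flags) == 3,
		       "OWN/flags byte lives in DW0 byte 3");

	int main(void) { return 0; }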
237 | |||
238 | enum jme_txdesc_flags_bits { | ||
239 | TXFLAG_OWN = 0x80, | ||
240 | TXFLAG_INT = 0x40, | ||
241 | TXFLAG_64BIT = 0x20, | ||
242 | TXFLAG_TCPCS = 0x10, | ||
243 | TXFLAG_UDPCS = 0x08, | ||
244 | TXFLAG_IPCS = 0x04, | ||
245 | TXFLAG_LSEN = 0x02, | ||
246 | TXFLAG_TAGON = 0x01, | ||
247 | }; | ||
248 | |||
249 | #define TXDESC_MSS_SHIFT 2 | ||
250 | enum jme_rxdescwb_flags_bits { | ||
251 | TXWBFLAG_OWN = 0x80, | ||
252 | TXWBFLAG_INT = 0x40, | ||
253 | TXWBFLAG_TMOUT = 0x20, | ||
254 | TXWBFLAG_TRYOUT = 0x10, | ||
255 | TXWBFLAG_COL = 0x08, | ||
256 | |||
257 | TXWBFLAG_ALLERR = TXWBFLAG_TMOUT | | ||
258 | TXWBFLAG_TRYOUT | | ||
259 | TXWBFLAG_COL, | ||
260 | }; | ||
261 | |||
262 | #define RX_DESC_SIZE 16 | ||
263 | #define RX_RING_NR 4 | ||
264 | #define RX_RING_ALLOC_SIZE(s) ((s * RX_DESC_SIZE) + RING_DESC_ALIGN) | ||
265 | #define RX_BUF_DMA_ALIGN 8 | ||
266 | #define RX_PREPAD_SIZE 10 | ||
267 | #define ETH_CRC_LEN 2 | ||
268 | #define RX_VLANHDR_LEN 2 | ||
269 | #define RX_EXTRA_LEN (RX_PREPAD_SIZE + \ | ||
270 | ETH_HLEN + \ | ||
271 | ETH_CRC_LEN + \ | ||
272 | RX_VLANHDR_LEN + \ | ||
273 | RX_BUF_DMA_ALIGN) | ||
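Substituting the values above (ETH_HLEN is 14), RX_EXTRA_LEN works out to 10 + 14 + 2 + 2 + 8 = 36 bytes of extra headroom budgeted for each receive buffer.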
274 | |||
275 | struct rxdesc { | ||
276 | union { | ||
277 | __u8 all[16]; | ||
278 | __le32 dw[4]; | ||
279 | struct { | ||
280 | /* DW0 */ | ||
281 | __le16 rsv2; | ||
282 | __u8 rsv1; | ||
283 | __u8 flags; | ||
284 | |||
285 | /* DW1 */ | ||
286 | __le16 datalen; | ||
287 | __le16 wbcpl; | ||
288 | |||
289 | /* DW2 */ | ||
290 | __le32 bufaddrh; | ||
291 | |||
292 | /* DW3 */ | ||
293 | __le32 bufaddrl; | ||
294 | } desc1; | ||
295 | struct { | ||
296 | /* DW0 */ | ||
297 | __le16 vlan; | ||
298 | __le16 flags; | ||
299 | |||
300 | /* DW1 */ | ||
301 | __le16 framesize; | ||
302 | __u8 errstat; | ||
303 | __u8 desccnt; | ||
304 | |||
305 | /* DW2 */ | ||
306 | __le32 rsshash; | ||
307 | |||
308 | /* DW3 */ | ||
309 | __u8 hashfun; | ||
310 | __u8 hashtype; | ||
311 | __le16 resrv; | ||
312 | } descwb; | ||
313 | }; | ||
314 | }; | ||
315 | |||
316 | enum jme_rxdesc_flags_bits { | ||
317 | RXFLAG_OWN = 0x80, | ||
318 | RXFLAG_INT = 0x40, | ||
319 | RXFLAG_64BIT = 0x20, | ||
320 | }; | ||
321 | |||
322 | enum jme_rxwbdesc_flags_bits { | ||
323 | RXWBFLAG_OWN = 0x8000, | ||
324 | RXWBFLAG_INT = 0x4000, | ||
325 | RXWBFLAG_MF = 0x2000, | ||
326 | RXWBFLAG_64BIT = 0x2000, | ||
327 | RXWBFLAG_TCPON = 0x1000, | ||
328 | RXWBFLAG_UDPON = 0x0800, | ||
329 | RXWBFLAG_IPCS = 0x0400, | ||
330 | RXWBFLAG_TCPCS = 0x0200, | ||
331 | RXWBFLAG_UDPCS = 0x0100, | ||
332 | RXWBFLAG_TAGON = 0x0080, | ||
333 | RXWBFLAG_IPV4 = 0x0040, | ||
334 | RXWBFLAG_IPV6 = 0x0020, | ||
335 | RXWBFLAG_PAUSE = 0x0010, | ||
336 | RXWBFLAG_MAGIC = 0x0008, | ||
337 | RXWBFLAG_WAKEUP = 0x0004, | ||
338 | RXWBFLAG_DEST = 0x0003, | ||
339 | RXWBFLAG_DEST_UNI = 0x0001, | ||
340 | RXWBFLAG_DEST_MUL = 0x0002, | ||
341 | RXWBFLAG_DEST_BRO = 0x0003, | ||
342 | }; | ||
343 | |||
344 | enum jme_rxwbdesc_desccnt_mask { | ||
345 | RXWBDCNT_WBCPL = 0x80, | ||
346 | RXWBDCNT_DCNT = 0x7F, | ||
347 | }; | ||
348 | |||
349 | enum jme_rxwbdesc_errstat_bits { | ||
350 | RXWBERR_LIMIT = 0x80, | ||
351 | RXWBERR_MIIER = 0x40, | ||
352 | RXWBERR_NIBON = 0x20, | ||
353 | RXWBERR_COLON = 0x10, | ||
354 | RXWBERR_ABORT = 0x08, | ||
355 | RXWBERR_SHORT = 0x04, | ||
356 | RXWBERR_OVERUN = 0x02, | ||
357 | RXWBERR_CRCERR = 0x01, | ||
358 | RXWBERR_ALLERR = 0xFF, | ||
359 | }; | ||
360 | |||
361 | /* | ||
362 | * Buffer information corresponding to ring descriptors. | ||
363 | */ | ||
364 | struct jme_buffer_info { | ||
365 | struct sk_buff *skb; | ||
366 | dma_addr_t mapping; | ||
367 | int len; | ||
368 | int nr_desc; | ||
369 | unsigned long start_xmit; | ||
370 | }; | ||
371 | |||
372 | /* | ||
373 | * The structure holding buffer information and ring descriptors all together. | ||
374 | */ | ||
375 | #define MAX_RING_DESC_NR 1024 | ||
376 | struct jme_ring { | ||
377 | void *alloc; /* pointer to allocated memory */ | ||
378 | void *desc; /* pointer to ring memory */ | ||
379 | dma_addr_t dmaalloc; /* phys address of ring alloc */ | ||
380 | dma_addr_t dma; /* phys address for ring dma */ | ||
381 | |||
382 | /* Buffer information corresponding to each descriptor */ | ||
383 | struct jme_buffer_info bufinf[MAX_RING_DESC_NR]; | ||
384 | |||
385 | int next_to_use; | ||
386 | atomic_t next_to_clean; | ||
387 | atomic_t nr_free; | ||
388 | }; | ||
389 | |||
390 | #define NET_STAT(priv) (priv->dev->stats) | ||
391 | #define NETDEV_GET_STATS(netdev, fun_ptr) | ||
392 | #define DECLARE_NET_DEVICE_STATS | ||
393 | |||
394 | #define DECLARE_NAPI_STRUCT struct napi_struct napi; | ||
395 | #define NETIF_NAPI_SET(dev, napis, pollfn, q) \ | ||
396 | netif_napi_add(dev, napis, pollfn, q); | ||
397 | #define JME_NAPI_HOLDER(holder) struct napi_struct *holder | ||
398 | #define JME_NAPI_WEIGHT(w) int w | ||
399 | #define JME_NAPI_WEIGHT_VAL(w) w | ||
400 | #define JME_NAPI_WEIGHT_SET(w, r) | ||
401 | #define JME_RX_COMPLETE(dev, napis) netif_rx_complete(dev, napis) | ||
402 | #define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi); | ||
403 | #define JME_NAPI_DISABLE(priv) \ | ||
404 | if (!napi_disable_pending(&priv->napi)) \ | ||
405 | napi_disable(&priv->napi); | ||
406 | #define JME_RX_SCHEDULE_PREP(priv) \ | ||
407 | netif_rx_schedule_prep(priv->dev, &priv->napi) | ||
408 | #define JME_RX_SCHEDULE(priv) \ | ||
409 | __netif_rx_schedule(priv->dev, &priv->napi); | ||
410 | |||
411 | /* | ||
412 | * Jmac Adapter Private data | ||
413 | */ | ||
414 | #define SHADOW_REG_NR 8 | ||
415 | struct jme_adapter { | ||
416 | struct pci_dev *pdev; | ||
417 | struct net_device *dev; | ||
418 | void __iomem *regs; | ||
419 | dma_addr_t shadow_dma; | ||
420 | u32 *shadow_regs; | ||
421 | struct mii_if_info mii_if; | ||
422 | struct jme_ring rxring[RX_RING_NR]; | ||
423 | struct jme_ring txring[TX_RING_NR]; | ||
424 | spinlock_t phy_lock; | ||
425 | spinlock_t macaddr_lock; | ||
426 | spinlock_t rxmcs_lock; | ||
427 | struct tasklet_struct rxempty_task; | ||
428 | struct tasklet_struct rxclean_task; | ||
429 | struct tasklet_struct txclean_task; | ||
430 | struct tasklet_struct linkch_task; | ||
431 | struct tasklet_struct pcc_task; | ||
432 | unsigned long flags; | ||
433 | u32 reg_txcs; | ||
434 | u32 reg_txpfc; | ||
435 | u32 reg_rxcs; | ||
436 | u32 reg_rxmcs; | ||
437 | u32 reg_ghc; | ||
438 | u32 reg_pmcs; | ||
439 | u32 phylink; | ||
440 | u32 tx_ring_size; | ||
441 | u32 tx_ring_mask; | ||
442 | u32 tx_wake_threshold; | ||
443 | u32 rx_ring_size; | ||
444 | u32 rx_ring_mask; | ||
445 | u8 mrrs; | ||
446 | unsigned int fpgaver; | ||
447 | unsigned int chiprev; | ||
448 | u8 rev; | ||
449 | u32 msg_enable; | ||
450 | struct ethtool_cmd old_ecmd; | ||
451 | unsigned int old_mtu; | ||
452 | struct vlan_group *vlgrp; | ||
453 | struct dynpcc_info dpi; | ||
454 | atomic_t intr_sem; | ||
455 | atomic_t link_changing; | ||
456 | atomic_t tx_cleaning; | ||
457 | atomic_t rx_cleaning; | ||
458 | atomic_t rx_empty; | ||
459 | int (*jme_rx)(struct sk_buff *skb); | ||
460 | int (*jme_vlan_rx)(struct sk_buff *skb, | ||
461 | struct vlan_group *grp, | ||
462 | unsigned short vlan_tag); | ||
463 | DECLARE_NAPI_STRUCT | ||
464 | DECLARE_NET_DEVICE_STATS | ||
465 | }; | ||
466 | |||
467 | enum shadow_reg_val { | ||
468 | SHADOW_IEVE = 0, | ||
469 | }; | ||
470 | |||
471 | enum jme_flags_bits { | ||
472 | JME_FLAG_MSI = 1, | ||
473 | JME_FLAG_SSET = 2, | ||
474 | JME_FLAG_TXCSUM = 3, | ||
475 | JME_FLAG_TSO = 4, | ||
476 | JME_FLAG_POLL = 5, | ||
477 | JME_FLAG_SHUTDOWN = 6, | ||
478 | }; | ||
479 | |||
480 | #define TX_TIMEOUT (5 * HZ) | ||
481 | #define JME_REG_LEN 0x500 | ||
482 | #define MAX_ETHERNET_JUMBO_PACKET_SIZE 9216 | ||
483 | |||
484 | static inline struct jme_adapter* | ||
485 | jme_napi_priv(struct napi_struct *napi) | ||
486 | { | ||
487 | struct jme_adapter *jme; | ||
488 | jme = container_of(napi, struct jme_adapter, napi); | ||
489 | return jme; | ||
490 | } | ||
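jme_napi_priv() recovers the adapter from its embedded napi_struct with container_of(). A standalone illustration of the pointer arithmetic involved (the struct names here are stand-ins, and the macro is a simplified userspace equivalent of the kernel's):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct napi { int weight; };

	struct adapter {
		int id;
		struct napi napi;	/* embedded member, as in jme_adapter */
	};

	int main(void)
	{
		struct adapter a = { .id = 7 };
		struct napi *n = &a.napi;

		/* Walk back from the member to the enclosing structure. */
		struct adapter *back = container_of(n, struct adapter, napi);
		printf("id = %d\n", back->id);	/* prints 7 */
		return 0;
	}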
491 | |||
492 | /* | ||
493 | * Memory-mapped I/O Registers | ||
494 | */ | ||
495 | enum jme_iomap_offsets { | ||
496 | JME_MAC = 0x0000, | ||
497 | JME_PHY = 0x0400, | ||
498 | JME_MISC = 0x0800, | ||
499 | JME_RSS = 0x0C00, | ||
500 | }; | ||
501 | |||
502 | enum jme_iomap_lens { | ||
503 | JME_MAC_LEN = 0x80, | ||
504 | JME_PHY_LEN = 0x58, | ||
505 | JME_MISC_LEN = 0x98, | ||
506 | JME_RSS_LEN = 0xFF, | ||
507 | }; | ||
508 | |||
509 | enum jme_iomap_regs { | ||
510 | JME_TXCS = JME_MAC | 0x00, /* Transmit Control and Status */ | ||
511 | JME_TXDBA_LO = JME_MAC | 0x04, /* Transmit Queue Desc Base Addr */ | ||
512 | JME_TXDBA_HI = JME_MAC | 0x08, /* Transmit Queue Desc Base Addr */ | ||
513 | JME_TXQDC = JME_MAC | 0x0C, /* Transmit Queue Desc Count */ | ||
514 | JME_TXNDA = JME_MAC | 0x10, /* Transmit Queue Next Desc Addr */ | ||
515 | JME_TXMCS = JME_MAC | 0x14, /* Transmit MAC Control Status */ | ||
516 | JME_TXPFC = JME_MAC | 0x18, /* Transmit Pause Frame Control */ | ||
517 | JME_TXTRHD = JME_MAC | 0x1C, /* Transmit Timer/Retry@Half-Dup */ | ||
518 | |||
519 | JME_RXCS = JME_MAC | 0x20, /* Receive Control and Status */ | ||
520 | JME_RXDBA_LO = JME_MAC | 0x24, /* Receive Queue Desc Base Addr */ | ||
521 | JME_RXDBA_HI = JME_MAC | 0x28, /* Receive Queue Desc Base Addr */ | ||
522 | JME_RXQDC = JME_MAC | 0x2C, /* Receive Queue Desc Count */ | ||
523 | JME_RXNDA = JME_MAC | 0x30, /* Receive Queue Next Desc Addr */ | ||
524 | JME_RXMCS = JME_MAC | 0x34, /* Receive MAC Control Status */ | ||
525 | JME_RXUMA_LO = JME_MAC | 0x38, /* Receive Unicast MAC Address */ | ||
526 | JME_RXUMA_HI = JME_MAC | 0x3C, /* Receive Unicast MAC Address */ | ||
527 | JME_RXMCHT_LO = JME_MAC | 0x40, /* Recv Multicast Addr HashTable */ | ||
528 | JME_RXMCHT_HI = JME_MAC | 0x44, /* Recv Multicast Addr HashTable */ | ||
529 | JME_WFODP = JME_MAC | 0x48, /* Wakeup Frame Output Data Port */ | ||
530 | JME_WFOI = JME_MAC | 0x4C, /* Wakeup Frame Output Interface */ | ||
531 | |||
532 | JME_SMI = JME_MAC | 0x50, /* Station Management Interface */ | ||
533 | JME_GHC = JME_MAC | 0x54, /* Global Host Control */ | ||
534 | JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */ | ||
535 | |||
536 | |||
537 | JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */ | ||
538 | JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */ | ||
539 | JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */ | ||
540 | JME_SMBINTF = JME_PHY | 0x44, /* SMB Interface */ | ||
541 | |||
542 | |||
543 | JME_TMCSR = JME_MISC | 0x00, /* Timer Control/Status Register */ | ||
544 | JME_GPREG0 = JME_MISC | 0x08, /* General purpose REG-0 */ | ||
545 | JME_GPREG1 = JME_MISC | 0x0C, /* General purpose REG-1 */ | ||
546 | JME_IEVE = JME_MISC | 0x20, /* Interrupt Event Status */ | ||
547 | JME_IREQ = JME_MISC | 0x24, /* Intr Req Status(For Debug) */ | ||
548 | JME_IENS = JME_MISC | 0x28, /* Intr Enable - Setting Port */ | ||
549 | JME_IENC = JME_MISC | 0x2C, /* Interrupt Enable - Clear Port */ | ||
550 | JME_PCCRX0 = JME_MISC | 0x30, /* PCC Control for RX Queue 0 */ | ||
551 | JME_PCCTX = JME_MISC | 0x40, /* PCC Control for TX Queues */ | ||
552 | JME_CHIPMODE = JME_MISC | 0x44, /* Identify FPGA Version */ | ||
553 | JME_SHBA_HI = JME_MISC | 0x48, /* Shadow Register Base HI */ | ||
554 | JME_SHBA_LO = JME_MISC | 0x4C, /* Shadow Register Base LO */ | ||
555 | JME_TIMER1 = JME_MISC | 0x70, /* Timer1 */ | ||
556 | JME_TIMER2 = JME_MISC | 0x74, /* Timer2 */ | ||
557 | JME_APMC = JME_MISC | 0x7C, /* Aggressive Power Mode Control */ | ||
558 | JME_PCCSRX0 = JME_MISC | 0x80, /* PCC Status of RX0 */ | ||
559 | }; | ||
560 | |||
561 | /* | ||
562 | * TX Control/Status Bits | ||
563 | */ | ||
564 | enum jme_txcs_bits { | ||
565 | TXCS_QUEUE7S = 0x00008000, | ||
566 | TXCS_QUEUE6S = 0x00004000, | ||
567 | TXCS_QUEUE5S = 0x00002000, | ||
568 | TXCS_QUEUE4S = 0x00001000, | ||
569 | TXCS_QUEUE3S = 0x00000800, | ||
570 | TXCS_QUEUE2S = 0x00000400, | ||
571 | TXCS_QUEUE1S = 0x00000200, | ||
572 | TXCS_QUEUE0S = 0x00000100, | ||
573 | TXCS_FIFOTH = 0x000000C0, | ||
574 | TXCS_DMASIZE = 0x00000030, | ||
575 | TXCS_BURST = 0x00000004, | ||
576 | TXCS_ENABLE = 0x00000001, | ||
577 | }; | ||
578 | |||
579 | enum jme_txcs_value { | ||
580 | TXCS_FIFOTH_16QW = 0x000000C0, | ||
581 | TXCS_FIFOTH_12QW = 0x00000080, | ||
582 | TXCS_FIFOTH_8QW = 0x00000040, | ||
583 | TXCS_FIFOTH_4QW = 0x00000000, | ||
584 | |||
585 | TXCS_DMASIZE_64B = 0x00000000, | ||
586 | TXCS_DMASIZE_128B = 0x00000010, | ||
587 | TXCS_DMASIZE_256B = 0x00000020, | ||
588 | TXCS_DMASIZE_512B = 0x00000030, | ||
589 | |||
590 | TXCS_SELECT_QUEUE0 = 0x00000000, | ||
591 | TXCS_SELECT_QUEUE1 = 0x00010000, | ||
592 | TXCS_SELECT_QUEUE2 = 0x00020000, | ||
593 | TXCS_SELECT_QUEUE3 = 0x00030000, | ||
594 | TXCS_SELECT_QUEUE4 = 0x00040000, | ||
595 | TXCS_SELECT_QUEUE5 = 0x00050000, | ||
596 | TXCS_SELECT_QUEUE6 = 0x00060000, | ||
597 | TXCS_SELECT_QUEUE7 = 0x00070000, | ||
598 | |||
599 | TXCS_DEFAULT = TXCS_FIFOTH_4QW | | ||
600 | TXCS_BURST, | ||
601 | }; | ||
602 | |||
603 | #define JME_TX_DISABLE_TIMEOUT 10 /* 10 msec */ | ||
604 | |||
605 | /* | ||
606 | * TX MAC Control/Status Bits | ||
607 | */ | ||
608 | enum jme_txmcs_bit_masks { | ||
609 | TXMCS_IFG2 = 0xC0000000, | ||
610 | TXMCS_IFG1 = 0x30000000, | ||
611 | TXMCS_TTHOLD = 0x00000300, | ||
612 | TXMCS_FBURST = 0x00000080, | ||
613 | TXMCS_CARRIEREXT = 0x00000040, | ||
614 | TXMCS_DEFER = 0x00000020, | ||
615 | TXMCS_BACKOFF = 0x00000010, | ||
616 | TXMCS_CARRIERSENSE = 0x00000008, | ||
617 | TXMCS_COLLISION = 0x00000004, | ||
618 | TXMCS_CRC = 0x00000002, | ||
619 | TXMCS_PADDING = 0x00000001, | ||
620 | }; | ||
621 | |||
622 | enum jme_txmcs_values { | ||
623 | TXMCS_IFG2_6_4 = 0x00000000, | ||
624 | TXMCS_IFG2_8_5 = 0x40000000, | ||
625 | TXMCS_IFG2_10_6 = 0x80000000, | ||
626 | TXMCS_IFG2_12_7 = 0xC0000000, | ||
627 | |||
628 | TXMCS_IFG1_8_4 = 0x00000000, | ||
629 | TXMCS_IFG1_12_6 = 0x10000000, | ||
630 | TXMCS_IFG1_16_8 = 0x20000000, | ||
631 | TXMCS_IFG1_20_10 = 0x30000000, | ||
632 | |||
633 | TXMCS_TTHOLD_1_8 = 0x00000000, | ||
634 | TXMCS_TTHOLD_1_4 = 0x00000100, | ||
635 | TXMCS_TTHOLD_1_2 = 0x00000200, | ||
636 | TXMCS_TTHOLD_FULL = 0x00000300, | ||
637 | |||
638 | TXMCS_DEFAULT = TXMCS_IFG2_8_5 | | ||
639 | TXMCS_IFG1_16_8 | | ||
640 | TXMCS_TTHOLD_FULL | | ||
641 | TXMCS_DEFER | | ||
642 | TXMCS_CRC | | ||
643 | TXMCS_PADDING, | ||
644 | }; | ||
645 | |||
646 | enum jme_txpfc_bits_masks { | ||
647 | TXPFC_VLAN_TAG = 0xFFFF0000, | ||
648 | TXPFC_VLAN_EN = 0x00008000, | ||
649 | TXPFC_PF_EN = 0x00000001, | ||
650 | }; | ||
651 | |||
652 | enum jme_txtrhd_bits_masks { | ||
653 | TXTRHD_TXPEN = 0x80000000, | ||
654 | TXTRHD_TXP = 0x7FFFFF00, | ||
655 | TXTRHD_TXREN = 0x00000080, | ||
656 | TXTRHD_TXRL = 0x0000007F, | ||
657 | }; | ||
658 | |||
659 | enum jme_txtrhd_shifts { | ||
660 | TXTRHD_TXP_SHIFT = 8, | ||
661 | TXTRHD_TXRL_SHIFT = 0, | ||
662 | }; | ||
663 | |||
664 | /* | ||
665 | * RX Control/Status Bits | ||
666 | */ | ||
667 | enum jme_rxcs_bit_masks { | ||
668 | /* FIFO full threshold for transmitting Tx Pause Packet */ | ||
669 | RXCS_FIFOTHTP = 0x30000000, | ||
670 | /* FIFO threshold for processing next packet */ | ||
671 | RXCS_FIFOTHNP = 0x0C000000, | ||
672 | RXCS_DMAREQSZ = 0x03000000, /* DMA Request Size */ | ||
673 | RXCS_QUEUESEL = 0x00030000, /* Queue selection */ | ||
674 | RXCS_RETRYGAP = 0x0000F000, /* RX Desc full retry gap */ | ||
675 | RXCS_RETRYCNT = 0x00000F00, /* RX Desc full retry counter */ | ||
676 | RXCS_WAKEUP = 0x00000040, /* Enable receive wakeup packet */ | ||
677 | RXCS_MAGIC = 0x00000020, /* Enable receive magic packet */ | ||
678 | RXCS_SHORT = 0x00000010, /* Enable receive short packet */ | ||
679 | RXCS_ABORT = 0x00000008, /* Enable receive error packet */ | ||
680 | RXCS_QST = 0x00000004, /* Receive queue start */ | ||
681 | RXCS_SUSPEND = 0x00000002, | ||
682 | RXCS_ENABLE = 0x00000001, | ||
683 | }; | ||
684 | |||
685 | enum jme_rxcs_values { | ||
686 | RXCS_FIFOTHTP_16T = 0x00000000, | ||
687 | RXCS_FIFOTHTP_32T = 0x10000000, | ||
688 | RXCS_FIFOTHTP_64T = 0x20000000, | ||
689 | RXCS_FIFOTHTP_128T = 0x30000000, | ||
690 | |||
691 | RXCS_FIFOTHNP_16QW = 0x00000000, | ||
692 | RXCS_FIFOTHNP_32QW = 0x04000000, | ||
693 | RXCS_FIFOTHNP_64QW = 0x08000000, | ||
694 | RXCS_FIFOTHNP_128QW = 0x0C000000, | ||
695 | |||
696 | RXCS_DMAREQSZ_16B = 0x00000000, | ||
697 | RXCS_DMAREQSZ_32B = 0x01000000, | ||
698 | RXCS_DMAREQSZ_64B = 0x02000000, | ||
699 | RXCS_DMAREQSZ_128B = 0x03000000, | ||
700 | |||
701 | RXCS_QUEUESEL_Q0 = 0x00000000, | ||
702 | RXCS_QUEUESEL_Q1 = 0x00010000, | ||
703 | RXCS_QUEUESEL_Q2 = 0x00020000, | ||
704 | RXCS_QUEUESEL_Q3 = 0x00030000, | ||
705 | |||
706 | RXCS_RETRYGAP_256ns = 0x00000000, | ||
707 | RXCS_RETRYGAP_512ns = 0x00001000, | ||
708 | RXCS_RETRYGAP_1024ns = 0x00002000, | ||
709 | RXCS_RETRYGAP_2048ns = 0x00003000, | ||
710 | RXCS_RETRYGAP_4096ns = 0x00004000, | ||
711 | RXCS_RETRYGAP_8192ns = 0x00005000, | ||
712 | RXCS_RETRYGAP_16384ns = 0x00006000, | ||
713 | RXCS_RETRYGAP_32768ns = 0x00007000, | ||
714 | |||
715 | RXCS_RETRYCNT_0 = 0x00000000, | ||
716 | RXCS_RETRYCNT_4 = 0x00000100, | ||
717 | RXCS_RETRYCNT_8 = 0x00000200, | ||
718 | RXCS_RETRYCNT_12 = 0x00000300, | ||
719 | RXCS_RETRYCNT_16 = 0x00000400, | ||
720 | RXCS_RETRYCNT_20 = 0x00000500, | ||
721 | RXCS_RETRYCNT_24 = 0x00000600, | ||
722 | RXCS_RETRYCNT_28 = 0x00000700, | ||
723 | RXCS_RETRYCNT_32 = 0x00000800, | ||
724 | RXCS_RETRYCNT_36 = 0x00000900, | ||
725 | RXCS_RETRYCNT_40 = 0x00000A00, | ||
726 | RXCS_RETRYCNT_44 = 0x00000B00, | ||
727 | RXCS_RETRYCNT_48 = 0x00000C00, | ||
728 | RXCS_RETRYCNT_52 = 0x00000D00, | ||
729 | RXCS_RETRYCNT_56 = 0x00000E00, | ||
730 | RXCS_RETRYCNT_60 = 0x00000F00, | ||
731 | |||
732 | RXCS_DEFAULT = RXCS_FIFOTHTP_128T | | ||
733 | RXCS_FIFOTHNP_128QW | | ||
734 | RXCS_DMAREQSZ_128B | | ||
735 | RXCS_RETRYGAP_256ns | | ||
736 | RXCS_RETRYCNT_32, | ||
737 | }; | ||
738 | |||
739 | #define JME_RX_DISABLE_TIMEOUT 10 /* 10 msec */ | ||
740 | |||
741 | /* | ||
742 | * RX MAC Control/Status Bits | ||
743 | */ | ||
744 | enum jme_rxmcs_bits { | ||
745 | RXMCS_ALLFRAME = 0x00000800, | ||
746 | RXMCS_BRDFRAME = 0x00000400, | ||
747 | RXMCS_MULFRAME = 0x00000200, | ||
748 | RXMCS_UNIFRAME = 0x00000100, | ||
749 | RXMCS_ALLMULFRAME = 0x00000080, | ||
750 | RXMCS_MULFILTERED = 0x00000040, | ||
751 | RXMCS_RXCOLLDEC = 0x00000020, | ||
752 | RXMCS_FLOWCTRL = 0x00000008, | ||
753 | RXMCS_VTAGRM = 0x00000004, | ||
754 | RXMCS_PREPAD = 0x00000002, | ||
755 | RXMCS_CHECKSUM = 0x00000001, | ||
756 | |||
757 | RXMCS_DEFAULT = RXMCS_VTAGRM | | ||
758 | RXMCS_PREPAD | | ||
759 | RXMCS_FLOWCTRL | | ||
760 | RXMCS_CHECKSUM, | ||
761 | }; | ||
762 | |||
763 | /* | ||
764 | * Wakeup Frame setup interface registers | ||
765 | */ | ||
766 | #define WAKEUP_FRAME_NR 8 | ||
767 | #define WAKEUP_FRAME_MASK_DWNR 4 | ||
768 | |||
769 | enum jme_wfoi_bit_masks { | ||
770 | WFOI_MASK_SEL = 0x00000070, | ||
771 | WFOI_CRC_SEL = 0x00000008, | ||
772 | WFOI_FRAME_SEL = 0x00000007, | ||
773 | }; | ||
774 | |||
775 | enum jme_wfoi_shifts { | ||
776 | WFOI_MASK_SHIFT = 4, | ||
777 | }; | ||
778 | |||
779 | /* | ||
780 | * SMI Related definitions | ||
781 | */ | ||
782 | enum jme_smi_bit_mask { | ||
783 | SMI_DATA_MASK = 0xFFFF0000, | ||
784 | SMI_REG_ADDR_MASK = 0x0000F800, | ||
785 | SMI_PHY_ADDR_MASK = 0x000007C0, | ||
786 | SMI_OP_WRITE = 0x00000020, | ||
787 | /* Set to 1; hardware clears it to 0 when the request completes */ | ||
788 | SMI_OP_REQ = 0x00000010, | ||
789 | SMI_OP_MDIO = 0x00000008, /* Software access In/Out */ | ||
790 | SMI_OP_MDOE = 0x00000004, /* Software Output Enable */ | ||
791 | SMI_OP_MDC = 0x00000002, /* Software CLK Control */ | ||
792 | SMI_OP_MDEN = 0x00000001, /* Software access Enable */ | ||
793 | }; | ||
794 | |||
795 | enum jme_smi_bit_shift { | ||
796 | SMI_DATA_SHIFT = 16, | ||
797 | SMI_REG_ADDR_SHIFT = 11, | ||
798 | SMI_PHY_ADDR_SHIFT = 6, | ||
799 | }; | ||
800 | |||
801 | static inline u32 smi_reg_addr(int x) | ||
802 | { | ||
803 | return (x << SMI_REG_ADDR_SHIFT) & SMI_REG_ADDR_MASK; | ||
804 | } | ||
805 | |||
806 | static inline u32 smi_phy_addr(int x) | ||
807 | { | ||
808 | return (x << SMI_PHY_ADDR_SHIFT) & SMI_PHY_ADDR_MASK; | ||
809 | } | ||
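/*
 * Sketch (assumption, not from the driver): composing one SMI write
 * request word from the fields above.  A real MDIO write would then
 * poll SMI_OP_REQ until the hardware clears it (see JME_PHY_TIMEOUT).
 */
static inline u32 smi_write_cmd_sketch(int phy, int reg, u16 data)
{
	return ((u32)data << SMI_DATA_SHIFT) |
	       smi_phy_addr(phy) | smi_reg_addr(reg) |
	       SMI_OP_WRITE | SMI_OP_REQ;
}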
810 | |||
811 | #define JME_PHY_TIMEOUT 100 /* 100 msec */ | ||
812 | #define JME_PHY_REG_NR 32 | ||
813 | |||
814 | /* | ||
815 | * Global Host Control | ||
816 | */ | ||
817 | enum jme_ghc_bit_mask { | ||
818 | GHC_SWRST = 0x40000000, | ||
819 | GHC_DPX = 0x00000040, | ||
820 | GHC_SPEED = 0x00000030, | ||
821 | GHC_LINK_POLL = 0x00000001, | ||
822 | }; | ||
823 | |||
824 | enum jme_ghc_speed_val { | ||
825 | GHC_SPEED_10M = 0x00000010, | ||
826 | GHC_SPEED_100M = 0x00000020, | ||
827 | GHC_SPEED_1000M = 0x00000030, | ||
828 | }; | ||
829 | |||
830 | /* | ||
831 | * Power management control and status register | ||
832 | */ | ||
833 | enum jme_pmcs_bit_masks { | ||
834 | PMCS_WF7DET = 0x80000000, | ||
835 | PMCS_WF6DET = 0x40000000, | ||
836 | PMCS_WF5DET = 0x20000000, | ||
837 | PMCS_WF4DET = 0x10000000, | ||
838 | PMCS_WF3DET = 0x08000000, | ||
839 | PMCS_WF2DET = 0x04000000, | ||
840 | PMCS_WF1DET = 0x02000000, | ||
841 | PMCS_WF0DET = 0x01000000, | ||
842 | PMCS_LFDET = 0x00040000, | ||
843 | PMCS_LRDET = 0x00020000, | ||
844 | PMCS_MFDET = 0x00010000, | ||
845 | PMCS_WF7EN = 0x00008000, | ||
846 | PMCS_WF6EN = 0x00004000, | ||
847 | PMCS_WF5EN = 0x00002000, | ||
848 | PMCS_WF4EN = 0x00001000, | ||
849 | PMCS_WF3EN = 0x00000800, | ||
850 | PMCS_WF2EN = 0x00000400, | ||
851 | PMCS_WF1EN = 0x00000200, | ||
852 | PMCS_WF0EN = 0x00000100, | ||
853 | PMCS_LFEN = 0x00000004, | ||
854 | PMCS_LREN = 0x00000002, | ||
855 | PMCS_MFEN = 0x00000001, | ||
856 | }; | ||
857 | |||
858 | /* | ||
859 | * Giga PHY Status Registers | ||
860 | */ | ||
861 | enum jme_phy_link_bit_mask { | ||
862 | PHY_LINK_SPEED_MASK = 0x0000C000, | ||
863 | PHY_LINK_DUPLEX = 0x00002000, | ||
864 | PHY_LINK_SPEEDDPU_RESOLVED = 0x00000800, | ||
865 | PHY_LINK_UP = 0x00000400, | ||
866 | PHY_LINK_AUTONEG_COMPLETE = 0x00000200, | ||
867 | PHY_LINK_MDI_STAT = 0x00000040, | ||
868 | }; | ||
869 | |||
870 | enum jme_phy_link_speed_val { | ||
871 | PHY_LINK_SPEED_10M = 0x00000000, | ||
872 | PHY_LINK_SPEED_100M = 0x00004000, | ||
873 | PHY_LINK_SPEED_1000M = 0x00008000, | ||
874 | }; | ||
875 | |||
876 | #define JME_SPDRSV_TIMEOUT 500 /* 500 us */ | ||
877 | |||
878 | /* | ||
879 | * SMB Control and Status | ||
880 | */ | ||
881 | enum jme_smbcsr_bit_mask { | ||
882 | SMBCSR_CNACK = 0x00020000, | ||
883 | SMBCSR_RELOAD = 0x00010000, | ||
884 | SMBCSR_EEPROMD = 0x00000020, | ||
885 | SMBCSR_INITDONE = 0x00000010, | ||
886 | SMBCSR_BUSY = 0x0000000F, | ||
887 | }; | ||
888 | |||
889 | enum jme_smbintf_bit_mask { | ||
890 | SMBINTF_HWDATR = 0xFF000000, | ||
891 | SMBINTF_HWDATW = 0x00FF0000, | ||
892 | SMBINTF_HWADDR = 0x0000FF00, | ||
893 | SMBINTF_HWRWN = 0x00000020, | ||
894 | SMBINTF_HWCMD = 0x00000010, | ||
895 | SMBINTF_FASTM = 0x00000008, | ||
896 | SMBINTF_GPIOSCL = 0x00000004, | ||
897 | SMBINTF_GPIOSDA = 0x00000002, | ||
898 | SMBINTF_GPIOEN = 0x00000001, | ||
899 | }; | ||
900 | |||
901 | enum jme_smbintf_vals { | ||
902 | SMBINTF_HWRWN_READ = 0x00000020, | ||
903 | SMBINTF_HWRWN_WRITE = 0x00000000, | ||
904 | }; | ||
905 | |||
906 | enum jme_smbintf_shifts { | ||
907 | SMBINTF_HWDATR_SHIFT = 24, | ||
908 | SMBINTF_HWDATW_SHIFT = 16, | ||
909 | SMBINTF_HWADDR_SHIFT = 8, | ||
910 | }; | ||
911 | |||
912 | #define JME_EEPROM_RELOAD_TIMEOUT 2000 /* 2000 msec */ | ||
913 | #define JME_SMB_BUSY_TIMEOUT 20 /* 20 msec */ | ||
914 | #define JME_SMB_LEN 256 | ||
915 | #define JME_EEPROM_MAGIC 0x250 | ||
916 | |||
917 | /* | ||
918 | * Timer Control/Status Register | ||
919 | */ | ||
920 | enum jme_tmcsr_bit_masks { | ||
921 | TMCSR_SWIT = 0x80000000, | ||
922 | TMCSR_EN = 0x01000000, | ||
923 | TMCSR_CNT = 0x00FFFFFF, | ||
924 | }; | ||
925 | |||
926 | /* | ||
927 | * General Purpose REG-0 | ||
928 | */ | ||
929 | enum jme_gpreg0_masks { | ||
930 | GPREG0_DISSH = 0xFF000000, | ||
931 | GPREG0_PCIRLMT = 0x00300000, | ||
932 | GPREG0_PCCNOMUTCLR = 0x00040000, | ||
933 | GPREG0_LNKINTPOLL = 0x00001000, | ||
934 | GPREG0_PCCTMR = 0x00000300, | ||
935 | GPREG0_PHYADDR = 0x0000001F, | ||
936 | }; | ||
937 | |||
938 | enum jme_gpreg0_vals { | ||
939 | GPREG0_DISSH_DW7 = 0x80000000, | ||
940 | GPREG0_DISSH_DW6 = 0x40000000, | ||
941 | GPREG0_DISSH_DW5 = 0x20000000, | ||
942 | GPREG0_DISSH_DW4 = 0x10000000, | ||
943 | GPREG0_DISSH_DW3 = 0x08000000, | ||
944 | GPREG0_DISSH_DW2 = 0x04000000, | ||
945 | GPREG0_DISSH_DW1 = 0x02000000, | ||
946 | GPREG0_DISSH_DW0 = 0x01000000, | ||
947 | GPREG0_DISSH_ALL = 0xFF000000, | ||
948 | |||
949 | GPREG0_PCIRLMT_8 = 0x00000000, | ||
950 | GPREG0_PCIRLMT_6 = 0x00100000, | ||
951 | GPREG0_PCIRLMT_5 = 0x00200000, | ||
952 | GPREG0_PCIRLMT_4 = 0x00300000, | ||
953 | |||
954 | GPREG0_PCCTMR_16ns = 0x00000000, | ||
955 | GPREG0_PCCTMR_256ns = 0x00000100, | ||
956 | GPREG0_PCCTMR_1us = 0x00000200, | ||
957 | GPREG0_PCCTMR_1ms = 0x00000300, | ||
958 | |||
959 | GPREG0_PHYADDR_1 = 0x00000001, | ||
960 | |||
961 | GPREG0_DEFAULT = GPREG0_PCIRLMT_4 | | ||
962 | GPREG0_PCCTMR_1us | | ||
963 | GPREG0_PHYADDR_1, | ||
964 | }; | ||
965 | |||
966 | /* | ||
967 | * Interrupt Status Bits | ||
968 | */ | ||
969 | enum jme_interrupt_bits { | ||
970 | INTR_SWINTR = 0x80000000, | ||
971 | INTR_TMINTR = 0x40000000, | ||
972 | INTR_LINKCH = 0x20000000, | ||
973 | INTR_PAUSERCV = 0x10000000, | ||
974 | INTR_MAGICRCV = 0x08000000, | ||
975 | INTR_WAKERCV = 0x04000000, | ||
976 | INTR_PCCRX0TO = 0x02000000, | ||
977 | INTR_PCCRX1TO = 0x01000000, | ||
978 | INTR_PCCRX2TO = 0x00800000, | ||
979 | INTR_PCCRX3TO = 0x00400000, | ||
980 | INTR_PCCTXTO = 0x00200000, | ||
981 | INTR_PCCRX0 = 0x00100000, | ||
982 | INTR_PCCRX1 = 0x00080000, | ||
983 | INTR_PCCRX2 = 0x00040000, | ||
984 | INTR_PCCRX3 = 0x00020000, | ||
985 | INTR_PCCTX = 0x00010000, | ||
986 | INTR_RX3EMP = 0x00008000, | ||
987 | INTR_RX2EMP = 0x00004000, | ||
988 | INTR_RX1EMP = 0x00002000, | ||
989 | INTR_RX0EMP = 0x00001000, | ||
990 | INTR_RX3 = 0x00000800, | ||
991 | INTR_RX2 = 0x00000400, | ||
992 | INTR_RX1 = 0x00000200, | ||
993 | INTR_RX0 = 0x00000100, | ||
994 | INTR_TX7 = 0x00000080, | ||
995 | INTR_TX6 = 0x00000040, | ||
996 | INTR_TX5 = 0x00000020, | ||
997 | INTR_TX4 = 0x00000010, | ||
998 | INTR_TX3 = 0x00000008, | ||
999 | INTR_TX2 = 0x00000004, | ||
1000 | INTR_TX1 = 0x00000002, | ||
1001 | INTR_TX0 = 0x00000001, | ||
1002 | }; | ||
1003 | |||
1004 | static const u32 INTR_ENABLE = INTR_SWINTR | | ||
1005 | INTR_TMINTR | | ||
1006 | INTR_LINKCH | | ||
1007 | INTR_PCCRX0TO | | ||
1008 | INTR_PCCRX0 | | ||
1009 | INTR_PCCTXTO | | ||
1010 | INTR_PCCTX | | ||
1011 | INTR_RX0EMP; | ||
1012 | |||
1013 | /* | ||
1014 | * PCC Control Registers | ||
1015 | */ | ||
1016 | enum jme_pccrx_masks { | ||
1017 | PCCRXTO_MASK = 0xFFFF0000, | ||
1018 | PCCRX_MASK = 0x0000FF00, | ||
1019 | }; | ||
1020 | |||
1021 | enum jme_pcctx_masks { | ||
1022 | PCCTXTO_MASK = 0xFFFF0000, | ||
1023 | PCCTX_MASK = 0x0000FF00, | ||
1024 | PCCTX_QS_MASK = 0x000000FF, | ||
1025 | }; | ||
1026 | |||
1027 | enum jme_pccrx_shifts { | ||
1028 | PCCRXTO_SHIFT = 16, | ||
1029 | PCCRX_SHIFT = 8, | ||
1030 | }; | ||
1031 | |||
1032 | enum jme_pcctx_shifts { | ||
1033 | PCCTXTO_SHIFT = 16, | ||
1034 | PCCTX_SHIFT = 8, | ||
1035 | }; | ||
1036 | |||
1037 | enum jme_pcctx_bits { | ||
1038 | PCCTXQ0_EN = 0x00000001, | ||
1039 | PCCTXQ1_EN = 0x00000002, | ||
1040 | PCCTXQ2_EN = 0x00000004, | ||
1041 | PCCTXQ3_EN = 0x00000008, | ||
1042 | PCCTXQ4_EN = 0x00000010, | ||
1043 | PCCTXQ5_EN = 0x00000020, | ||
1044 | PCCTXQ6_EN = 0x00000040, | ||
1045 | PCCTXQ7_EN = 0x00000080, | ||
1046 | }; | ||
1047 | |||
1048 | /* | ||
1049 | * Chip Mode Register | ||
1050 | */ | ||
1051 | enum jme_chipmode_bit_masks { | ||
1052 | CM_FPGAVER_MASK = 0xFFFF0000, | ||
1053 | CM_CHIPREV_MASK = 0x0000FF00, | ||
1054 | CM_CHIPMODE_MASK = 0x0000000F, | ||
1055 | }; | ||
1056 | |||
1057 | enum jme_chipmode_shifts { | ||
1058 | CM_FPGAVER_SHIFT = 16, | ||
1059 | CM_CHIPREV_SHIFT = 8, | ||
1060 | }; | ||
1061 | |||
1062 | /* | ||
1063 | * Shadow base address register bits | ||
1064 | */ | ||
1065 | enum jme_shadow_base_address_bits { | ||
1066 | SHBA_POSTEN = 0x1, | ||
1067 | }; | ||
1068 | |||
1069 | /* | ||
1070 | * Aggressive Power Mode Control | ||
1071 | */ | ||
1072 | enum jme_apmc_bits { | ||
1073 | JME_APMC_PCIE_SD_EN = 0x40000000, | ||
1074 | JME_APMC_PSEUDO_HP_EN = 0x20000000, | ||
1075 | JME_APMC_EPIEN = 0x04000000, | ||
1076 | JME_APMC_EPIEN_CTRL = 0x03000000, | ||
1077 | }; | ||
1078 | |||
1079 | enum jme_apmc_values { | ||
1080 | JME_APMC_EPIEN_CTRL_EN = 0x02000000, | ||
1081 | JME_APMC_EPIEN_CTRL_DIS = 0x01000000, | ||
1082 | }; | ||
1083 | |||
1084 | #define APMC_PHP_SHUTDOWN_DELAY (10 * 1000 * 1000) | ||
1085 | |||
1086 | #ifdef REG_DEBUG | ||
1087 | static char *MAC_REG_NAME[] = { | ||
1088 | "JME_TXCS", "JME_TXDBA_LO", "JME_TXDBA_HI", "JME_TXQDC", | ||
1089 | "JME_TXNDA", "JME_TXMCS", "JME_TXPFC", "JME_TXTRHD", | ||
1090 | "JME_RXCS", "JME_RXDBA_LO", "JME_RXDBA_HI", "JME_RXQDC", | ||
1091 | "JME_RXNDA", "JME_RXMCS", "JME_RXUMA_LO", "JME_RXUMA_HI", | ||
1092 | "JME_RXMCHT_LO", "JME_RXMCHT_HI", "JME_WFODP", "JME_WFOI", | ||
1093 | "JME_SMI", "JME_GHC", "UNKNOWN", "UNKNOWN", | ||
1094 | "JME_PMCS"}; | ||
1095 | |||
1096 | static char *PE_REG_NAME[] = { | ||
1097 | "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", | ||
1098 | "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", | ||
1099 | "UNKNOWN", "UNKNOWN", "JME_PHY_CS", "UNKNOWN", | ||
1100 | "JME_PHY_LINK", "UNKNOWN", "UNKNOWN", "UNKNOWN", | ||
1101 | "JME_SMBCSR", "JME_SMBINTF"}; | ||
1102 | |||
1103 | static char *MISC_REG_NAME[] = { | ||
1104 | "JME_TMCSR", "JME_GPIO", "JME_GPREG0", "JME_GPREG1", | ||
1105 | "JME_IEVE", "JME_IREQ", "JME_IENS", "JME_IENC", | ||
1106 | "JME_PCCRX0", "JME_PCCRX1", "JME_PCCRX2", "JME_PCCRX3", | ||
1107 | "JME_PCCTX0", "JME_CHIPMODE", "JME_SHBA_HI", "JME_SHBA_LO", | ||
1108 | "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", | ||
1109 | "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", | ||
1110 | "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", | ||
1111 | "JME_TIMER1", "JME_TIMER2", "UNKNOWN", "JME_APMC", | ||
1112 | "JME_PCCSRX0"}; | ||
1113 | |||
1114 | static inline void reg_dbg(const struct jme_adapter *jme, | ||
1115 | const char *msg, u32 val, u32 reg) | ||
1116 | { | ||
1117 | const char *regname; | ||
1118 | switch (reg & 0xF00) { | ||
1119 | case 0x000: | ||
1120 | regname = MAC_REG_NAME[(reg & 0xFF) >> 2]; | ||
1121 | break; | ||
1122 | case 0x400: | ||
1123 | regname = PE_REG_NAME[(reg & 0xFF) >> 2]; | ||
1124 | break; | ||
1125 | case 0x800: | ||
1126 | regname = MISC_REG_NAME[(reg & 0xFF) >> 2]; | ||
1127 | break; | ||
1128 | default: | ||
1129 | regname = PE_REG_NAME[0]; | ||
1130 | } | ||
1131 | printk(KERN_DEBUG "%s: %-20s %08x@%s\n", jme->dev->name, | ||
1132 | msg, val, regname); | ||
1133 | } | ||
1134 | #else | ||
1135 | static inline void reg_dbg(const struct jme_adapter *jme, | ||
1136 | const char *msg, u32 val, u32 reg) {} | ||
1137 | #endif | ||
1138 | |||
1139 | /* | ||
1140 | * Read/Write Memory-mapped I/O Registers | ||
1141 | */ | ||
1142 | static inline u32 jread32(struct jme_adapter *jme, u32 reg) | ||
1143 | { | ||
1144 | return readl(jme->regs + reg); | ||
1145 | } | ||
1146 | |||
1147 | static inline void jwrite32(struct jme_adapter *jme, u32 reg, u32 val) | ||
1148 | { | ||
1149 | reg_dbg(jme, "REG WRITE", val, reg); | ||
1150 | writel(val, jme->regs + reg); | ||
1151 | reg_dbg(jme, "VAL AFTER WRITE", readl(jme->regs + reg), reg); | ||
1152 | } | ||
1153 | |||
1154 | static inline void jwrite32f(struct jme_adapter *jme, u32 reg, u32 val) | ||
1155 | { | ||
1156 | /* | ||
1157 | * Read after write should cause flush | ||
1158 | */ | ||
1159 | reg_dbg(jme, "REG WRITE FLUSH", val, reg); | ||
1160 | writel(val, jme->regs + reg); | ||
1161 | readl(jme->regs + reg); | ||
1162 | reg_dbg(jme, "VAL AFTER WRITE", readl(jme->regs + reg), reg); | ||
1163 | } | ||
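/*
 * Usage sketch (assumption): the flushing variant matters wherever the
 * write must reach the chip before the CPU continues, e.g. disabling
 * and acknowledging all handled interrupt sources in one go:
 */
static inline void jme_mask_intrs_sketch(struct jme_adapter *jme)
{
	jwrite32f(jme, JME_IENC, INTR_ENABLE);	/* clear enable bits */
	jwrite32f(jme, JME_IEVE, INTR_ENABLE);	/* ack pending events */
}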
1164 | |||
1165 | /* | ||
1166 | * PHY Regs | ||
1167 | */ | ||
1168 | enum jme_phy_reg17_bit_masks { | ||
1169 | PREG17_SPEED = 0xC000, | ||
1170 | PREG17_DUPLEX = 0x2000, | ||
1171 | PREG17_SPDRSV = 0x0800, | ||
1172 | PREG17_LNKUP = 0x0400, | ||
1173 | PREG17_MDI = 0x0040, | ||
1174 | }; | ||
1175 | |||
1176 | enum jme_phy_reg17_vals { | ||
1177 | PREG17_SPEED_10M = 0x0000, | ||
1178 | PREG17_SPEED_100M = 0x4000, | ||
1179 | PREG17_SPEED_1000M = 0x8000, | ||
1180 | }; | ||
1181 | |||
1182 | #define BMSR_ANCOMP 0x0020 | ||
1183 | |||
1184 | /* | ||
1185 | * Workaround | ||
1186 | */ | ||
1187 | static inline int is_buggy250(unsigned short device, unsigned int chiprev) | ||
1188 | { | ||
1189 | return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11; | ||
1190 | } | ||
1191 | |||
1192 | /* | ||
1193 | * Function prototypes | ||
1194 | */ | ||
1195 | static int jme_set_settings(struct net_device *netdev, | ||
1196 | struct ethtool_cmd *ecmd); | ||
1197 | static void jme_set_multi(struct net_device *netdev); | ||
1198 | |||
1199 | #endif | ||
diff --git a/drivers/net/qlge/Makefile b/drivers/net/qlge/Makefile new file mode 100644 index 000000000000..8a197658d76f --- /dev/null +++ b/drivers/net/qlge/Makefile | |||
@@ -0,0 +1,7 @@ | |||
1 | # | ||
2 | # Makefile for the Qlogic 10GbE PCI Express ethernet driver | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_QLGE) += qlge.o | ||
6 | |||
7 | qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o | ||
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h new file mode 100644 index 000000000000..c37ea436c918 --- /dev/null +++ b/drivers/net/qlge/qlge.h | |||
@@ -0,0 +1,1593 @@ | |||
1 | /* | ||
2 | * QLogic QLA41xx NIC HBA Driver | ||
3 | * Copyright (c) 2003-2006 QLogic Corporation | ||
4 | * | ||
5 | * See LICENSE.qlge for copyright and licensing details. | ||
6 | */ | ||
7 | #ifndef _QLGE_H_ | ||
8 | #define _QLGE_H_ | ||
9 | |||
10 | #include <linux/pci.h> | ||
11 | #include <linux/netdevice.h> | ||
12 | |||
13 | /* | ||
14 | * General definitions... | ||
15 | */ | ||
16 | #define DRV_NAME "qlge" | ||
17 | #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " | ||
18 | #define DRV_VERSION "v1.00.00-b3" | ||
19 | |||
20 | #define PFX "qlge: " | ||
21 | #define QPRINTK(qdev, nlevel, klevel, fmt, args...) \ | ||
22 | do { \ | ||
23 | if (!((qdev)->msg_enable & NETIF_MSG_##nlevel)) \ | ||
24 | ; \ | ||
25 | else \ | ||
26 | dev_printk(KERN_##klevel, &((qdev)->pdev->dev), \ | ||
27 | "%s: " fmt, __func__, ##args); \ | ||
28 | } while (0) | ||
29 | |||
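/*
 * Usage sketch (illustrative, added here): QPRINTK() gates on a
 * netif_msg_* category bit in qdev->msg_enable, so a probe-time error
 * would look like
 *
 *	QPRINTK(qdev, PROBE, ERR, "failed to map registers\n");
 *
 * which prints at KERN_ERR only when NETIF_MSG_PROBE is enabled.
 */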
30 | #define QLGE_VENDOR_ID 0x1077 | ||
31 | #define QLGE_DEVICE_ID1 0x8012 | ||
32 | #define QLGE_DEVICE_ID 0x8000 | ||
33 | |||
34 | #define MAX_RX_RINGS 128 | ||
35 | #define MAX_TX_RINGS 128 | ||
36 | |||
37 | #define NUM_TX_RING_ENTRIES 256 | ||
38 | #define NUM_RX_RING_ENTRIES 256 | ||
39 | |||
40 | #define NUM_SMALL_BUFFERS 512 | ||
41 | #define NUM_LARGE_BUFFERS 512 | ||
42 | |||
43 | #define SMALL_BUFFER_SIZE 256 | ||
44 | #define LARGE_BUFFER_SIZE PAGE_SIZE | ||
45 | #define MAX_SPLIT_SIZE 1023 | ||
46 | #define QLGE_SB_PAD 32 | ||
47 | |||
48 | #define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */ | ||
49 | #define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */ | ||
50 | #define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2) | ||
51 | #define UDELAY_COUNT 3 | ||
52 | #define UDELAY_DELAY 10 | ||
53 | |||
54 | |||
55 | #define TX_DESC_PER_IOCB 8 | ||
56 | /* The maximum number of frags we handle is based | ||
57 | * on PAGE_SIZE... | ||
58 | */ | ||
59 | #if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */ | ||
60 | #define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) | ||
61 | #elif (PAGE_SHIFT == 16) /* 64k pages */ | ||
62 | #define TX_DESC_PER_OAL 0 | ||
63 | #endif | ||
64 | |||
65 | #define DB_PAGE_SIZE 4096 | ||
66 | |||
67 | /* | ||
68 | * Processor Address Register (PROC_ADDR) bit definitions. | ||
69 | */ | ||
70 | enum { | ||
71 | |||
72 | /* Misc. stuff */ | ||
73 | MAILBOX_COUNT = 16, | ||
74 | |||
75 | PROC_ADDR_RDY = (1 << 31), | ||
76 | PROC_ADDR_R = (1 << 30), | ||
77 | PROC_ADDR_ERR = (1 << 29), | ||
78 | PROC_ADDR_DA = (1 << 28), | ||
79 | PROC_ADDR_FUNC0_MBI = 0x00001180, | ||
80 | PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT), | ||
81 | PROC_ADDR_FUNC0_CTL = 0x000011a1, | ||
82 | PROC_ADDR_FUNC2_MBI = 0x00001280, | ||
83 | PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT), | ||
84 | PROC_ADDR_FUNC2_CTL = 0x000012a1, | ||
85 | PROC_ADDR_MPI_RISC = 0x00000000, | ||
86 | PROC_ADDR_MDE = 0x00010000, | ||
87 | PROC_ADDR_REGBLOCK = 0x00020000, | ||
88 | PROC_ADDR_RISC_REG = 0x00030000, | ||
89 | }; | ||
90 | |||
91 | /* | ||
92 | * System Register (SYS) bit definitions. | ||
93 | */ | ||
94 | enum { | ||
95 | SYS_EFE = (1 << 0), | ||
96 | SYS_FAE = (1 << 1), | ||
97 | SYS_MDC = (1 << 2), | ||
98 | SYS_DST = (1 << 3), | ||
99 | SYS_DWC = (1 << 4), | ||
100 | SYS_EVW = (1 << 5), | ||
101 | SYS_OMP_DLY_MASK = 0x3f000000, | ||
102 | /* | ||
103 | * There are no values defined as of edit #15. | ||
104 | */ | ||
105 | SYS_ODI = (1 << 14), | ||
106 | }; | ||
107 | |||
108 | /* | ||
109 | * Reset/Failover Register (RST_FO) bit definitions. | ||
110 | */ | ||
111 | enum { | ||
112 | RST_FO_TFO = (1 << 0), | ||
113 | RST_FO_RR_MASK = 0x00060000, | ||
114 | RST_FO_RR_CQ_CAM = 0x00000000, | ||
115 | RST_FO_RR_DROP = 0x00000001, | ||
116 | RST_FO_RR_DQ = 0x00000002, | ||
117 | RST_FO_RR_RCV_FUNC_CQ = 0x00000003, | ||
118 | RST_FO_FRB = (1 << 12), | ||
119 | RST_FO_MOP = (1 << 13), | ||
120 | RST_FO_REG = (1 << 14), | ||
121 | RST_FO_FR = (1 << 15), | ||
122 | }; | ||
123 | |||
124 | /* | ||
125 | * Function Specific Control Register (FSC) bit definitions. | ||
126 | */ | ||
127 | enum { | ||
128 | FSC_DBRST_MASK = 0x00070000, | ||
129 | FSC_DBRST_256 = 0x00000000, | ||
130 | FSC_DBRST_512 = 0x00000001, | ||
131 | FSC_DBRST_768 = 0x00000002, | ||
132 | FSC_DBRST_1024 = 0x00000003, | ||
133 | FSC_DBL_MASK = 0x00180000, | ||
134 | FSC_DBL_DBRST = 0x00000000, | ||
135 | FSC_DBL_MAX_PLD = 0x00000008, | ||
136 | FSC_DBL_MAX_BRST = 0x00000010, | ||
137 | FSC_DBL_128_BYTES = 0x00000018, | ||
138 | FSC_EC = (1 << 5), | ||
139 | FSC_EPC_MASK = 0x00c00000, | ||
140 | FSC_EPC_INBOUND = (1 << 6), | ||
141 | FSC_EPC_OUTBOUND = (1 << 7), | ||
142 | FSC_VM_PAGESIZE_MASK = 0x07000000, | ||
143 | FSC_VM_PAGE_2K = 0x00000100, | ||
144 | FSC_VM_PAGE_4K = 0x00000200, | ||
145 | FSC_VM_PAGE_8K = 0x00000300, | ||
146 | FSC_VM_PAGE_64K = 0x00000600, | ||
147 | FSC_SH = (1 << 11), | ||
148 | FSC_DSB = (1 << 12), | ||
149 | FSC_STE = (1 << 13), | ||
150 | FSC_FE = (1 << 15), | ||
151 | }; | ||
152 | |||
153 | /* | ||
154 | * Host Command Status Register (CSR) bit definitions. | ||
155 | */ | ||
156 | enum { | ||
157 | CSR_ERR_STS_MASK = 0x0000003f, | ||
158 | /* | ||
159 | * There are no values defined as of edit #15. | ||
160 | */ | ||
161 | CSR_RR = (1 << 8), | ||
162 | CSR_HRI = (1 << 9), | ||
163 | CSR_RP = (1 << 10), | ||
164 | CSR_CMD_PARM_SHIFT = 22, | ||
165 | CSR_CMD_NOP = 0x00000000, | ||
166 | CSR_CMD_SET_RST = 0x1000000, | ||
167 | CSR_CMD_CLR_RST = 0x20000000, | ||
168 | CSR_CMD_SET_PAUSE = 0x30000000, | ||
169 | CSR_CMD_CLR_PAUSE = 0x40000000, | ||
170 | CSR_CMD_SET_H2R_INT = 0x50000000, | ||
171 | CSR_CMD_CLR_H2R_INT = 0x60000000, | ||
172 | CSR_CMD_PAR_EN = 0x70000000, | ||
173 | CSR_CMD_SET_BAD_PAR = 0x80000000, | ||
174 | CSR_CMD_CLR_BAD_PAR = 0x90000000, | ||
175 | CSR_CMD_CLR_R2PCI_INT = 0xa0000000, | ||
176 | }; | ||
177 | |||
178 | /* | ||
179 | * Configuration Register (CFG) bit definitions. | ||
180 | */ | ||
181 | enum { | ||
182 | CFG_LRQ = (1 << 0), | ||
183 | CFG_DRQ = (1 << 1), | ||
184 | CFG_LR = (1 << 2), | ||
185 | CFG_DR = (1 << 3), | ||
186 | CFG_LE = (1 << 5), | ||
187 | CFG_LCQ = (1 << 6), | ||
188 | CFG_DCQ = (1 << 7), | ||
189 | CFG_Q_SHIFT = 8, | ||
190 | CFG_Q_MASK = 0x7f000000, | ||
191 | }; | ||
192 | |||
193 | /* | ||
194 | * Status Register (STS) bit definitions. | ||
195 | */ | ||
196 | enum { | ||
197 | STS_FE = (1 << 0), | ||
198 | STS_PI = (1 << 1), | ||
199 | STS_PL0 = (1 << 2), | ||
200 | STS_PL1 = (1 << 3), | ||
201 | STS_PI0 = (1 << 4), | ||
202 | STS_PI1 = (1 << 5), | ||
203 | STS_FUNC_ID_MASK = 0x000000c0, | ||
204 | STS_FUNC_ID_SHIFT = 6, | ||
205 | STS_F0E = (1 << 8), | ||
206 | STS_F1E = (1 << 9), | ||
207 | STS_F2E = (1 << 10), | ||
208 | STS_F3E = (1 << 11), | ||
209 | STS_NFE = (1 << 12), | ||
210 | }; | ||
211 | |||
212 | /* | ||
213 | * Interrupt Enable Register (INTR_EN) bit definitions. | ||
214 | */ | ||
215 | enum { | ||
216 | INTR_EN_INTR_MASK = 0x007f0000, | ||
217 | INTR_EN_TYPE_MASK = 0x03000000, | ||
218 | INTR_EN_TYPE_ENABLE = 0x00000100, | ||
219 | INTR_EN_TYPE_DISABLE = 0x00000200, | ||
220 | INTR_EN_TYPE_READ = 0x00000300, | ||
221 | INTR_EN_IHD = (1 << 13), | ||
222 | INTR_EN_IHD_MASK = (INTR_EN_IHD << 16), | ||
223 | INTR_EN_EI = (1 << 14), | ||
224 | INTR_EN_EN = (1 << 15), | ||
225 | }; | ||
226 | |||
227 | /* | ||
228 | * Interrupt Mask Register (INTR_MASK) bit definitions. | ||
229 | */ | ||
230 | enum { | ||
231 | INTR_MASK_PI = (1 << 0), | ||
232 | INTR_MASK_HL0 = (1 << 1), | ||
233 | INTR_MASK_LH0 = (1 << 2), | ||
234 | INTR_MASK_HL1 = (1 << 3), | ||
235 | INTR_MASK_LH1 = (1 << 4), | ||
236 | INTR_MASK_SE = (1 << 5), | ||
237 | INTR_MASK_LSC = (1 << 6), | ||
238 | INTR_MASK_MC = (1 << 7), | ||
239 | INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC, | ||
240 | }; | ||
241 | |||
242 | /* | ||
243 | * Register (REV_ID) bit definitions. | ||
244 | */ | ||
245 | enum { | ||
246 | REV_ID_MASK = 0x0000000f, | ||
247 | REV_ID_NICROLL_SHIFT = 0, | ||
248 | REV_ID_NICREV_SHIFT = 4, | ||
249 | REV_ID_XGROLL_SHIFT = 8, | ||
250 | REV_ID_XGREV_SHIFT = 12, | ||
251 | REV_ID_CHIPREV_SHIFT = 28, | ||
252 | }; | ||
253 | |||
254 | /* | ||
255 | * Force ECC Error Register (FRC_ECC_ERR) bit definitions. | ||
256 | */ | ||
257 | enum { | ||
258 | FRC_ECC_ERR_VW = (1 << 12), | ||
259 | FRC_ECC_ERR_VB = (1 << 13), | ||
260 | FRC_ECC_ERR_NI = (1 << 14), | ||
261 | FRC_ECC_ERR_NO = (1 << 15), | ||
262 | FRC_ECC_PFE_SHIFT = 16, | ||
263 | FRC_ECC_ERR_DO = (1 << 18), | ||
264 | FRC_ECC_P14 = (1 << 19), | ||
265 | }; | ||
266 | |||
267 | /* | ||
268 | * Error Status Register (ERR_STS) bit definitions. | ||
269 | */ | ||
270 | enum { | ||
271 | ERR_STS_NOF = (1 << 0), | ||
272 | ERR_STS_NIF = (1 << 1), | ||
273 | ERR_STS_DRP = (1 << 2), | ||
274 | ERR_STS_XGP = (1 << 3), | ||
275 | ERR_STS_FOU = (1 << 4), | ||
276 | ERR_STS_FOC = (1 << 5), | ||
277 | ERR_STS_FOF = (1 << 6), | ||
278 | ERR_STS_FIU = (1 << 7), | ||
279 | ERR_STS_FIC = (1 << 8), | ||
280 | ERR_STS_FIF = (1 << 9), | ||
281 | ERR_STS_MOF = (1 << 10), | ||
282 | ERR_STS_TA = (1 << 11), | ||
283 | ERR_STS_MA = (1 << 12), | ||
284 | ERR_STS_MPE = (1 << 13), | ||
285 | ERR_STS_SCE = (1 << 14), | ||
286 | ERR_STS_STE = (1 << 15), | ||
287 | ERR_STS_FOW = (1 << 16), | ||
288 | ERR_STS_UE = (1 << 17), | ||
289 | ERR_STS_MCH = (1 << 26), | ||
290 | ERR_STS_LOC_SHIFT = 27, | ||
291 | }; | ||
292 | |||
293 | /* | ||
294 | * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions. | ||
295 | */ | ||
296 | enum { | ||
297 | RAM_DBG_ADDR_FW = (1 << 30), | ||
298 | RAM_DBG_ADDR_FR = (1 << 31), | ||
299 | }; | ||
300 | |||
301 | /* | ||
302 | * Semaphore Register (SEM) bit definitions. | ||
303 | */ | ||
304 | enum { | ||
305 | /* | ||
306 | * Example: | ||
307 | * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT) | ||
308 | */ | ||
309 | SEM_CLEAR = 0, | ||
310 | SEM_SET = 1, | ||
311 | SEM_FORCE = 3, | ||
312 | SEM_XGMAC0_SHIFT = 0, | ||
313 | SEM_XGMAC1_SHIFT = 2, | ||
314 | SEM_ICB_SHIFT = 4, | ||
315 | SEM_MAC_ADDR_SHIFT = 6, | ||
316 | SEM_FLASH_SHIFT = 8, | ||
317 | SEM_PROBE_SHIFT = 10, | ||
318 | SEM_RT_IDX_SHIFT = 12, | ||
319 | SEM_PROC_REG_SHIFT = 14, | ||
320 | SEM_XGMAC0_MASK = 0x00030000, | ||
321 | SEM_XGMAC1_MASK = 0x000c0000, | ||
322 | SEM_ICB_MASK = 0x00300000, | ||
323 | SEM_MAC_ADDR_MASK = 0x00c00000, | ||
324 | SEM_FLASH_MASK = 0x03000000, | ||
325 | SEM_PROBE_MASK = 0x0c000000, | ||
326 | SEM_RT_IDX_MASK = 0x30000000, | ||
327 | SEM_PROC_REG_MASK = 0xc0000000, | ||
328 | }; | ||
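/*
 * Sketch (assumption): acquiring one of the hardware semaphores by
 * set-and-verify; the register echoes the granted state back.  The
 * SEM register address is passed in because the control-register map
 * appears further down in this header, e.g.
 * ql_sem_trylock_sketch(base + SEM, SEM_FLASH_MASK, SEM_FLASH_SHIFT).
 */
static inline int ql_sem_trylock_sketch(void __iomem *sem_reg,
					u32 sem_mask, u32 sem_shift)
{
	writel(sem_mask | (SEM_SET << sem_shift), sem_reg);
	return !!(readl(sem_reg) & sem_mask);	/* nonzero on success */
}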
329 | |||
330 | /* | ||
331 | * 10G MAC Address Register (XGMAC_ADDR) bit definitions. | ||
332 | */ | ||
333 | enum { | ||
334 | XGMAC_ADDR_RDY = (1 << 31), | ||
335 | XGMAC_ADDR_R = (1 << 30), | ||
336 | XGMAC_ADDR_XME = (1 << 29), | ||
337 | |||
338 | /* XGMAC control registers */ | ||
339 | PAUSE_SRC_LO = 0x00000100, | ||
340 | PAUSE_SRC_HI = 0x00000104, | ||
341 | GLOBAL_CFG = 0x00000108, | ||
342 | GLOBAL_CFG_RESET = (1 << 0), | ||
343 | GLOBAL_CFG_JUMBO = (1 << 6), | ||
344 | GLOBAL_CFG_TX_STAT_EN = (1 << 10), | ||
345 | GLOBAL_CFG_RX_STAT_EN = (1 << 11), | ||
346 | TX_CFG = 0x0000010c, | ||
347 | TX_CFG_RESET = (1 << 0), | ||
348 | TX_CFG_EN = (1 << 1), | ||
349 | TX_CFG_PREAM = (1 << 2), | ||
350 | RX_CFG = 0x00000110, | ||
351 | RX_CFG_RESET = (1 << 0), | ||
352 | RX_CFG_EN = (1 << 1), | ||
353 | RX_CFG_PREAM = (1 << 2), | ||
354 | FLOW_CTL = 0x0000011c, | ||
355 | PAUSE_OPCODE = 0x00000120, | ||
356 | PAUSE_TIMER = 0x00000124, | ||
357 | PAUSE_FRM_DEST_LO = 0x00000128, | ||
358 | PAUSE_FRM_DEST_HI = 0x0000012c, | ||
359 | MAC_TX_PARAMS = 0x00000134, | ||
360 | MAC_TX_PARAMS_JUMBO = (1 << 31), | ||
361 | MAC_TX_PARAMS_SIZE_SHIFT = 16, | ||
362 | MAC_RX_PARAMS = 0x00000138, | ||
363 | MAC_SYS_INT = 0x00000144, | ||
364 | MAC_SYS_INT_MASK = 0x00000148, | ||
365 | MAC_MGMT_INT = 0x0000014c, | ||
366 | MAC_MGMT_IN_MASK = 0x00000150, | ||
367 | EXT_ARB_MODE = 0x000001fc, | ||
368 | |||
369 | /* XGMAC TX statistics registers */ | ||
370 | TX_PKTS = 0x00000200, | ||
371 | TX_BYTES = 0x00000208, | ||
372 | TX_MCAST_PKTS = 0x00000210, | ||
373 | TX_BCAST_PKTS = 0x00000218, | ||
374 | TX_UCAST_PKTS = 0x00000220, | ||
375 | TX_CTL_PKTS = 0x00000228, | ||
376 | TX_PAUSE_PKTS = 0x00000230, | ||
377 | TX_64_PKT = 0x00000238, | ||
378 | TX_65_TO_127_PKT = 0x00000240, | ||
379 | TX_128_TO_255_PKT = 0x00000248, | ||
380 | TX_256_511_PKT = 0x00000250, | ||
381 | TX_512_TO_1023_PKT = 0x00000258, | ||
382 | TX_1024_TO_1518_PKT = 0x00000260, | ||
383 | TX_1519_TO_MAX_PKT = 0x00000268, | ||
384 | TX_UNDERSIZE_PKT = 0x00000270, | ||
385 | TX_OVERSIZE_PKT = 0x00000278, | ||
386 | |||
387 | /* XGMAC statistics control registers */ | ||
388 | RX_HALF_FULL_DET = 0x000002a0, | ||
389 | TX_HALF_FULL_DET = 0x000002a4, | ||
390 | RX_OVERFLOW_DET = 0x000002a8, | ||
391 | TX_OVERFLOW_DET = 0x000002ac, | ||
392 | RX_HALF_FULL_MASK = 0x000002b0, | ||
393 | TX_HALF_FULL_MASK = 0x000002b4, | ||
394 | RX_OVERFLOW_MASK = 0x000002b8, | ||
395 | TX_OVERFLOW_MASK = 0x000002bc, | ||
396 | STAT_CNT_CTL = 0x000002c0, | ||
397 | STAT_CNT_CTL_CLEAR_TX = (1 << 0), | ||
398 | STAT_CNT_CTL_CLEAR_RX = (1 << 1), | ||
399 | AUX_RX_HALF_FULL_DET = 0x000002d0, | ||
400 | AUX_TX_HALF_FULL_DET = 0x000002d4, | ||
401 | AUX_RX_OVERFLOW_DET = 0x000002d8, | ||
402 | AUX_TX_OVERFLOW_DET = 0x000002dc, | ||
403 | AUX_RX_HALF_FULL_MASK = 0x000002f0, | ||
404 | AUX_TX_HALF_FULL_MASK = 0x000002f4, | ||
405 | AUX_RX_OVERFLOW_MASK = 0x000002f8, | ||
406 | AUX_TX_OVERFLOW_MASK = 0x000002fc, | ||
407 | |||
408 | /* XGMAC RX statistics registers */ | ||
409 | RX_BYTES = 0x00000300, | ||
410 | RX_BYTES_OK = 0x00000308, | ||
411 | RX_PKTS = 0x00000310, | ||
412 | RX_PKTS_OK = 0x00000318, | ||
413 | RX_BCAST_PKTS = 0x00000320, | ||
414 | RX_MCAST_PKTS = 0x00000328, | ||
415 | RX_UCAST_PKTS = 0x00000330, | ||
416 | RX_UNDERSIZE_PKTS = 0x00000338, | ||
417 | RX_OVERSIZE_PKTS = 0x00000340, | ||
418 | RX_JABBER_PKTS = 0x00000348, | ||
419 | RX_UNDERSIZE_FCERR_PKTS = 0x00000350, | ||
420 | RX_DROP_EVENTS = 0x00000358, | ||
421 | RX_FCERR_PKTS = 0x00000360, | ||
422 | RX_ALIGN_ERR = 0x00000368, | ||
423 | RX_SYMBOL_ERR = 0x00000370, | ||
424 | RX_MAC_ERR = 0x00000378, | ||
425 | RX_CTL_PKTS = 0x00000380, | ||
426 | RX_PAUSE_PKTS = 0x00000384, | ||
427 | RX_64_PKTS = 0x00000390, | ||
428 | RX_65_TO_127_PKTS = 0x00000398, | ||
429 | RX_128_255_PKTS = 0x000003a0, | ||
430 | RX_256_511_PKTS = 0x000003a8, | ||
431 | RX_512_TO_1023_PKTS = 0x000003b0, | ||
432 | RX_1024_TO_1518_PKTS = 0x000003b8, | ||
433 | RX_1519_TO_MAX_PKTS = 0x000003c0, | ||
434 | RX_LEN_ERR_PKTS = 0x000003c8, | ||
435 | |||
436 | /* XGMAC MDIO control registers */ | ||
437 | MDIO_TX_DATA = 0x00000400, | ||
438 | MDIO_RX_DATA = 0x00000410, | ||
439 | MDIO_CMD = 0x00000420, | ||
440 | MDIO_PHY_ADDR = 0x00000430, | ||
441 | MDIO_PORT = 0x00000440, | ||
442 | MDIO_STATUS = 0x00000450, | ||
443 | |||
444 | /* XGMAC AUX statistics registers */ | ||
445 | }; | ||
446 | |||
447 | /* | ||
448 | * Enhanced Transmission Schedule Registers (NIC_ETS, CNA_ETS) bit definitions. | ||
449 | */ | ||
450 | enum { | ||
451 | ETS_QUEUE_SHIFT = 29, | ||
452 | ETS_REF = (1 << 26), | ||
453 | ETS_RS = (1 << 27), | ||
454 | ETS_P = (1 << 28), | ||
455 | ETS_FC_COS_SHIFT = 23, | ||
456 | }; | ||
457 | |||
458 | /* | ||
459 | * Flash Address Register (FLASH_ADDR) bit definitions. | ||
460 | */ | ||
461 | enum { | ||
462 | FLASH_ADDR_RDY = (1 << 31), | ||
463 | FLASH_ADDR_R = (1 << 30), | ||
464 | FLASH_ADDR_ERR = (1 << 29), | ||
465 | }; | ||
466 | |||
467 | /* | ||
468 | * Stop CQ Processing Register (CQ_STOP) bit definitions. | ||
469 | */ | ||
470 | enum { | ||
471 | CQ_STOP_QUEUE_MASK = (0x007f0000), | ||
472 | CQ_STOP_TYPE_MASK = (0x03000000), | ||
473 | CQ_STOP_TYPE_START = 0x00000100, | ||
474 | CQ_STOP_TYPE_STOP = 0x00000200, | ||
475 | CQ_STOP_TYPE_READ = 0x00000300, | ||
476 | CQ_STOP_EN = (1 << 15), | ||
477 | }; | ||
478 | |||
479 | /* | ||
480 | * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions. | ||
481 | */ | ||
482 | enum { | ||
483 | MAC_ADDR_IDX_SHIFT = 4, | ||
484 | MAC_ADDR_TYPE_SHIFT = 16, | ||
485 | MAC_ADDR_TYPE_MASK = 0x000f0000, | ||
486 | MAC_ADDR_TYPE_CAM_MAC = 0x00000000, | ||
487 | MAC_ADDR_TYPE_MULTI_MAC = 0x00010000, | ||
488 | MAC_ADDR_TYPE_VLAN = 0x00020000, | ||
489 | MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000, | ||
490 | MAC_ADDR_TYPE_FC_MAC = 0x00040000, | ||
491 | MAC_ADDR_TYPE_MGMT_MAC = 0x00050000, | ||
492 | MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000, | ||
493 | MAC_ADDR_TYPE_MGMT_V4 = 0x00070000, | ||
494 | MAC_ADDR_TYPE_MGMT_V6 = 0x00080000, | ||
495 | MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000, | ||
496 | MAC_ADDR_ADR = (1 << 25), | ||
497 | MAC_ADDR_RS = (1 << 26), | ||
498 | MAC_ADDR_E = (1 << 27), | ||
499 | MAC_ADDR_MR = (1 << 30), | ||
500 | MAC_ADDR_MW = (1 << 31), | ||
501 | MAX_MULTICAST_ENTRIES = 32, | ||
502 | }; | ||
503 | |||
504 | /* | ||
505 | * Split Header Register (SPLT_HDR) bit definitions. | ||
506 | */ | ||
507 | enum { | ||
508 | SPLT_HDR_EP = (1 << 31), | ||
509 | }; | ||
510 | |||
511 | /* | ||
512 | * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions. | ||
513 | */ | ||
514 | enum { | ||
515 | FC_RCV_CFG_ECT = (1 << 15), | ||
516 | FC_RCV_CFG_DFH = (1 << 20), | ||
517 | FC_RCV_CFG_DVF = (1 << 21), | ||
518 | FC_RCV_CFG_RCE = (1 << 27), | ||
519 | FC_RCV_CFG_RFE = (1 << 28), | ||
520 | FC_RCV_CFG_TEE = (1 << 29), | ||
521 | FC_RCV_CFG_TCE = (1 << 30), | ||
522 | FC_RCV_CFG_TFE = (1 << 31), | ||
523 | }; | ||
524 | |||
525 | /* | ||
526 | * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions. | ||
527 | */ | ||
528 | enum { | ||
529 | NIC_RCV_CFG_PPE = (1 << 0), | ||
530 | NIC_RCV_CFG_VLAN_MASK = 0x00060000, | ||
531 | NIC_RCV_CFG_VLAN_ALL = 0x00000000, | ||
532 | NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002, | ||
533 | NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004, | ||
534 | NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006, | ||
535 | NIC_RCV_CFG_RV = (1 << 3), | ||
536 | NIC_RCV_CFG_DFQ_MASK = (0x7f000000), | ||
537 | NIC_RCV_CFG_DFQ_SHIFT = 8, | ||
538 | NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */ | ||
539 | }; | ||
540 | |||
541 | /* | ||
542 | * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions. | ||
543 | */ | ||
544 | enum { | ||
545 | MGMT_RCV_CFG_ARP = (1 << 0), | ||
546 | MGMT_RCV_CFG_DHC = (1 << 1), | ||
547 | MGMT_RCV_CFG_DHS = (1 << 2), | ||
548 | MGMT_RCV_CFG_NP = (1 << 3), | ||
549 | MGMT_RCV_CFG_I6N = (1 << 4), | ||
550 | MGMT_RCV_CFG_I6R = (1 << 5), | ||
551 | MGMT_RCV_CFG_DH6 = (1 << 6), | ||
552 | MGMT_RCV_CFG_UD1 = (1 << 7), | ||
553 | MGMT_RCV_CFG_UD0 = (1 << 8), | ||
554 | MGMT_RCV_CFG_BCT = (1 << 9), | ||
555 | MGMT_RCV_CFG_MCT = (1 << 10), | ||
556 | MGMT_RCV_CFG_DM = (1 << 11), | ||
557 | MGMT_RCV_CFG_RM = (1 << 12), | ||
558 | MGMT_RCV_CFG_STL = (1 << 13), | ||
559 | MGMT_RCV_CFG_VLAN_MASK = 0xc0000000, | ||
560 | MGMT_RCV_CFG_VLAN_ALL = 0x00000000, | ||
561 | MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000, | ||
562 | MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000, | ||
563 | MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000, | ||
564 | }; | ||
565 | |||
566 | /* | ||
567 | * Routing Index Register (RT_IDX) bit definitions. | ||
568 | */ | ||
569 | enum { | ||
570 | RT_IDX_IDX_SHIFT = 8, | ||
571 | RT_IDX_TYPE_MASK = 0x000f0000, | ||
572 | RT_IDX_TYPE_RT = 0x00000000, | ||
573 | RT_IDX_TYPE_RT_INV = 0x00010000, | ||
574 | RT_IDX_TYPE_NICQ = 0x00020000, | ||
575 | RT_IDX_TYPE_NICQ_INV = 0x00030000, | ||
576 | RT_IDX_DST_MASK = 0x00700000, | ||
577 | RT_IDX_DST_RSS = 0x00000000, | ||
578 | RT_IDX_DST_CAM_Q = 0x00100000, | ||
579 | RT_IDX_DST_COS_Q = 0x00200000, | ||
580 | RT_IDX_DST_DFLT_Q = 0x00300000, | ||
581 | RT_IDX_DST_DEST_Q = 0x00400000, | ||
582 | RT_IDX_RS = (1 << 26), | ||
583 | RT_IDX_E = (1 << 27), | ||
584 | RT_IDX_MR = (1 << 30), | ||
585 | RT_IDX_MW = (1 << 31), | ||
586 | |||
587 | /* Nic Queue format - type 2 bits */ | ||
588 | RT_IDX_BCAST = (1 << 0), | ||
589 | RT_IDX_MCAST = (1 << 1), | ||
590 | RT_IDX_MCAST_MATCH = (1 << 2), | ||
591 | RT_IDX_MCAST_REG_MATCH = (1 << 3), | ||
592 | RT_IDX_MCAST_HASH_MATCH = (1 << 4), | ||
593 | RT_IDX_FC_MACH = (1 << 5), | ||
594 | RT_IDX_ETH_FCOE = (1 << 6), | ||
595 | RT_IDX_CAM_HIT = (1 << 7), | ||
596 | RT_IDX_CAM_BIT0 = (1 << 8), | ||
597 | RT_IDX_CAM_BIT1 = (1 << 9), | ||
598 | RT_IDX_VLAN_TAG = (1 << 10), | ||
599 | RT_IDX_VLAN_MATCH = (1 << 11), | ||
600 | RT_IDX_VLAN_FILTER = (1 << 12), | ||
601 | RT_IDX_ETH_SKIP1 = (1 << 13), | ||
602 | RT_IDX_ETH_SKIP2 = (1 << 14), | ||
603 | RT_IDX_BCAST_MCAST_MATCH = (1 << 15), | ||
604 | RT_IDX_802_3 = (1 << 16), | ||
605 | RT_IDX_LLDP = (1 << 17), | ||
606 | RT_IDX_UNUSED018 = (1 << 18), | ||
607 | RT_IDX_UNUSED019 = (1 << 19), | ||
608 | RT_IDX_UNUSED20 = (1 << 20), | ||
609 | RT_IDX_UNUSED21 = (1 << 21), | ||
610 | RT_IDX_ERR = (1 << 22), | ||
611 | RT_IDX_VALID = (1 << 23), | ||
612 | RT_IDX_TU_CSUM_ERR = (1 << 24), | ||
613 | RT_IDX_IP_CSUM_ERR = (1 << 25), | ||
614 | RT_IDX_MAC_ERR = (1 << 26), | ||
615 | RT_IDX_RSS_TCP6 = (1 << 27), | ||
616 | RT_IDX_RSS_TCP4 = (1 << 28), | ||
617 | RT_IDX_RSS_IPV6 = (1 << 29), | ||
618 | RT_IDX_RSS_IPV4 = (1 << 30), | ||
619 | RT_IDX_RSS_MATCH = (1 << 31), | ||
620 | |||
621 | /* Hierarchy for the NIC Queue Mask */ | ||
622 | RT_IDX_ALL_ERR_SLOT = 0, | ||
623 | RT_IDX_MAC_ERR_SLOT = 0, | ||
624 | RT_IDX_IP_CSUM_ERR_SLOT = 1, | ||
625 | RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2, | ||
626 | RT_IDX_BCAST_SLOT = 3, | ||
627 | RT_IDX_MCAST_MATCH_SLOT = 4, | ||
628 | RT_IDX_ALLMULTI_SLOT = 5, | ||
629 | RT_IDX_UNUSED6_SLOT = 6, | ||
630 | RT_IDX_UNUSED7_SLOT = 7, | ||
631 | RT_IDX_RSS_MATCH_SLOT = 8, | ||
632 | RT_IDX_RSS_IPV4_SLOT = 8, | ||
633 | RT_IDX_RSS_IPV6_SLOT = 9, | ||
634 | RT_IDX_RSS_TCP4_SLOT = 10, | ||
635 | RT_IDX_RSS_TCP6_SLOT = 11, | ||
636 | RT_IDX_CAM_HIT_SLOT = 12, | ||
637 | RT_IDX_UNUSED013 = 13, | ||
638 | RT_IDX_UNUSED014 = 14, | ||
639 | RT_IDX_PROMISCUOUS_SLOT = 15, | ||
640 | RT_IDX_MAX_SLOTS = 16, | ||
641 | }; | ||
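/*
 * Sketch (assumption): composing a routing-index word that steers
 * broadcast frames to the default queue through a NIC-queue entry.
 * Reading RT_IDX_E as "entry enable" is an assumption.
 */
static inline u32 ql_rt_idx_bcast_sketch(void)
{
	return (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT) |
	       RT_IDX_TYPE_NICQ | RT_IDX_DST_DFLT_Q | RT_IDX_E;
}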
642 | |||
643 | /* | ||
644 | * Control Register Set Map | ||
645 | */ | ||
646 | enum { | ||
647 | PROC_ADDR = 0, /* Use semaphore */ | ||
648 | PROC_DATA = 0x04, /* Use semaphore */ | ||
649 | SYS = 0x08, | ||
650 | RST_FO = 0x0c, | ||
651 | FSC = 0x10, | ||
652 | CSR = 0x14, | ||
653 | LED = 0x18, | ||
654 | ICB_RID = 0x1c, /* Use semaphore */ | ||
655 | ICB_L = 0x20, /* Use semaphore */ | ||
656 | ICB_H = 0x24, /* Use semaphore */ | ||
657 | CFG = 0x28, | ||
658 | BIOS_ADDR = 0x2c, | ||
659 | STS = 0x30, | ||
660 | INTR_EN = 0x34, | ||
661 | INTR_MASK = 0x38, | ||
662 | ISR1 = 0x3c, | ||
663 | ISR2 = 0x40, | ||
664 | ISR3 = 0x44, | ||
665 | ISR4 = 0x48, | ||
666 | REV_ID = 0x4c, | ||
667 | FRC_ECC_ERR = 0x50, | ||
668 | ERR_STS = 0x54, | ||
669 | RAM_DBG_ADDR = 0x58, | ||
670 | RAM_DBG_DATA = 0x5c, | ||
671 | ECC_ERR_CNT = 0x60, | ||
672 | SEM = 0x64, | ||
673 | GPIO_1 = 0x68, /* Use semaphore */ | ||
674 | GPIO_2 = 0x6c, /* Use semaphore */ | ||
675 | GPIO_3 = 0x70, /* Use semaphore */ | ||
676 | RSVD2 = 0x74, | ||
677 | XGMAC_ADDR = 0x78, /* Use semaphore */ | ||
678 | XGMAC_DATA = 0x7c, /* Use semaphore */ | ||
679 | NIC_ETS = 0x80, | ||
680 | CNA_ETS = 0x84, | ||
681 | FLASH_ADDR = 0x88, /* Use semaphore */ | ||
682 | FLASH_DATA = 0x8c, /* Use semaphore */ | ||
683 | CQ_STOP = 0x90, | ||
684 | PAGE_TBL_RID = 0x94, | ||
685 | WQ_PAGE_TBL_LO = 0x98, | ||
686 | WQ_PAGE_TBL_HI = 0x9c, | ||
687 | CQ_PAGE_TBL_LO = 0xa0, | ||
688 | CQ_PAGE_TBL_HI = 0xa4, | ||
689 | MAC_ADDR_IDX = 0xa8, /* Use semaphore */ | ||
690 | MAC_ADDR_DATA = 0xac, /* Use semaphore */ | ||
691 | COS_DFLT_CQ1 = 0xb0, | ||
692 | COS_DFLT_CQ2 = 0xb4, | ||
693 | ETYPE_SKIP1 = 0xb8, | ||
694 | ETYPE_SKIP2 = 0xbc, | ||
695 | SPLT_HDR = 0xc0, | ||
696 | FC_PAUSE_THRES = 0xc4, | ||
697 | NIC_PAUSE_THRES = 0xc8, | ||
698 | FC_ETHERTYPE = 0xcc, | ||
699 | FC_RCV_CFG = 0xd0, | ||
700 | NIC_RCV_CFG = 0xd4, | ||
701 | FC_COS_TAGS = 0xd8, | ||
702 | NIC_COS_TAGS = 0xdc, | ||
703 | MGMT_RCV_CFG = 0xe0, | ||
704 | RT_IDX = 0xe4, | ||
705 | RT_DATA = 0xe8, | ||
706 | RSVD7 = 0xec, | ||
707 | XG_SERDES_ADDR = 0xf0, | ||
708 | XG_SERDES_DATA = 0xf4, | ||
709 | PRB_MX_ADDR = 0xf8, /* Use semaphore */ | ||
710 | PRB_MX_DATA = 0xfc, /* Use semaphore */ | ||
711 | }; | ||
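/*
 * Sketch (assumption): the usual indirect-access pattern for
 * PROC_ADDR/PROC_DATA.  The real driver would take the PROC_REG
 * semaphore first and use its own register accessors; <linux/delay.h>
 * is assumed for udelay().
 */
static inline int ql_proc_addr_read_sketch(void __iomem *base,
					   u32 addr, u32 *data)
{
	int count = UDELAY_COUNT;

	writel(addr | PROC_ADDR_R, base + PROC_ADDR);	/* request read */
	while (count--) {
		u32 sts = readl(base + PROC_ADDR);

		if (sts & PROC_ADDR_ERR)
			return -EIO;		/* address rejected */
		if (sts & PROC_ADDR_RDY) {
			*data = readl(base + PROC_DATA);
			return 0;
		}
		udelay(UDELAY_DELAY);
	}
	return -ETIMEDOUT;			/* RDY never came up */
}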
712 | |||
713 | /* | ||
714 | * CAM output format. | ||
715 | */ | ||
716 | enum { | ||
717 | CAM_OUT_ROUTE_FC = 0, | ||
718 | CAM_OUT_ROUTE_NIC = 1, | ||
719 | CAM_OUT_FUNC_SHIFT = 2, | ||
720 | CAM_OUT_RV = (1 << 4), | ||
721 | CAM_OUT_SH = (1 << 15), | ||
722 | CAM_OUT_CQ_ID_SHIFT = 5, | ||
723 | }; | ||
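/*
 * Sketch (assumption): a CAM output word routing a matched frame to
 * the NIC block on a given function and completion queue.  Treating
 * CAM_OUT_RV as "route valid" is an assumption.
 */
static inline u32 ql_cam_out_sketch(u32 func, u32 cq_id)
{
	return CAM_OUT_ROUTE_NIC |
	       (func << CAM_OUT_FUNC_SHIFT) |
	       (cq_id << CAM_OUT_CQ_ID_SHIFT) |
	       CAM_OUT_RV;
}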
724 | |||
725 | /* | ||
726 | * Mailbox definitions | ||
727 | */ | ||
728 | enum { | ||
729 | /* Asynchronous Event Notifications */ | ||
730 | AEN_SYS_ERR = 0x00008002, | ||
731 | AEN_LINK_UP = 0x00008011, | ||
732 | AEN_LINK_DOWN = 0x00008012, | ||
733 | AEN_IDC_CMPLT = 0x00008100, | ||
734 | AEN_IDC_REQ = 0x00008101, | ||
735 | AEN_FW_INIT_DONE = 0x00008400, | ||
736 | AEN_FW_INIT_FAIL = 0x00008401, | ||
737 | |||
738 | /* Mailbox Command Opcodes. */ | ||
739 | MB_CMD_NOP = 0x00000000, | ||
740 | MB_CMD_EX_FW = 0x00000002, | ||
741 | MB_CMD_MB_TEST = 0x00000006, | ||
742 | MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */ | ||
743 | MB_CMD_ABOUT_FW = 0x00000008, | ||
744 | MB_CMD_LOAD_RISC_RAM = 0x0000000b, | ||
745 | MB_CMD_DUMP_RISC_RAM = 0x0000000c, | ||
746 | MB_CMD_WRITE_RAM = 0x0000000d, | ||
747 | MB_CMD_READ_RAM = 0x0000000f, | ||
748 | MB_CMD_STOP_FW = 0x00000014, | ||
749 | MB_CMD_MAKE_SYS_ERR = 0x0000002a, | ||
750 | MB_CMD_INIT_FW = 0x00000060, | ||
751 | MB_CMD_GET_INIT_CB = 0x00000061, | ||
752 | MB_CMD_GET_FW_STATE = 0x00000069, | ||
753 | MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */ | ||
754 | MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */ | ||
755 | MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */ | ||
756 | MB_WOL_DISABLE = 0x00000000, | ||
757 | MB_WOL_MAGIC_PKT = 0x00000001, | ||
758 | MB_WOL_FLTR = 0x00000002, | ||
759 | MB_WOL_UCAST = 0x00000004, | ||
760 | MB_WOL_MCAST = 0x00000008, | ||
761 | MB_WOL_BCAST = 0x00000010, | ||
762 | MB_WOL_LINK_UP = 0x00000020, | ||
763 | MB_WOL_LINK_DOWN = 0x00000040, | ||
764 | MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */ | ||
765 | MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */ | ||
766 | MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */ | ||
767 | MB_CMD_CLEAR_WOL_MAGIC = 0x00000114, /* Wake On Lan Magic Packet */ | ||
768 | MB_CMD_PORT_RESET = 0x00000120, | ||
769 | MB_CMD_SET_PORT_CFG = 0x00000122, | ||
770 | MB_CMD_GET_PORT_CFG = 0x00000123, | ||
771 | MB_CMD_SET_ASIC_VOLTS = 0x00000130, | ||
772 | MB_CMD_GET_SNS_DATA = 0x00000131, /* Temp and Volt Sense data. */ | ||
773 | |||
774 | /* Mailbox Command Status. */ | ||
775 | MB_CMD_STS_GOOD = 0x00004000, /* Success. */ | ||
776 | MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */ | ||
777 | MB_CMD_STS_ERR = 0x00004005, /* Error. */ | ||
778 | }; | ||
779 | |||
780 | struct mbox_params { | ||
781 | u32 mbox_in[MAILBOX_COUNT]; | ||
782 | u32 mbox_out[MAILBOX_COUNT]; | ||
783 | int in_count; | ||
784 | int out_count; | ||
785 | }; | ||
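/*
 * Usage sketch (assumption): preparing an "about firmware" mailbox
 * command.  The out_count of 3 (status word plus version words) is a
 * guess; dispatching the request to the chip is beyond this header.
 * <linux/string.h> is assumed for memset().
 */
static inline void ql_prep_about_fw_sketch(struct mbox_params *mbcp)
{
	memset(mbcp, 0, sizeof(*mbcp));
	mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
	mbcp->in_count = 1;
	mbcp->out_count = 3;
}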
786 | |||
787 | struct flash_params { | ||
788 | u8 dev_id_str[4]; | ||
789 | u16 size; | ||
790 | u16 csum; | ||
791 | u16 ver; | ||
792 | u16 sub_dev_id; | ||
793 | u8 mac_addr[6]; | ||
794 | u16 res; | ||
795 | }; | ||
796 | |||
797 | |||
798 | /* | ||
799 | * doorbell space for the rx ring context | ||
800 | */ | ||
801 | struct rx_doorbell_context { | ||
802 | u32 cnsmr_idx; /* 0x00 */ | ||
803 | u32 valid; /* 0x04 */ | ||
804 | u32 reserved[4]; /* 0x08-0x14 */ | ||
805 | u32 lbq_prod_idx; /* 0x18 */ | ||
806 | u32 sbq_prod_idx; /* 0x1c */ | ||
807 | }; | ||
808 | |||
809 | /* | ||
810 | * doorbell space for the tx ring context | ||
811 | */ | ||
812 | struct tx_doorbell_context { | ||
813 | u32 prod_idx; /* 0x00 */ | ||
814 | u32 valid; /* 0x04 */ | ||
815 | u32 reserved[4]; /* 0x08-0x14 */ | ||
816 | u32 lbq_prod_idx; /* 0x18 */ | ||
817 | u32 sbq_prod_idx; /* 0x1c */ | ||
818 | }; | ||
819 | |||
820 | /* DATA STRUCTURES SHARED WITH HARDWARE. */ | ||
821 | |||
822 | struct bq_element { | ||
823 | u32 addr_lo; | ||
824 | #define BQ_END 0x00000001 | ||
825 | #define BQ_CONT 0x00000002 | ||
826 | #define BQ_MASK 0x00000003 | ||
827 | u32 addr_hi; | ||
828 | } __attribute((packed)); | ||
829 | |||
830 | struct tx_buf_desc { | ||
831 | __le64 addr; | ||
832 | __le32 len; | ||
833 | #define TX_DESC_LEN_MASK 0x000fffff | ||
834 | #define TX_DESC_C 0x40000000 | ||
835 | #define TX_DESC_E 0x80000000 | ||
836 | } __attribute((packed)); | ||
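/*
 * Sketch (assumption): filling one TX buffer descriptor from a DMA
 * mapping; TX_DESC_E is taken to mark the last descriptor of a chain.
 */
static inline void ql_set_tbd_sketch(struct tx_buf_desc *tbd,
				     dma_addr_t map, u32 len, int last)
{
	tbd->addr = cpu_to_le64(map);
	tbd->len = cpu_to_le32(len & TX_DESC_LEN_MASK);
	if (last)
		tbd->len |= cpu_to_le32(TX_DESC_E);
}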
837 | |||
838 | /* | ||
839 | * IOCB Definitions... | ||
840 | */ | ||
841 | |||
842 | #define OPCODE_OB_MAC_IOCB 0x01 | ||
843 | #define OPCODE_OB_MAC_TSO_IOCB 0x02 | ||
844 | #define OPCODE_IB_MAC_IOCB 0x20 | ||
845 | #define OPCODE_IB_MPI_IOCB 0x21 | ||
846 | #define OPCODE_IB_AE_IOCB 0x3f | ||
847 | |||
848 | struct ob_mac_iocb_req { | ||
849 | u8 opcode; | ||
850 | u8 flags1; | ||
851 | #define OB_MAC_IOCB_REQ_OI 0x01 | ||
852 | #define OB_MAC_IOCB_REQ_I 0x02 | ||
853 | #define OB_MAC_IOCB_REQ_D 0x08 | ||
854 | #define OB_MAC_IOCB_REQ_F 0x10 | ||
855 | u8 flags2; | ||
856 | u8 flags3; | ||
857 | #define OB_MAC_IOCB_DFP 0x02 | ||
858 | #define OB_MAC_IOCB_V 0x04 | ||
859 | __le32 reserved1[2]; | ||
860 | __le16 frame_len; | ||
861 | #define OB_MAC_IOCB_LEN_MASK 0x3ffff | ||
862 | __le16 reserved2; | ||
863 | __le32 tid; | ||
864 | __le32 txq_idx; | ||
865 | __le32 reserved3; | ||
866 | __le16 vlan_tci; | ||
867 | __le16 reserved4; | ||
868 | struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; | ||
869 | } __attribute((packed)); | ||
870 | |||
871 | struct ob_mac_iocb_rsp { | ||
872 | u8 opcode; /* */ | ||
873 | u8 flags1; /* */ | ||
874 | #define OB_MAC_IOCB_RSP_OI 0x01 /* */ | ||
875 | #define OB_MAC_IOCB_RSP_I 0x02 /* */ | ||
876 | #define OB_MAC_IOCB_RSP_E 0x08 /* */ | ||
877 | #define OB_MAC_IOCB_RSP_S 0x10 /* too Short */ | ||
878 | #define OB_MAC_IOCB_RSP_L 0x20 /* too Large */ | ||
879 | #define OB_MAC_IOCB_RSP_P 0x40 /* Padded */ | ||
880 | u8 flags2; /* */ | ||
881 | u8 flags3; /* */ | ||
882 | #define OB_MAC_IOCB_RSP_B 0x80 /* */ | ||
883 | __le32 tid; | ||
884 | __le32 txq_idx; | ||
885 | __le32 reserved[13]; | ||
886 | } __attribute((packed)); | ||
887 | |||
888 | struct ob_mac_tso_iocb_req { | ||
889 | u8 opcode; | ||
890 | u8 flags1; | ||
891 | #define OB_MAC_TSO_IOCB_OI 0x01 | ||
892 | #define OB_MAC_TSO_IOCB_I 0x02 | ||
893 | #define OB_MAC_TSO_IOCB_D 0x08 | ||
894 | #define OB_MAC_TSO_IOCB_IP4 0x40 | ||
895 | #define OB_MAC_TSO_IOCB_IP6 0x80 | ||
896 | u8 flags2; | ||
897 | #define OB_MAC_TSO_IOCB_LSO 0x20 | ||
898 | #define OB_MAC_TSO_IOCB_UC 0x40 | ||
899 | #define OB_MAC_TSO_IOCB_TC 0x80 | ||
900 | u8 flags3; | ||
901 | #define OB_MAC_TSO_IOCB_IC 0x01 | ||
902 | #define OB_MAC_TSO_IOCB_DFP 0x02 | ||
903 | #define OB_MAC_TSO_IOCB_V 0x04 | ||
904 | __le32 reserved1[2]; | ||
905 | __le32 frame_len; | ||
906 | __le32 tid; | ||
907 | __le32 txq_idx; | ||
908 | __le16 total_hdrs_len; | ||
909 | __le16 net_trans_offset; | ||
910 | #define OB_MAC_TRANSPORT_HDR_SHIFT 6 | ||
911 | __le16 vlan_tci; | ||
912 | __le16 mss; | ||
913 | struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; | ||
914 | } __attribute((packed)); | ||
915 | |||
916 | struct ob_mac_tso_iocb_rsp { | ||
917 | u8 opcode; | ||
918 | u8 flags1; | ||
919 | #define OB_MAC_TSO_IOCB_RSP_OI 0x01 | ||
920 | #define OB_MAC_TSO_IOCB_RSP_I 0x02 | ||
921 | #define OB_MAC_TSO_IOCB_RSP_E 0x08 | ||
922 | #define OB_MAC_TSO_IOCB_RSP_S 0x10 | ||
923 | #define OB_MAC_TSO_IOCB_RSP_L 0x20 | ||
924 | #define OB_MAC_TSO_IOCB_RSP_P 0x40 | ||
925 | u8 flags2; /* */ | ||
926 | u8 flags3; /* */ | ||
927 | #define OB_MAC_TSO_IOCB_RSP_B 0x80 /* flags3 is a u8; matches OB_MAC_IOCB_RSP_B */ | ||
928 | __le32 tid; | ||
929 | __le32 txq_idx; | ||
930 | __le32 reserved2[13]; | ||
931 | } __attribute((packed)); | ||
932 | |||
933 | struct ib_mac_iocb_rsp { | ||
934 | u8 opcode; /* 0x20 */ | ||
935 | u8 flags1; | ||
936 | #define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */ | ||
937 | #define IB_MAC_IOCB_RSP_I 0x02 /* Disable intr generation */ | ||
938 | #define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */ | ||
939 | #define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */ | ||
940 | #define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */ | ||
941 | #define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */ | ||
942 | #define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */ | ||
943 | #define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */ | ||
944 | #define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */ | ||
945 | #define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */ | ||
946 | #define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */ | ||
947 | u8 flags2; | ||
948 | #define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */ | ||
949 | #define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */ | ||
950 | #define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */ | ||
951 | #define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04 | ||
952 | #define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08 | ||
953 | #define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10 | ||
954 | #define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14 | ||
955 | #define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18 | ||
956 | #define IB_MAC_IOCB_RSP_ERR_CRC 0x1c | ||
957 | #define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */ | ||
958 | #define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */ | ||
959 | #define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */ | ||
960 | u8 flags3; | ||
961 | #define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */ | ||
962 | #define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */ | ||
963 | #define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */ | ||
964 | #define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */ | ||
965 | #define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */ | ||
966 | #define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */ | ||
967 | #define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */ | ||
968 | #define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */ | ||
969 | #define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */ | ||
970 | #define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */ | ||
971 | #define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */ | ||
972 | __le32 data_len; /* */ | ||
973 | __le32 data_addr_lo; /* */ | ||
974 | __le32 data_addr_hi; /* */ | ||
975 | __le32 rss; /* */ | ||
976 | __le16 vlan_id; /* 12 bits */ | ||
977 | #define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */ | ||
978 | #define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */ | ||
979 | |||
980 | __le16 reserved1; | ||
981 | __le32 reserved2[6]; | ||
982 | __le32 flags4; | ||
983 | #define IB_MAC_IOCB_RSP_HV 0x20000000 /* */ | ||
984 | #define IB_MAC_IOCB_RSP_HS 0x40000000 /* */ | ||
985 | #define IB_MAC_IOCB_RSP_HL 0x80000000 /* */ | ||
986 | __le32 hdr_len; /* */ | ||
987 | __le32 hdr_addr_lo; /* */ | ||
988 | __le32 hdr_addr_hi; /* */ | ||
989 | } __attribute((packed)); | ||
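/*
 * Illustrative sketch: testing an inbound completion for a MAC-level
 * receive error.  Any non-zero value under ERR_MASK is one of the
 * IB_MAC_IOCB_RSP_ERR_* codes above (CRC, oversize, and so on).
 */
static inline int ql_sketch_rx_frame_error(struct ib_mac_iocb_rsp *rsp)
{
	return (rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) != 0;
}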
990 | |||
991 | struct ib_ae_iocb_rsp { | ||
992 | u8 opcode; | ||
993 | u8 flags1; | ||
994 | #define IB_AE_IOCB_RSP_OI 0x01 | ||
995 | #define IB_AE_IOCB_RSP_I 0x02 | ||
996 | u8 event; | ||
997 | #define LINK_UP_EVENT 0x00 | ||
998 | #define LINK_DOWN_EVENT 0x01 | ||
999 | #define CAM_LOOKUP_ERR_EVENT 0x06 | ||
1000 | #define SOFT_ECC_ERROR_EVENT 0x07 | ||
1001 | #define MGMT_ERR_EVENT 0x08 | ||
1002 | #define TEN_GIG_MAC_EVENT 0x09 | ||
1003 | #define GPI0_H2L_EVENT 0x10 | ||
1004 | #define GPI0_L2H_EVENT 0x20 | ||
1005 | #define GPI1_H2L_EVENT 0x11 | ||
1006 | #define GPI1_L2H_EVENT 0x21 | ||
1007 | #define PCI_ERR_ANON_BUF_RD 0x40 | ||
1008 | u8 q_id; | ||
1009 | __le32 reserved[15]; | ||
1010 | } __attribute((packed)); | ||
1011 | |||
1012 | /* | ||
1013 | * These two structures are for generic | ||
1014 | * handling of inbound (ib) and outbound (ob) iocbs. | ||
1015 | */ | ||
1016 | struct ql_net_rsp_iocb { | ||
1017 | u8 opcode; | ||
1018 | u8 flags0; | ||
1019 | __le16 length; | ||
1020 | __le32 tid; | ||
1021 | __le32 reserved[14]; | ||
1022 | } __attribute((packed)); | ||
1023 | |||
1024 | struct net_req_iocb { | ||
1025 | u8 opcode; | ||
1026 | u8 flags0; | ||
1027 | __le16 flags1; | ||
1028 | __le32 tid; | ||
1029 | __le32 reserved1[30]; | ||
1030 | } __attribute((packed)); | ||
1031 | |||
1032 | /* | ||
1033 | * TX ring initialization control block for the chip. | ||
1034 | * The hardware documentation defines it as the | ||
1035 | * "Work Queue Initialization Control Block". | ||
1036 | */ | ||
1037 | struct wqicb { | ||
1038 | __le16 len; | ||
1039 | #define Q_LEN_V (1 << 4) | ||
1040 | #define Q_LEN_CPP_CONT 0x0000 | ||
1041 | #define Q_LEN_CPP_16 0x0001 | ||
1042 | #define Q_LEN_CPP_32 0x0002 | ||
1043 | #define Q_LEN_CPP_64 0x0003 | ||
1044 | __le16 flags; | ||
1045 | #define Q_PRI_SHIFT 1 | ||
1046 | #define Q_FLAGS_LC 0x1000 | ||
1047 | #define Q_FLAGS_LB 0x2000 | ||
1048 | #define Q_FLAGS_LI 0x4000 | ||
1049 | #define Q_FLAGS_LO 0x8000 | ||
1050 | __le16 cq_id_rss; | ||
1051 | #define Q_CQ_ID_RSS_RV 0x8000 | ||
1052 | __le16 rid; | ||
1053 | __le32 addr_lo; | ||
1054 | __le32 addr_hi; | ||
1055 | __le32 cnsmr_idx_addr_lo; | ||
1056 | __le32 cnsmr_idx_addr_hi; | ||
1057 | } __attribute((packed)); | ||
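/*
 * Illustrative sketch (assumed field usage, not the driver's actual
 * init path): pointing a wqicb at a DMA-coherent work queue before the
 * block is downloaded to the chip, e.g. via ql_write_cfg().
 */
static inline void ql_sketch_init_wqicb(struct wqicb *wqicb,
					dma_addr_t wq_dma, u16 entries,
					u16 cq_id)
{
	wqicb->len = cpu_to_le16(entries | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->cq_id_rss = cpu_to_le16(cq_id);
	wqicb->addr_lo = cpu_to_le32(wq_dma);
	wqicb->addr_hi = cpu_to_le32((u64) wq_dma >> 32);
}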
1058 | |||
1059 | /* | ||
1060 | * RX ring initialization control block for the chip. | ||
1061 | * The hardware documentation defines it as the | ||
1062 | * "Completion Queue Initialization Control Block". | ||
1063 | */ | ||
1064 | struct cqicb { | ||
1065 | u8 msix_vect; | ||
1066 | u8 reserved1; | ||
1067 | u8 reserved2; | ||
1068 | u8 flags; | ||
1069 | #define FLAGS_LV 0x08 | ||
1070 | #define FLAGS_LS 0x10 | ||
1071 | #define FLAGS_LL 0x20 | ||
1072 | #define FLAGS_LI 0x40 | ||
1073 | #define FLAGS_LC 0x80 | ||
1074 | __le16 len; | ||
1075 | #define LEN_V (1 << 4) | ||
1076 | #define LEN_CPP_CONT 0x0000 | ||
1077 | #define LEN_CPP_32 0x0001 | ||
1078 | #define LEN_CPP_64 0x0002 | ||
1079 | #define LEN_CPP_128 0x0003 | ||
1080 | __le16 rid; | ||
1081 | __le32 addr_lo; | ||
1082 | __le32 addr_hi; | ||
1083 | __le32 prod_idx_addr_lo; | ||
1084 | __le32 prod_idx_addr_hi; | ||
1085 | __le16 pkt_delay; | ||
1086 | __le16 irq_delay; | ||
1087 | __le32 lbq_addr_lo; | ||
1088 | __le32 lbq_addr_hi; | ||
1089 | __le16 lbq_buf_size; | ||
1090 | __le16 lbq_len; /* entry count */ | ||
1091 | __le32 sbq_addr_lo; | ||
1092 | __le32 sbq_addr_hi; | ||
1093 | __le16 sbq_buf_size; | ||
1094 | __le16 sbq_len; /* entry count */ | ||
1095 | } __attribute((packed)); | ||
1096 | |||
1097 | struct ricb { | ||
1098 | u8 base_cq; | ||
1099 | #define RSS_L4K 0x80 | ||
1100 | u8 flags; | ||
1101 | #define RSS_L6K 0x01 | ||
1102 | #define RSS_LI 0x02 | ||
1103 | #define RSS_LB 0x04 | ||
1104 | #define RSS_LM 0x08 | ||
1105 | #define RSS_RI4 0x10 | ||
1106 | #define RSS_RT4 0x20 | ||
1107 | #define RSS_RI6 0x40 | ||
1108 | #define RSS_RT6 0x80 | ||
1109 | __le16 mask; | ||
1110 | __le32 hash_cq_id[256]; | ||
1111 | __le32 ipv6_hash_key[10]; | ||
1112 | __le32 ipv4_hash_key[4]; | ||
1113 | } __attribute((packed)); | ||
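/*
 * Illustrative sketch (assumed usage): a minimal RSS configuration that
 * hashes TCP over IPv4 and IPv6.  hash_cq_id[] and the hash keys would
 * be populated separately before the block is written to the chip.
 */
static inline void ql_sketch_init_ricb(struct ricb *ricb, u16 hash_mask)
{
	ricb->base_cq = RSS_L4K;		/* hash the full 4-tuple */
	ricb->flags = RSS_RT4 | RSS_RT6;	/* route TCP/IPv4, TCP/IPv6 */
	ricb->mask = cpu_to_le16(hash_mask);
}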
1114 | |||
1115 | /* SOFTWARE/DRIVER DATA STRUCTURES. */ | ||
1116 | |||
1117 | struct oal { | ||
1118 | struct tx_buf_desc oal[TX_DESC_PER_OAL]; | ||
1119 | }; | ||
1120 | |||
1121 | struct map_list { | ||
1122 | DECLARE_PCI_UNMAP_ADDR(mapaddr); | ||
1123 | DECLARE_PCI_UNMAP_LEN(maplen); | ||
1124 | }; | ||
1125 | |||
1126 | struct tx_ring_desc { | ||
1127 | struct sk_buff *skb; | ||
1128 | struct ob_mac_iocb_req *queue_entry; | ||
1129 | int index; | ||
1130 | struct oal oal; | ||
1131 | struct map_list map[MAX_SKB_FRAGS + 1]; | ||
1132 | int map_cnt; | ||
1133 | struct tx_ring_desc *next; | ||
1134 | }; | ||
1135 | |||
1136 | struct bq_desc { | ||
1137 | union { | ||
1138 | struct page *lbq_page; | ||
1139 | struct sk_buff *skb; | ||
1140 | } p; | ||
1141 | struct bq_element *bq; | ||
1142 | int index; | ||
1143 | DECLARE_PCI_UNMAP_ADDR(mapaddr); | ||
1144 | DECLARE_PCI_UNMAP_LEN(maplen); | ||
1145 | }; | ||
1146 | |||
1147 | #define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) | ||
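/*
 * Usage sketch: QL_TXQ_IDX() spreads transmits across the TX rings by
 * submitting CPU; the skb argument is accepted but currently unused.
 * (struct ql_adapter is defined later in this header.)
 */
static inline struct tx_ring *ql_sketch_pick_txq(struct ql_adapter *qdev,
						 struct sk_buff *skb)
{
	return &qdev->tx_ring[QL_TXQ_IDX(qdev, skb)];
}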
1148 | |||
1149 | struct tx_ring { | ||
1150 | /* | ||
1151 | * queue info. | ||
1152 | */ | ||
1153 | struct wqicb wqicb; /* structure used to inform chip of new queue */ | ||
1154 | void *wq_base; /* pci_alloc:virtual addr for tx */ | ||
1155 | dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */ | ||
1156 | u32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */ | ||
1157 | dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */ | ||
1158 | u32 wq_size; /* size in bytes of queue area */ | ||
1159 | u32 wq_len; /* number of entries in queue */ | ||
1160 | void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */ | ||
1161 | void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */ | ||
1162 | u16 prod_idx; /* current value for prod idx */ | ||
1163 | u16 cq_id; /* completion (rx) queue for tx completions */ | ||
1164 | u8 wq_id; /* queue id for this entry */ | ||
1165 | u8 reserved1[3]; | ||
1166 | struct tx_ring_desc *q; /* descriptor list for the queue */ | ||
1167 | spinlock_t lock; | ||
1168 | atomic_t tx_count; /* counts down for every outstanding IO */ | ||
1169 | atomic_t queue_stopped; /* Turns queue off when full. */ | ||
1170 | struct delayed_work tx_work; | ||
1171 | struct ql_adapter *qdev; | ||
1172 | }; | ||
1173 | |||
1174 | /* | ||
1175 | * Type of inbound queue. | ||
1176 | */ | ||
1177 | enum { | ||
1178 | DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */ | ||
1179 | TX_Q = 3, /* Handles outbound completions. */ | ||
1180 | RX_Q = 4, /* Handles inbound completions. */ | ||
1181 | }; | ||
1182 | |||
1183 | struct rx_ring { | ||
1184 | struct cqicb cqicb; /* The chip's completion queue init control block. */ | ||
1185 | |||
1186 | /* Completion queue elements. */ | ||
1187 | void *cq_base; | ||
1188 | dma_addr_t cq_base_dma; | ||
1189 | u32 cq_size; | ||
1190 | u32 cq_len; | ||
1191 | u16 cq_id; | ||
1192 | u32 *prod_idx_sh_reg; /* Shadowed producer register. */ | ||
1193 | dma_addr_t prod_idx_sh_reg_dma; | ||
1194 | void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */ | ||
1195 | u32 cnsmr_idx; /* current sw idx */ | ||
1196 | struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */ | ||
1197 | void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */ | ||
1198 | |||
1199 | /* Large buffer queue elements. */ | ||
1200 | u32 lbq_len; /* entry count */ | ||
1201 | u32 lbq_size; /* size in bytes of queue */ | ||
1202 | u32 lbq_buf_size; | ||
1203 | void *lbq_base; | ||
1204 | dma_addr_t lbq_base_dma; | ||
1205 | void *lbq_base_indirect; | ||
1206 | dma_addr_t lbq_base_indirect_dma; | ||
1207 | struct bq_desc *lbq; /* array of control blocks */ | ||
1208 | void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */ | ||
1209 | u32 lbq_prod_idx; /* current sw prod idx */ | ||
1210 | u32 lbq_curr_idx; /* next entry we expect */ | ||
1211 | u32 lbq_clean_idx; /* beginning of new descs */ | ||
1212 | u32 lbq_free_cnt; /* free buffer desc cnt */ | ||
1213 | |||
1214 | /* Small buffer queue elements. */ | ||
1215 | u32 sbq_len; /* entry count */ | ||
1216 | u32 sbq_size; /* size in bytes of queue */ | ||
1217 | u32 sbq_buf_size; | ||
1218 | void *sbq_base; | ||
1219 | dma_addr_t sbq_base_dma; | ||
1220 | void *sbq_base_indirect; | ||
1221 | dma_addr_t sbq_base_indirect_dma; | ||
1222 | struct bq_desc *sbq; /* array of control blocks */ | ||
1223 | void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */ | ||
1224 | u32 sbq_prod_idx; /* current sw prod idx */ | ||
1225 | u32 sbq_curr_idx; /* next entry we expect */ | ||
1226 | u32 sbq_clean_idx; /* beginning of new descs */ | ||
1227 | u32 sbq_free_cnt; /* free buffer desc cnt */ | ||
1228 | |||
1229 | /* Misc. handler elements. */ | ||
1230 | u32 type; /* Type of queue, tx, rx, or default. */ | ||
1231 | u32 irq; /* Which vector this ring is assigned. */ | ||
1232 | u32 cpu; /* Which CPU this should run on. */ | ||
1233 | char name[IFNAMSIZ + 5]; | ||
1234 | struct napi_struct napi; | ||
1235 | struct delayed_work rx_work; | ||
1236 | u8 reserved; | ||
1237 | struct ql_adapter *qdev; | ||
1238 | }; | ||
1239 | |||
1240 | /* | ||
1241 | * Hash ID value used with the RSS Initialization Control Block (ricb). | ||
1242 | */ | ||
1243 | struct hash_id { | ||
1244 | u8 value[4]; | ||
1245 | }; | ||
1246 | |||
1247 | struct nic_stats { | ||
1248 | /* | ||
1249 | * These stats come from offset 200h to 278h | ||
1250 | * in the XGMAC register space. | ||
1251 | */ | ||
1252 | u64 tx_pkts; | ||
1253 | u64 tx_bytes; | ||
1254 | u64 tx_mcast_pkts; | ||
1255 | u64 tx_bcast_pkts; | ||
1256 | u64 tx_ucast_pkts; | ||
1257 | u64 tx_ctl_pkts; | ||
1258 | u64 tx_pause_pkts; | ||
1259 | u64 tx_64_pkt; | ||
1260 | u64 tx_65_to_127_pkt; | ||
1261 | u64 tx_128_to_255_pkt; | ||
1262 | u64 tx_256_511_pkt; | ||
1263 | u64 tx_512_to_1023_pkt; | ||
1264 | u64 tx_1024_to_1518_pkt; | ||
1265 | u64 tx_1519_to_max_pkt; | ||
1266 | u64 tx_undersize_pkt; | ||
1267 | u64 tx_oversize_pkt; | ||
1268 | |||
1269 | /* | ||
1270 | * These stats come from offset 300h to 3C8h | ||
1271 | * in the XGMAC register space. | ||
1272 | */ | ||
1273 | u64 rx_bytes; | ||
1274 | u64 rx_bytes_ok; | ||
1275 | u64 rx_pkts; | ||
1276 | u64 rx_pkts_ok; | ||
1277 | u64 rx_bcast_pkts; | ||
1278 | u64 rx_mcast_pkts; | ||
1279 | u64 rx_ucast_pkts; | ||
1280 | u64 rx_undersize_pkts; | ||
1281 | u64 rx_oversize_pkts; | ||
1282 | u64 rx_jabber_pkts; | ||
1283 | u64 rx_undersize_fcerr_pkts; | ||
1284 | u64 rx_drop_events; | ||
1285 | u64 rx_fcerr_pkts; | ||
1286 | u64 rx_align_err; | ||
1287 | u64 rx_symbol_err; | ||
1288 | u64 rx_mac_err; | ||
1289 | u64 rx_ctl_pkts; | ||
1290 | u64 rx_pause_pkts; | ||
1291 | u64 rx_64_pkts; | ||
1292 | u64 rx_65_to_127_pkts; | ||
1293 | u64 rx_128_255_pkts; | ||
1294 | u64 rx_256_511_pkts; | ||
1295 | u64 rx_512_to_1023_pkts; | ||
1296 | u64 rx_1024_to_1518_pkts; | ||
1297 | u64 rx_1519_to_max_pkts; | ||
1298 | u64 rx_len_err_pkts; | ||
1299 | }; | ||
1300 | |||
1301 | /* | ||
1302 | * intr_context structure is used during initialization | ||
1303 | * to hook the interrupts. It is also used in a single | ||
1304 | * irq environment as a context to the ISR. | ||
1305 | */ | ||
1306 | struct intr_context { | ||
1307 | struct ql_adapter *qdev; | ||
1308 | u32 intr; | ||
1309 | u32 hooked; | ||
1310 | u32 intr_en_mask; /* value/mask used to enable this intr */ | ||
1311 | u32 intr_dis_mask; /* value/mask used to disable this intr */ | ||
1312 | u32 intr_read_mask; /* value/mask used to read this intr */ | ||
1313 | char name[IFNAMSIZ * 2]; | ||
1314 | atomic_t irq_cnt; /* irq_cnt is used in single vector | ||
1315 | * environment. It's incremented for each | ||
1316 | * irq handler that is scheduled. When each | ||
1317 | * handler finishes it decrements irq_cnt and | ||
1318 | * enables interrupts if it's zero. See the | ||
 | * illustrative sketch after this structure. */ | ||
1319 | irq_handler_t handler; | ||
1320 | }; | ||
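/*
 * Illustrative sketch of the irq_cnt scheme described above (assumed
 * helper flow, not the driver's actual ISR): each completed handler
 * drops the count, and the last one re-enables the interrupt.
 */
static inline void ql_sketch_isr_done(struct intr_context *ctx)
{
	if (atomic_dec_and_test(&ctx->irq_cnt))
		ql_enable_completion_interrupt(ctx->qdev, ctx->intr);
}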
1321 | |||
1322 | /* adapter flags definitions. */ | ||
1323 | enum { | ||
1324 | QL_ADAPTER_UP = (1 << 0), /* Adapter has been brought up. */ | ||
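	/* Note: the next two flags share bit 3; the legacy and MSI
	 * interrupt modes are mutually exclusive, so only one is set.
	 */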
1325 | QL_LEGACY_ENABLED = (1 << 3), | ||
1326 | QL_MSI_ENABLED = (1 << 3), | ||
1327 | QL_MSIX_ENABLED = (1 << 4), | ||
1328 | QL_DMA64 = (1 << 5), | ||
1329 | QL_PROMISCUOUS = (1 << 6), | ||
1330 | QL_ALLMULTI = (1 << 7), | ||
1331 | }; | ||
1332 | |||
1333 | /* link_status bit definitions */ | ||
1334 | enum { | ||
1335 | LOOPBACK_MASK = 0x00000700, | ||
1336 | LOOPBACK_PCS = 0x00000100, | ||
1337 | LOOPBACK_HSS = 0x00000200, | ||
1338 | LOOPBACK_EXT = 0x00000300, | ||
1339 | PAUSE_MASK = 0x000000c0, | ||
1340 | PAUSE_STD = 0x00000040, | ||
1341 | PAUSE_PRI = 0x00000080, | ||
1342 | SPEED_MASK = 0x00000038, | ||
1343 | SPEED_100Mb = 0x00000000, | ||
1344 | SPEED_1Gb = 0x00000008, | ||
1345 | SPEED_10Gb = 0x00000010, | ||
1346 | LINK_TYPE_MASK = 0x00000007, | ||
1347 | LINK_TYPE_XFI = 0x00000001, | ||
1348 | LINK_TYPE_XAUI = 0x00000002, | ||
1349 | LINK_TYPE_XFI_BP = 0x00000003, | ||
1350 | LINK_TYPE_XAUI_BP = 0x00000004, | ||
1351 | LINK_TYPE_10GBASET = 0x00000005, | ||
1352 | }; | ||
1353 | |||
1354 | /* | ||
1355 | * The main Adapter structure definition. | ||
1356 | * This structure has all fields relevant to the hardware. | ||
1357 | */ | ||
1358 | struct ql_adapter { | ||
1359 | struct ricb ricb; | ||
1360 | unsigned long flags; | ||
1361 | u32 wol; | ||
1362 | |||
1363 | struct nic_stats nic_stats; | ||
1364 | |||
1365 | struct vlan_group *vlgrp; | ||
1366 | |||
1367 | /* PCI Configuration information for this device */ | ||
1368 | struct pci_dev *pdev; | ||
1369 | struct net_device *ndev; /* Parent NET device */ | ||
1370 | |||
1371 | /* Hardware information */ | ||
1372 | u32 chip_rev_id; | ||
1373 | u32 func; /* PCI function for this adapter */ | ||
1374 | |||
1375 | spinlock_t adapter_lock; | ||
1376 | spinlock_t hw_lock; | ||
1377 | spinlock_t stats_lock; | ||
1378 | spinlock_t legacy_lock; /* used for maintaining legacy intr sync */ | ||
1379 | |||
1380 | /* PCI Bus Relative Register Addresses */ | ||
1381 | void __iomem *reg_base; | ||
1382 | void __iomem *doorbell_area; | ||
1383 | u32 doorbell_area_size; | ||
1384 | |||
1385 | u32 msg_enable; | ||
1386 | |||
1387 | /* Page for Shadow Registers */ | ||
1388 | void *rx_ring_shadow_reg_area; | ||
1389 | dma_addr_t rx_ring_shadow_reg_dma; | ||
1390 | void *tx_ring_shadow_reg_area; | ||
1391 | dma_addr_t tx_ring_shadow_reg_dma; | ||
1392 | |||
1393 | u32 mailbox_in; | ||
1394 | u32 mailbox_out; | ||
1395 | |||
1396 | int tx_ring_size; | ||
1397 | int rx_ring_size; | ||
1398 | u32 intr_count; | ||
1399 | struct msix_entry *msi_x_entry; | ||
1400 | struct intr_context intr_context[MAX_RX_RINGS]; | ||
1401 | |||
1402 | int (*legacy_check) (struct ql_adapter *); | ||
1403 | |||
1404 | int tx_ring_count; /* One per online CPU. */ | ||
1405 | u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */ | ||
1406 | u32 rss_ring_count; /* One per online CPU. */ | ||
1407 | /* | ||
1408 | * rx_ring_count = | ||
1409 | * one default queue + | ||
1410 | * (CPU count * outbound completion rx_ring) + | ||
1411 | * (CPU count * inbound (RSS) completion rx_ring); | ||
 | * see the illustrative sketch after this structure. | ||
1412 | */ | ||
1413 | int rx_ring_count; | ||
1414 | int ring_mem_size; | ||
1415 | void *ring_mem; | ||
1416 | struct rx_ring *rx_ring; | ||
1417 | int rx_csum; | ||
1418 | struct tx_ring *tx_ring; | ||
1419 | u32 default_rx_queue; | ||
1420 | |||
1421 | u16 rx_coalesce_usecs; /* cqicb->int_delay */ | ||
1422 | u16 rx_max_coalesced_frames; /* cqicb->pkt_int_delay */ | ||
1423 | u16 tx_coalesce_usecs; /* cqicb->int_delay */ | ||
1424 | u16 tx_max_coalesced_frames; /* cqicb->pkt_int_delay */ | ||
1425 | |||
1426 | u32 xg_sem_mask; | ||
1427 | u32 port_link_up; | ||
1428 | u32 port_init; | ||
1429 | u32 link_status; | ||
1430 | |||
1431 | struct flash_params flash; | ||
1432 | |||
1433 | struct net_device_stats stats; | ||
1434 | struct workqueue_struct *q_workqueue; | ||
1435 | struct workqueue_struct *workqueue; | ||
1436 | struct delayed_work asic_reset_work; | ||
1437 | struct delayed_work mpi_reset_work; | ||
1438 | struct delayed_work mpi_work; | ||
1439 | }; | ||
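/*
 * Illustrative sketch of the ring-count arithmetic described in the
 * structure above: with N online CPUs there are N TX rings, and the RX
 * ring array holds one default queue plus N outbound-completion rings
 * plus N inbound (RSS) completion rings.
 */
static inline int ql_sketch_rx_ring_count(int online_cpus)
{
	return 1 + online_cpus + online_cpus;
}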
1440 | |||
1441 | /* | ||
1442 | * Typical register read accessor for a memory-mapped device. | ||
1443 | */ | ||
1444 | static inline u32 ql_read32(const struct ql_adapter *qdev, int reg) | ||
1445 | { | ||
1446 | return readl(qdev->reg_base + reg); | ||
1447 | } | ||
1448 | |||
1449 | /* | ||
1450 | * Typical register write accessor for a memory-mapped device. | ||
1451 | */ | ||
1452 | static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val) | ||
1453 | { | ||
1454 | writel(val, qdev->reg_base + reg); | ||
1455 | } | ||
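/*
 * Usage sketch: a read-modify-write built on the accessors above.
 * Illustrative only; a real caller would serialize with hw_lock.
 */
static inline void ql_sketch_set_bits32(struct ql_adapter *qdev, int reg,
					u32 bits)
{
	ql_write32(qdev, reg, ql_read32(qdev, reg) | bits);
}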
1456 | |||
1457 | /* | ||
1458 | * Doorbell Registers: | ||
1459 | * Doorbell registers are virtual registers in the PCI memory space. | ||
1460 | * The space is allocated by the chip during PCI initialization. The | ||
1461 | * device driver finds the doorbell address in BAR 3 in PCI config space. | ||
1462 | * The registers are used to control outbound and inbound queues; for | ||
1463 | * example, to advance the producer index of an outbound queue. Each | ||
1464 | * queue uses one 4k chunk of memory. The lower half of the space is | ||
1465 | * for outbound queues; the upper half is for inbound queues. | ||
1466 | */ | ||
1467 | static inline void ql_write_db_reg(u32 val, void __iomem *addr) | ||
1468 | { | ||
1469 | writel(val, addr); | ||
1470 | mmiowb(); | ||
1471 | } | ||
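/*
 * Usage sketch: ringing a TX ring's producer-index doorbell after new
 * requests are queued.  prod_idx_db_reg points at offset 0x00 of the
 * queue's doorbell chunk (see struct tx_doorbell_context above).
 */
static inline void ql_sketch_ring_tx_db(struct tx_ring *tx_ring)
{
	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
}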
1472 | |||
1473 | /* | ||
1474 | * Shadow Registers: | ||
1475 | * Outbound queues have a consumer index that is maintained by the chip. | ||
1476 | * Inbound queues have a producer index that is maintained by the chip. | ||
1477 | * For lower overhead, these registers are "shadowed" to host memory | ||
1478 | * which allows the device driver to track the queue progress without | ||
1479 | * PCI reads. When an entry is placed on an inbound queue, the chip will | ||
1480 | * update the relevant index register and then copy the value to the | ||
1481 | * shadow register in host memory. | ||
1482 | */ | ||
1483 | static inline unsigned int ql_read_sh_reg(const volatile void *addr) | ||
1484 | { | ||
1485 | return *(volatile unsigned int __force *)addr; | ||
1486 | } | ||
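/*
 * Usage sketch: polling a completion queue through its shadowed
 * producer index; no PCI read is required because the chip pushes
 * index updates into host memory.
 */
static inline int ql_sketch_cq_has_work(struct rx_ring *rx_ring)
{
	return ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx;
}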
1487 | |||
1488 | extern char qlge_driver_name[]; | ||
1489 | extern const char qlge_driver_version[]; | ||
1490 | extern const struct ethtool_ops qlge_ethtool_ops; | ||
1491 | |||
1492 | extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask); | ||
1493 | extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask); | ||
1494 | extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data); | ||
1495 | extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, | ||
1496 | u32 *value); | ||
1497 | extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value); | ||
1498 | extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, | ||
1499 | u16 q_id); | ||
1500 | void ql_queue_fw_error(struct ql_adapter *qdev); | ||
1501 | void ql_mpi_work(struct work_struct *work); | ||
1502 | void ql_mpi_reset_work(struct work_struct *work); | ||
1503 | int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); | ||
1504 | void ql_queue_asic_error(struct ql_adapter *qdev); | ||
1505 | void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); | ||
1506 | void ql_set_ethtool_ops(struct net_device *ndev); | ||
1507 | int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data); | ||
1508 | |||
1509 | #if 1 | ||
1510 | #define QL_ALL_DUMP | ||
1511 | #define QL_REG_DUMP | ||
1512 | #define QL_DEV_DUMP | ||
1513 | #define QL_CB_DUMP | ||
1514 | /* #define QL_IB_DUMP */ | ||
1515 | /* #define QL_OB_DUMP */ | ||
1516 | #endif | ||
1517 | |||
1518 | #ifdef QL_REG_DUMP | ||
1519 | extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev); | ||
1520 | extern void ql_dump_routing_entries(struct ql_adapter *qdev); | ||
1521 | extern void ql_dump_regs(struct ql_adapter *qdev); | ||
1522 | #define QL_DUMP_REGS(qdev) ql_dump_regs(qdev) | ||
1523 | #define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev) | ||
1524 | #define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev) | ||
1525 | #else | ||
1526 | #define QL_DUMP_REGS(qdev) | ||
1527 | #define QL_DUMP_ROUTE(qdev) | ||
1528 | #define QL_DUMP_XGMAC_CONTROL_REGS(qdev) | ||
1529 | #endif | ||
1530 | |||
1531 | #ifdef QL_STAT_DUMP | ||
1532 | extern void ql_dump_stat(struct ql_adapter *qdev); | ||
1533 | #define QL_DUMP_STAT(qdev) ql_dump_stat(qdev) | ||
1534 | #else | ||
1535 | #define QL_DUMP_STAT(qdev) | ||
1536 | #endif | ||
1537 | |||
1538 | #ifdef QL_DEV_DUMP | ||
1539 | extern void ql_dump_qdev(struct ql_adapter *qdev); | ||
1540 | #define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev) | ||
1541 | #else | ||
1542 | #define QL_DUMP_QDEV(qdev) | ||
1543 | #endif | ||
1544 | |||
1545 | #ifdef QL_CB_DUMP | ||
1546 | extern void ql_dump_wqicb(struct wqicb *wqicb); | ||
1547 | extern void ql_dump_tx_ring(struct tx_ring *tx_ring); | ||
1548 | extern void ql_dump_ricb(struct ricb *ricb); | ||
1549 | extern void ql_dump_cqicb(struct cqicb *cqicb); | ||
1550 | extern void ql_dump_rx_ring(struct rx_ring *rx_ring); | ||
1551 | extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id); | ||
1552 | #define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb) | ||
1553 | #define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb) | ||
1554 | #define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring) | ||
1555 | #define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb) | ||
1556 | #define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring) | ||
1557 | #define QL_DUMP_HW_CB(qdev, size, bit, q_id) \ | ||
1558 | ql_dump_hw_cb(qdev, size, bit, q_id) | ||
1559 | #else | ||
1560 | #define QL_DUMP_RICB(ricb) | ||
1561 | #define QL_DUMP_WQICB(wqicb) | ||
1562 | #define QL_DUMP_TX_RING(tx_ring) | ||
1563 | #define QL_DUMP_CQICB(cqicb) | ||
1564 | #define QL_DUMP_RX_RING(rx_ring) | ||
1565 | #define QL_DUMP_HW_CB(qdev, size, bit, q_id) | ||
1566 | #endif | ||
1567 | |||
1568 | #ifdef QL_OB_DUMP | ||
1569 | extern void ql_dump_tx_desc(struct tx_buf_desc *tbd); | ||
1570 | extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb); | ||
1571 | extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp); | ||
1572 | #define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb) | ||
1573 | #define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp) | ||
1574 | #else | ||
1575 | #define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) | ||
1576 | #define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) | ||
1577 | #endif | ||
1578 | |||
1579 | #ifdef QL_IB_DUMP | ||
1580 | extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp); | ||
1581 | #define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp) | ||
1582 | #else | ||
1583 | #define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) | ||
1584 | #endif | ||
1585 | |||
1586 | #ifdef QL_ALL_DUMP | ||
1587 | extern void ql_dump_all(struct ql_adapter *qdev); | ||
1588 | #define QL_DUMP_ALL(qdev) ql_dump_all(qdev) | ||
1589 | #else | ||
1590 | #define QL_DUMP_ALL(qdev) | ||
1591 | #endif | ||
1592 | |||
1593 | #endif /* _QLGE_H_ */ | ||
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c new file mode 100644 index 000000000000..f0392b166170 --- /dev/null +++ b/drivers/net/qlge/qlge_dbg.c | |||
@@ -0,0 +1,858 @@ | |||
1 | #include "qlge.h" | ||
2 | |||
3 | #ifdef QL_REG_DUMP | ||
4 | static void ql_dump_intr_states(struct ql_adapter *qdev) | ||
5 | { | ||
6 | int i; | ||
7 | u32 value; | ||
8 | for (i = 0; i < qdev->intr_count; i++) { | ||
9 | ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); | ||
10 | value = ql_read32(qdev, INTR_EN); | ||
11 | printk(KERN_ERR PFX | ||
12 | "%s: Interrupt %d is %s.\n", | ||
13 | qdev->ndev->name, i, | ||
14 | (value & INTR_EN_EN ? "enabled" : "disabled")); | ||
15 | } | ||
16 | } | ||
17 | |||
18 | void ql_dump_xgmac_control_regs(struct ql_adapter *qdev) | ||
19 | { | ||
20 | u32 data; | ||
21 | if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { | ||
22 | printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__); | ||
23 | return; | ||
24 | } | ||
25 | ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data); | ||
26 | printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name, | ||
27 | data); | ||
28 | ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data); | ||
29 | printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name, | ||
30 | data); | ||
31 | ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data); | ||
32 | printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name, | ||
33 | data); | ||
34 | ql_read_xgmac_reg(qdev, TX_CFG, &data); | ||
35 | printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data); | ||
36 | ql_read_xgmac_reg(qdev, RX_CFG, &data); | ||
37 | printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data); | ||
38 | ql_read_xgmac_reg(qdev, FLOW_CTL, &data); | ||
39 | printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name, | ||
40 | data); | ||
41 | ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data); | ||
42 | printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name, | ||
43 | data); | ||
44 | ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data); | ||
45 | printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name, | ||
46 | data); | ||
47 | ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data); | ||
48 | printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n", | ||
49 | qdev->ndev->name, data); | ||
50 | ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data); | ||
51 | printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n", | ||
52 | qdev->ndev->name, data); | ||
53 | ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data); | ||
54 | printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name, | ||
55 | data); | ||
56 | ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data); | ||
57 | printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name, | ||
58 | data); | ||
59 | ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data); | ||
60 | printk(KERN_ERR PFX "%s: MAC_SYS_INT = 0x%.08x.\n", qdev->ndev->name, | ||
61 | data); | ||
62 | ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data); | ||
63 | printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n", | ||
64 | qdev->ndev->name, data); | ||
65 | ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data); | ||
66 | printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name, | ||
67 | data); | ||
68 | ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data); | ||
69 | printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n", | ||
70 | qdev->ndev->name, data); | ||
71 | ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data); | ||
72 | printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name, | ||
73 | data); | ||
74 | ql_sem_unlock(qdev, qdev->xg_sem_mask); | ||
75 | |||
76 | } | ||
77 | |||
78 | static void ql_dump_ets_regs(struct ql_adapter *qdev) | ||
79 | { | ||
80 | } | ||
81 | |||
82 | static void ql_dump_cam_entries(struct ql_adapter *qdev) | ||
83 | { | ||
84 | int i; | ||
85 | u32 value[3]; | ||
86 | for (i = 0; i < 4; i++) { | ||
87 | if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) { | ||
88 | printk(KERN_ERR PFX | ||
89 | "%s: Failed read of mac index register.\n", | ||
90 | __func__); | ||
91 | return; | ||
92 | } else { | ||
93 | if (value[0]) | ||
94 | printk(KERN_ERR PFX | ||
95 | "%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x.\n", | ||
96 | qdev->ndev->name, i, value[1], value[0], | ||
97 | value[2]); | ||
98 | } | ||
99 | } | ||
100 | for (i = 0; i < 32; i++) { | ||
101 | if (ql_get_mac_addr_reg | ||
102 | (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) { | ||
103 | printk(KERN_ERR PFX | ||
104 | "%s: Failed read of mac index register.\n", | ||
105 | __func__); | ||
106 | return; | ||
107 | } else { | ||
108 | if (value[0]) | ||
109 | printk(KERN_ERR PFX | ||
110 | "%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x.\n", | ||
111 | qdev->ndev->name, i, value[1], value[0]); | ||
112 | } | ||
113 | } | ||
114 | } | ||
115 | |||
116 | void ql_dump_routing_entries(struct ql_adapter *qdev) | ||
117 | { | ||
118 | int i; | ||
119 | u32 value; | ||
120 | for (i = 0; i < 16; i++) { | ||
121 | value = 0; | ||
122 | if (ql_get_routing_reg(qdev, i, &value)) { | ||
123 | printk(KERN_ERR PFX | ||
124 | "%s: Failed read of routing index register.\n", | ||
125 | __func__); | ||
126 | return; | ||
127 | } else { | ||
128 | if (value) | ||
129 | printk(KERN_ERR PFX | ||
130 | "%s: Routing Mask %d = 0x%.08x.\n", | ||
131 | qdev->ndev->name, i, value); | ||
132 | } | ||
133 | } | ||
134 | } | ||
135 | |||
136 | void ql_dump_regs(struct ql_adapter *qdev) | ||
137 | { | ||
138 | printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func); | ||
139 | printk(KERN_ERR PFX "SYS = 0x%x.\n", | ||
140 | ql_read32(qdev, SYS)); | ||
141 | printk(KERN_ERR PFX "RST_FO = 0x%x.\n", | ||
142 | ql_read32(qdev, RST_FO)); | ||
143 | printk(KERN_ERR PFX "FSC = 0x%x.\n", | ||
144 | ql_read32(qdev, FSC)); | ||
145 | printk(KERN_ERR PFX "CSR = 0x%x.\n", | ||
146 | ql_read32(qdev, CSR)); | ||
147 | printk(KERN_ERR PFX "ICB_RID = 0x%x.\n", | ||
148 | ql_read32(qdev, ICB_RID)); | ||
149 | printk(KERN_ERR PFX "ICB_L = 0x%x.\n", | ||
150 | ql_read32(qdev, ICB_L)); | ||
151 | printk(KERN_ERR PFX "ICB_H = 0x%x.\n", | ||
152 | ql_read32(qdev, ICB_H)); | ||
153 | printk(KERN_ERR PFX "CFG = 0x%x.\n", | ||
154 | ql_read32(qdev, CFG)); | ||
155 | printk(KERN_ERR PFX "BIOS_ADDR = 0x%x.\n", | ||
156 | ql_read32(qdev, BIOS_ADDR)); | ||
157 | printk(KERN_ERR PFX "STS = 0x%x.\n", | ||
158 | ql_read32(qdev, STS)); | ||
159 | printk(KERN_ERR PFX "INTR_EN = 0x%x.\n", | ||
160 | ql_read32(qdev, INTR_EN)); | ||
161 | printk(KERN_ERR PFX "INTR_MASK = 0x%x.\n", | ||
162 | ql_read32(qdev, INTR_MASK)); | ||
163 | printk(KERN_ERR PFX "ISR1 = 0x%x.\n", | ||
164 | ql_read32(qdev, ISR1)); | ||
165 | printk(KERN_ERR PFX "ISR2 = 0x%x.\n", | ||
166 | ql_read32(qdev, ISR2)); | ||
167 | printk(KERN_ERR PFX "ISR3 = 0x%x.\n", | ||
168 | ql_read32(qdev, ISR3)); | ||
169 | printk(KERN_ERR PFX "ISR4 = 0x%x.\n", | ||
170 | ql_read32(qdev, ISR4)); | ||
171 | printk(KERN_ERR PFX "REV_ID = 0x%x.\n", | ||
172 | ql_read32(qdev, REV_ID)); | ||
173 | printk(KERN_ERR PFX "FRC_ECC_ERR = 0x%x.\n", | ||
174 | ql_read32(qdev, FRC_ECC_ERR)); | ||
175 | printk(KERN_ERR PFX "ERR_STS = 0x%x.\n", | ||
176 | ql_read32(qdev, ERR_STS)); | ||
177 | printk(KERN_ERR PFX "RAM_DBG_ADDR = 0x%x.\n", | ||
178 | ql_read32(qdev, RAM_DBG_ADDR)); | ||
179 | printk(KERN_ERR PFX "RAM_DBG_DATA = 0x%x.\n", | ||
180 | ql_read32(qdev, RAM_DBG_DATA)); | ||
181 | printk(KERN_ERR PFX "ECC_ERR_CNT = 0x%x.\n", | ||
182 | ql_read32(qdev, ECC_ERR_CNT)); | ||
183 | printk(KERN_ERR PFX "SEM = 0x%x.\n", | ||
184 | ql_read32(qdev, SEM)); | ||
185 | printk(KERN_ERR PFX "GPIO_1 = 0x%x.\n", | ||
186 | ql_read32(qdev, GPIO_1)); | ||
187 | printk(KERN_ERR PFX "GPIO_2 = 0x%x.\n", | ||
188 | ql_read32(qdev, GPIO_2)); | ||
189 | printk(KERN_ERR PFX "GPIO_3 = 0x%x.\n", | ||
190 | ql_read32(qdev, GPIO_3)); | ||
191 | printk(KERN_ERR PFX "XGMAC_ADDR = 0x%x.\n", | ||
192 | ql_read32(qdev, XGMAC_ADDR)); | ||
193 | printk(KERN_ERR PFX "XGMAC_DATA = 0x%x.\n", | ||
194 | ql_read32(qdev, XGMAC_DATA)); | ||
195 | printk(KERN_ERR PFX "NIC_ETS = 0x%x.\n", | ||
196 | ql_read32(qdev, NIC_ETS)); | ||
197 | printk(KERN_ERR PFX "CNA_ETS = 0x%x.\n", | ||
198 | ql_read32(qdev, CNA_ETS)); | ||
199 | printk(KERN_ERR PFX "FLASH_ADDR = 0x%x.\n", | ||
200 | ql_read32(qdev, FLASH_ADDR)); | ||
201 | printk(KERN_ERR PFX "FLASH_DATA = 0x%x.\n", | ||
202 | ql_read32(qdev, FLASH_DATA)); | ||
203 | printk(KERN_ERR PFX "CQ_STOP = 0x%x.\n", | ||
204 | ql_read32(qdev, CQ_STOP)); | ||
205 | printk(KERN_ERR PFX "PAGE_TBL_RID = 0x%x.\n", | ||
206 | ql_read32(qdev, PAGE_TBL_RID)); | ||
207 | printk(KERN_ERR PFX "WQ_PAGE_TBL_LO = 0x%x.\n", | ||
208 | ql_read32(qdev, WQ_PAGE_TBL_LO)); | ||
209 | printk(KERN_ERR PFX "WQ_PAGE_TBL_HI = 0x%x.\n", | ||
210 | ql_read32(qdev, WQ_PAGE_TBL_HI)); | ||
211 | printk(KERN_ERR PFX "CQ_PAGE_TBL_LO = 0x%x.\n", | ||
212 | ql_read32(qdev, CQ_PAGE_TBL_LO)); | ||
213 | printk(KERN_ERR PFX "CQ_PAGE_TBL_HI = 0x%x.\n", | ||
214 | ql_read32(qdev, CQ_PAGE_TBL_HI)); | ||
215 | printk(KERN_ERR PFX "COS_DFLT_CQ1 = 0x%x.\n", | ||
216 | ql_read32(qdev, COS_DFLT_CQ1)); | ||
217 | printk(KERN_ERR PFX "COS_DFLT_CQ2 = 0x%x.\n", | ||
218 | ql_read32(qdev, COS_DFLT_CQ2)); | ||
219 | printk(KERN_ERR PFX "SPLT_HDR = 0x%x.\n", | ||
220 | ql_read32(qdev, SPLT_HDR)); | ||
221 | printk(KERN_ERR PFX "FC_PAUSE_THRES = 0x%x.\n", | ||
222 | ql_read32(qdev, FC_PAUSE_THRES)); | ||
223 | printk(KERN_ERR PFX "NIC_PAUSE_THRES = 0x%x.\n", | ||
224 | ql_read32(qdev, NIC_PAUSE_THRES)); | ||
225 | printk(KERN_ERR PFX "FC_ETHERTYPE = 0x%x.\n", | ||
226 | ql_read32(qdev, FC_ETHERTYPE)); | ||
227 | printk(KERN_ERR PFX "FC_RCV_CFG = 0x%x.\n", | ||
228 | ql_read32(qdev, FC_RCV_CFG)); | ||
229 | printk(KERN_ERR PFX "NIC_RCV_CFG = 0x%x.\n", | ||
230 | ql_read32(qdev, NIC_RCV_CFG)); | ||
231 | printk(KERN_ERR PFX "FC_COS_TAGS = 0x%x.\n", | ||
232 | ql_read32(qdev, FC_COS_TAGS)); | ||
233 | printk(KERN_ERR PFX "NIC_COS_TAGS = 0x%x.\n", | ||
234 | ql_read32(qdev, NIC_COS_TAGS)); | ||
235 | printk(KERN_ERR PFX "MGMT_RCV_CFG = 0x%x.\n", | ||
236 | ql_read32(qdev, MGMT_RCV_CFG)); | ||
237 | printk(KERN_ERR PFX "XG_SERDES_ADDR = 0x%x.\n", | ||
238 | ql_read32(qdev, XG_SERDES_ADDR)); | ||
239 | printk(KERN_ERR PFX "XG_SERDES_DATA = 0x%x.\n", | ||
240 | ql_read32(qdev, XG_SERDES_DATA)); | ||
241 | printk(KERN_ERR PFX "PRB_MX_ADDR = 0x%x.\n", | ||
242 | ql_read32(qdev, PRB_MX_ADDR)); | ||
243 | printk(KERN_ERR PFX "PRB_MX_DATA = 0x%x.\n", | ||
244 | ql_read32(qdev, PRB_MX_DATA)); | ||
245 | ql_dump_intr_states(qdev); | ||
246 | ql_dump_xgmac_control_regs(qdev); | ||
247 | ql_dump_ets_regs(qdev); | ||
248 | ql_dump_cam_entries(qdev); | ||
249 | ql_dump_routing_entries(qdev); | ||
250 | } | ||
251 | #endif | ||
252 | |||
253 | #ifdef QL_STAT_DUMP | ||
254 | void ql_dump_stat(struct ql_adapter *qdev) | ||
255 | { | ||
256 | printk(KERN_ERR "%s: Enter.\n", __func__); | ||
257 | printk(KERN_ERR "tx_pkts = %ld\n", | ||
258 | (unsigned long)qdev->nic_stats.tx_pkts); | ||
259 | printk(KERN_ERR "tx_bytes = %ld\n", | ||
260 | (unsigned long)qdev->nic_stats.tx_bytes); | ||
261 | printk(KERN_ERR "tx_mcast_pkts = %ld.\n", | ||
262 | (unsigned long)qdev->nic_stats.tx_mcast_pkts); | ||
263 | printk(KERN_ERR "tx_bcast_pkts = %ld.\n", | ||
264 | (unsigned long)qdev->nic_stats.tx_bcast_pkts); | ||
265 | printk(KERN_ERR "tx_ucast_pkts = %ld.\n", | ||
266 | (unsigned long)qdev->nic_stats.tx_ucast_pkts); | ||
267 | printk(KERN_ERR "tx_ctl_pkts = %ld.\n", | ||
268 | (unsigned long)qdev->nic_stats.tx_ctl_pkts); | ||
269 | printk(KERN_ERR "tx_pause_pkts = %ld.\n", | ||
270 | (unsigned long)qdev->nic_stats.tx_pause_pkts); | ||
271 | printk(KERN_ERR "tx_64_pkt = %ld.\n", | ||
272 | (unsigned long)qdev->nic_stats.tx_64_pkt); | ||
273 | printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n", | ||
274 | (unsigned long)qdev->nic_stats.tx_65_to_127_pkt); | ||
275 | printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n", | ||
276 | (unsigned long)qdev->nic_stats.tx_128_to_255_pkt); | ||
277 | printk(KERN_ERR "tx_256_511_pkt = %ld.\n", | ||
278 | (unsigned long)qdev->nic_stats.tx_256_511_pkt); | ||
279 | printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n", | ||
280 | (unsigned long)qdev->nic_stats.tx_512_to_1023_pkt); | ||
281 | printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n", | ||
282 | (unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt); | ||
283 | printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n", | ||
284 | (unsigned long)qdev->nic_stats.tx_1519_to_max_pkt); | ||
285 | printk(KERN_ERR "tx_undersize_pkt = %ld.\n", | ||
286 | (unsigned long)qdev->nic_stats.tx_undersize_pkt); | ||
287 | printk(KERN_ERR "tx_oversize_pkt = %ld.\n", | ||
288 | (unsigned long)qdev->nic_stats.tx_oversize_pkt); | ||
289 | printk(KERN_ERR "rx_bytes = %ld.\n", | ||
290 | (unsigned long)qdev->nic_stats.rx_bytes); | ||
291 | printk(KERN_ERR "rx_bytes_ok = %ld.\n", | ||
292 | (unsigned long)qdev->nic_stats.rx_bytes_ok); | ||
293 | printk(KERN_ERR "rx_pkts = %ld.\n", | ||
294 | (unsigned long)qdev->nic_stats.rx_pkts); | ||
295 | printk(KERN_ERR "rx_pkts_ok = %ld.\n", | ||
296 | (unsigned long)qdev->nic_stats.rx_pkts_ok); | ||
297 | printk(KERN_ERR "rx_bcast_pkts = %ld.\n", | ||
298 | (unsigned long)qdev->nic_stats.rx_bcast_pkts); | ||
299 | printk(KERN_ERR "rx_mcast_pkts = %ld.\n", | ||
300 | (unsigned long)qdev->nic_stats.rx_mcast_pkts); | ||
301 | printk(KERN_ERR "rx_ucast_pkts = %ld.\n", | ||
302 | (unsigned long)qdev->nic_stats.rx_ucast_pkts); | ||
303 | printk(KERN_ERR "rx_undersize_pkts = %ld.\n", | ||
304 | (unsigned long)qdev->nic_stats.rx_undersize_pkts); | ||
305 | printk(KERN_ERR "rx_oversize_pkts = %ld.\n", | ||
306 | (unsigned long)qdev->nic_stats.rx_oversize_pkts); | ||
307 | printk(KERN_ERR "rx_jabber_pkts = %ld.\n", | ||
308 | (unsigned long)qdev->nic_stats.rx_jabber_pkts); | ||
309 | printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n", | ||
310 | (unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts); | ||
311 | printk(KERN_ERR "rx_drop_events = %ld.\n", | ||
312 | (unsigned long)qdev->nic_stats.rx_drop_events); | ||
313 | printk(KERN_ERR "rx_fcerr_pkts = %ld.\n", | ||
314 | (unsigned long)qdev->nic_stats.rx_fcerr_pkts); | ||
315 | printk(KERN_ERR "rx_align_err = %ld.\n", | ||
316 | (unsigned long)qdev->nic_stats.rx_align_err); | ||
317 | printk(KERN_ERR "rx_symbol_err = %ld.\n", | ||
318 | (unsigned long)qdev->nic_stats.rx_symbol_err); | ||
319 | printk(KERN_ERR "rx_mac_err = %ld.\n", | ||
320 | (unsigned long)qdev->nic_stats.rx_mac_err); | ||
321 | printk(KERN_ERR "rx_ctl_pkts = %ld.\n", | ||
322 | (unsigned long)qdev->nic_stats.rx_ctl_pkts); | ||
323 | printk(KERN_ERR "rx_pause_pkts = %ld.\n", | ||
324 | (unsigned long)qdev->nic_stats.rx_pause_pkts); | ||
325 | printk(KERN_ERR "rx_64_pkts = %ld.\n", | ||
326 | (unsigned long)qdev->nic_stats.rx_64_pkts); | ||
327 | printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n", | ||
328 | (unsigned long)qdev->nic_stats.rx_65_to_127_pkts); | ||
329 | printk(KERN_ERR "rx_128_255_pkts = %ld.\n", | ||
330 | (unsigned long)qdev->nic_stats.rx_128_255_pkts); | ||
331 | printk(KERN_ERR "rx_256_511_pkts = %ld.\n", | ||
332 | (unsigned long)qdev->nic_stats.rx_256_511_pkts); | ||
333 | printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n", | ||
334 | (unsigned long)qdev->nic_stats.rx_512_to_1023_pkts); | ||
335 | printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n", | ||
336 | (unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts); | ||
337 | printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n", | ||
338 | (unsigned long)qdev->nic_stats.rx_1519_to_max_pkts); | ||
339 | printk(KERN_ERR "rx_len_err_pkts = %ld.\n", | ||
340 | (unsigned long)qdev->nic_stats.rx_len_err_pkts); | ||
341 | } | ||
342 | #endif | ||
343 | |||
344 | #ifdef QL_DEV_DUMP | ||
345 | void ql_dump_qdev(struct ql_adapter *qdev) | ||
346 | { | ||
347 | int i; | ||
348 | printk(KERN_ERR PFX "qdev->flags = %lx.\n", | ||
349 | qdev->flags); | ||
350 | printk(KERN_ERR PFX "qdev->vlgrp = %p.\n", | ||
351 | qdev->vlgrp); | ||
352 | printk(KERN_ERR PFX "qdev->pdev = %p.\n", | ||
353 | qdev->pdev); | ||
354 | printk(KERN_ERR PFX "qdev->ndev = %p.\n", | ||
355 | qdev->ndev); | ||
356 | printk(KERN_ERR PFX "qdev->chip_rev_id = %d.\n", | ||
357 | qdev->chip_rev_id); | ||
358 | printk(KERN_ERR PFX "qdev->reg_base = %p.\n", | ||
359 | qdev->reg_base); | ||
360 | printk(KERN_ERR PFX "qdev->doorbell_area = %p.\n", | ||
361 | qdev->doorbell_area); | ||
362 | printk(KERN_ERR PFX "qdev->doorbell_area_size = %d.\n", | ||
363 | qdev->doorbell_area_size); | ||
364 | printk(KERN_ERR PFX "msg_enable = %x.\n", | ||
365 | qdev->msg_enable); | ||
366 | printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area = %p.\n", | ||
367 | qdev->rx_ring_shadow_reg_area); | ||
368 | printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma = %p.\n", | ||
369 | (void *)qdev->rx_ring_shadow_reg_dma); | ||
370 | printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area = %p.\n", | ||
371 | qdev->tx_ring_shadow_reg_area); | ||
372 | printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma = %p.\n", | ||
373 | (void *)qdev->tx_ring_shadow_reg_dma); | ||
374 | printk(KERN_ERR PFX "qdev->intr_count = %d.\n", | ||
375 | qdev->intr_count); | ||
376 | if (qdev->msi_x_entry) | ||
377 | for (i = 0; i < qdev->intr_count; i++) { | ||
378 | printk(KERN_ERR PFX | ||
379 | "msi_x_entry.[%d]vector = %d.\n", i, | ||
380 | qdev->msi_x_entry[i].vector); | ||
381 | printk(KERN_ERR PFX | ||
382 | "msi_x_entry.[%d]entry = %d.\n", i, | ||
383 | qdev->msi_x_entry[i].entry); | ||
384 | } | ||
385 | for (i = 0; i < qdev->intr_count; i++) { | ||
386 | printk(KERN_ERR PFX | ||
387 | "intr_context[%d].qdev = %p.\n", i, | ||
388 | qdev->intr_context[i].qdev); | ||
389 | printk(KERN_ERR PFX | ||
390 | "intr_context[%d].intr = %d.\n", i, | ||
391 | qdev->intr_context[i].intr); | ||
392 | printk(KERN_ERR PFX | ||
393 | "intr_context[%d].hooked = %d.\n", i, | ||
394 | qdev->intr_context[i].hooked); | ||
395 | printk(KERN_ERR PFX | ||
396 | "intr_context[%d].intr_en_mask = 0x%08x.\n", i, | ||
397 | qdev->intr_context[i].intr_en_mask); | ||
398 | printk(KERN_ERR PFX | ||
399 | "intr_context[%d].intr_dis_mask = 0x%08x.\n", i, | ||
400 | qdev->intr_context[i].intr_dis_mask); | ||
401 | printk(KERN_ERR PFX | ||
402 | "intr_context[%d].intr_read_mask = 0x%08x.\n", i, | ||
403 | qdev->intr_context[i].intr_read_mask); | ||
404 | } | ||
405 | printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count); | ||
406 | printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count); | ||
407 | printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size); | ||
408 | printk(KERN_ERR PFX "qdev->ring_mem = %p.\n", qdev->ring_mem); | ||
409 | printk(KERN_ERR PFX "qdev->intr_count = %d.\n", qdev->intr_count); | ||
410 | printk(KERN_ERR PFX "qdev->tx_ring = %p.\n", | ||
411 | qdev->tx_ring); | ||
412 | printk(KERN_ERR PFX "qdev->rss_ring_first_cq_id = %d.\n", | ||
413 | qdev->rss_ring_first_cq_id); | ||
414 | printk(KERN_ERR PFX "qdev->rss_ring_count = %d.\n", | ||
415 | qdev->rss_ring_count); | ||
416 | printk(KERN_ERR PFX "qdev->rx_ring = %p.\n", qdev->rx_ring); | ||
417 | printk(KERN_ERR PFX "qdev->default_rx_queue = %d.\n", | ||
418 | qdev->default_rx_queue); | ||
419 | printk(KERN_ERR PFX "qdev->xg_sem_mask = 0x%08x.\n", | ||
420 | qdev->xg_sem_mask); | ||
421 | printk(KERN_ERR PFX "qdev->port_link_up = 0x%08x.\n", | ||
422 | qdev->port_link_up); | ||
423 | printk(KERN_ERR PFX "qdev->port_init = 0x%08x.\n", | ||
424 | qdev->port_init); | ||
425 | |||
426 | } | ||
427 | #endif | ||
428 | |||
429 | #ifdef QL_CB_DUMP | ||
430 | void ql_dump_wqicb(struct wqicb *wqicb) | ||
431 | { | ||
432 | printk(KERN_ERR PFX "Dumping wqicb stuff...\n"); | ||
433 | printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len)); | ||
434 | printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags)); | ||
435 | printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n", | ||
436 | le16_to_cpu(wqicb->cq_id_rss)); | ||
437 | printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid)); | ||
438 | printk(KERN_ERR PFX "wqicb->wq_addr_lo = 0x%.08x.\n", | ||
439 | le32_to_cpu(wqicb->addr_lo)); | ||
440 | printk(KERN_ERR PFX "wqicb->wq_addr_hi = 0x%.08x.\n", | ||
441 | le32_to_cpu(wqicb->addr_hi)); | ||
442 | printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_lo = 0x%.08x.\n", | ||
443 | le32_to_cpu(wqicb->cnsmr_idx_addr_lo)); | ||
444 | printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_hi = 0x%.08x.\n", | ||
445 | le32_to_cpu(wqicb->cnsmr_idx_addr_hi)); | ||
446 | } | ||
447 | |||
448 | void ql_dump_tx_ring(struct tx_ring *tx_ring) | ||
449 | { | ||
450 | if (tx_ring == NULL) | ||
451 | return; | ||
452 | printk(KERN_ERR PFX | ||
453 | "===================== Dumping tx_ring %d ===============.\n", | ||
454 | tx_ring->wq_id); | ||
455 | printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base); | ||
456 | printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n", | ||
457 | (u64) tx_ring->wq_base_dma); | ||
458 | printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg = %p.\n", | ||
459 | tx_ring->cnsmr_idx_sh_reg); | ||
460 | printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg_dma = 0x%llx.\n", | ||
461 | (u64) tx_ring->cnsmr_idx_sh_reg_dma); | ||
462 | printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size); | ||
463 | printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len); | ||
464 | printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n", | ||
465 | tx_ring->prod_idx_db_reg); | ||
466 | printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n", | ||
467 | tx_ring->valid_db_reg); | ||
468 | printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx); | ||
469 | printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id); | ||
470 | printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id); | ||
471 | printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q); | ||
472 | printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n", | ||
473 | atomic_read(&tx_ring->tx_count)); | ||
474 | } | ||
475 | |||
476 | void ql_dump_ricb(struct ricb *ricb) | ||
477 | { | ||
478 | int i; | ||
479 | printk(KERN_ERR PFX | ||
480 | "===================== Dumping ricb ===============.\n"); | ||
481 | printk(KERN_ERR PFX "Dumping ricb stuff...\n"); | ||
482 | |||
483 | printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f); | ||
484 | printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n", | ||
485 | ricb->base_cq & RSS_L4K ? "RSS_L4K " : "", | ||
486 | ricb->flags & RSS_L6K ? "RSS_L6K " : "", | ||
487 | ricb->flags & RSS_LI ? "RSS_LI " : "", | ||
488 | ricb->flags & RSS_LB ? "RSS_LB " : "", | ||
489 | ricb->flags & RSS_LM ? "RSS_LM " : "", | ||
490 | ricb->flags & RSS_RI4 ? "RSS_RI4 " : "", | ||
491 | ricb->flags & RSS_RT4 ? "RSS_RT4 " : "", | ||
492 | ricb->flags & RSS_RI6 ? "RSS_RI6 " : "", | ||
493 | ricb->flags & RSS_RT6 ? "RSS_RT6 " : ""); | ||
494 | printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask)); | ||
495 | for (i = 0; i < 16; i++) | ||
496 | printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i, | ||
497 | le32_to_cpu(ricb->hash_cq_id[i])); | ||
498 | for (i = 0; i < 10; i++) | ||
499 | printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i, | ||
500 | le32_to_cpu(ricb->ipv6_hash_key[i])); | ||
501 | for (i = 0; i < 4; i++) | ||
502 | printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i, | ||
503 | le32_to_cpu(ricb->ipv4_hash_key[i])); | ||
504 | } | ||
505 | |||
506 | void ql_dump_cqicb(struct cqicb *cqicb) | ||
507 | { | ||
508 | printk(KERN_ERR PFX "Dumping cqicb stuff...\n"); | ||
509 | |||
510 | printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect); | ||
511 | printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags); | ||
512 | printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len)); | ||
513 | printk(KERN_ERR PFX "cqicb->addr_lo = %x.\n", | ||
514 | le32_to_cpu(cqicb->addr_lo)); | ||
515 | printk(KERN_ERR PFX "cqicb->addr_hi = %x.\n", | ||
516 | le32_to_cpu(cqicb->addr_hi)); | ||
517 | printk(KERN_ERR PFX "cqicb->prod_idx_addr_lo = %x.\n", | ||
518 | le32_to_cpu(cqicb->prod_idx_addr_lo)); | ||
519 | printk(KERN_ERR PFX "cqicb->prod_idx_addr_hi = %x.\n", | ||
520 | le32_to_cpu(cqicb->prod_idx_addr_hi)); | ||
521 | printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n", | ||
522 | le16_to_cpu(cqicb->pkt_delay)); | ||
523 | printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n", | ||
524 | le16_to_cpu(cqicb->irq_delay)); | ||
525 | printk(KERN_ERR PFX "cqicb->lbq_addr_lo = %x.\n", | ||
526 | le32_to_cpu(cqicb->lbq_addr_lo)); | ||
527 | printk(KERN_ERR PFX "cqicb->lbq_addr_hi = %x.\n", | ||
528 | le32_to_cpu(cqicb->lbq_addr_hi)); | ||
529 | printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n", | ||
530 | le16_to_cpu(cqicb->lbq_buf_size)); | ||
531 | printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n", | ||
532 | le16_to_cpu(cqicb->lbq_len)); | ||
533 | printk(KERN_ERR PFX "cqicb->sbq_addr_lo = %x.\n", | ||
534 | le32_to_cpu(cqicb->sbq_addr_lo)); | ||
535 | printk(KERN_ERR PFX "cqicb->sbq_addr_hi = %x.\n", | ||
536 | le32_to_cpu(cqicb->sbq_addr_hi)); | ||
537 | printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n", | ||
538 | le16_to_cpu(cqicb->sbq_buf_size)); | ||
539 | printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n", | ||
540 | le16_to_cpu(cqicb->sbq_len)); | ||
541 | } | ||
542 | |||
543 | void ql_dump_rx_ring(struct rx_ring *rx_ring) | ||
544 | { | ||
545 | if (rx_ring == NULL) | ||
546 | return; | ||
547 | printk(KERN_ERR PFX | ||
548 | "===================== Dumping rx_ring %d ===============.\n", | ||
549 | rx_ring->cq_id); | ||
550 | printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n", | ||
551 | rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "", | ||
552 | rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "", | ||
553 | rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : ""); | ||
554 | printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb); | ||
555 | printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base); | ||
556 | printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n", | ||
557 | (u64) rx_ring->cq_base_dma); | ||
558 | printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size); | ||
559 | printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len); | ||
560 | printk(KERN_ERR PFX | ||
561 | "rx_ring->prod_idx_sh_reg, addr = %p, value = %d.\n", | ||
562 | rx_ring->prod_idx_sh_reg, | ||
563 | rx_ring->prod_idx_sh_reg ? *(rx_ring->prod_idx_sh_reg) : 0); | ||
564 | printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n", | ||
565 | (u64) rx_ring->prod_idx_sh_reg_dma); | ||
566 | printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n", | ||
567 | rx_ring->cnsmr_idx_db_reg); | ||
568 | printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx); | ||
569 | printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry); | ||
570 | printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n", | ||
571 | rx_ring->valid_db_reg); | ||
572 | |||
573 | printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base); | ||
574 | printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n", | ||
575 | (u64) rx_ring->lbq_base_dma); | ||
576 | printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n", | ||
577 | rx_ring->lbq_base_indirect); | ||
578 | printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n", | ||
579 | (u64) rx_ring->lbq_base_indirect_dma); | ||
580 | printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq); | ||
581 | printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len); | ||
582 | printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size); | ||
583 | printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n", | ||
584 | rx_ring->lbq_prod_idx_db_reg); | ||
585 | printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n", | ||
586 | rx_ring->lbq_prod_idx); | ||
587 | printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n", | ||
588 | rx_ring->lbq_curr_idx); | ||
589 | printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n", | ||
590 | rx_ring->lbq_clean_idx); | ||
591 | printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n", | ||
592 | rx_ring->lbq_free_cnt); | ||
593 | printk(KERN_ERR PFX "rx_ring->lbq_buf_size = %d.\n", | ||
594 | rx_ring->lbq_buf_size); | ||
595 | |||
596 | printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base); | ||
597 | printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n", | ||
598 | (u64) rx_ring->sbq_base_dma); | ||
599 | printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n", | ||
600 | rx_ring->sbq_base_indirect); | ||
601 | printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n", | ||
602 | (u64) rx_ring->sbq_base_indirect_dma); | ||
603 | printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq); | ||
604 | printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len); | ||
605 | printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size); | ||
606 | printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n", | ||
607 | rx_ring->sbq_prod_idx_db_reg); | ||
608 | printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n", | ||
609 | rx_ring->sbq_prod_idx); | ||
610 | printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n", | ||
611 | rx_ring->sbq_curr_idx); | ||
612 | printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n", | ||
613 | rx_ring->sbq_clean_idx); | ||
614 | printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n", | ||
615 | rx_ring->sbq_free_cnt); | ||
616 | printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n", | ||
617 | rx_ring->sbq_buf_size); | ||
618 | printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id); | ||
619 | printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq); | ||
620 | printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu); | ||
621 | printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev); | ||
622 | } | ||
623 | |||
624 | void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) | ||
625 | { | ||
626 | void *ptr; | ||
627 | |||
628 | printk(KERN_ERR PFX "%s: Enter.\n", __func__); | ||
629 | |||
630 | ptr = kmalloc(size, GFP_ATOMIC); | ||
631 | if (ptr == NULL) { | ||
632 | printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n", | ||
633 | __func__); | ||
634 | return; | ||
635 | } | ||
636 | |||
637 | if (ql_write_cfg(qdev, ptr, size, bit, q_id)) { | ||
638 | printk(KERN_ERR "%s: Failed to upload control block!\n", | ||
639 | __func__); | ||
640 | goto fail_it; | ||
641 | } | ||
642 | switch (bit) { | ||
643 | case CFG_DRQ: | ||
644 | ql_dump_wqicb((struct wqicb *)ptr); | ||
645 | break; | ||
646 | case CFG_DCQ: | ||
647 | ql_dump_cqicb((struct cqicb *)ptr); | ||
648 | break; | ||
649 | case CFG_DR: | ||
650 | ql_dump_ricb((struct ricb *)ptr); | ||
651 | break; | ||
652 | default: | ||
653 | printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n", | ||
654 | __func__, bit); | ||
655 | break; | ||
656 | } | ||
657 | fail_it: | ||
658 | kfree(ptr); | ||
659 | } | ||
660 | #endif | ||
661 | |||
662 | #ifdef QL_OB_DUMP | ||
663 | void ql_dump_tx_desc(struct tx_buf_desc *tbd) | ||
664 | { | ||
665 | printk(KERN_ERR PFX "tbd->addr = 0x%llx\n", | ||
666 | le64_to_cpu((u64) tbd->addr)); | ||
667 | printk(KERN_ERR PFX "tbd->len = %d\n", | ||
668 | le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); | ||
669 | printk(KERN_ERR PFX "tbd->flags = %s %s\n", | ||
670 | tbd->len & TX_DESC_C ? "C" : ".", | ||
671 | tbd->len & TX_DESC_E ? "E" : "."); | ||
672 | tbd++; | ||
673 | printk(KERN_ERR PFX "tbd->addr = 0x%llx\n", | ||
674 | le64_to_cpu((u64) tbd->addr)); | ||
675 | printk(KERN_ERR PFX "tbd->len = %d\n", | ||
676 | le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); | ||
677 | printk(KERN_ERR PFX "tbd->flags = %s %s\n", | ||
678 | tbd->len & TX_DESC_C ? "C" : ".", | ||
679 | tbd->len & TX_DESC_E ? "E" : "."); | ||
680 | tbd++; | ||
681 | printk(KERN_ERR PFX "tbd->addr = 0x%llx\n", | ||
682 | le64_to_cpu((u64) tbd->addr)); | ||
683 | printk(KERN_ERR PFX "tbd->len = %d\n", | ||
684 | le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); | ||
685 | printk(KERN_ERR PFX "tbd->flags = %s %s\n", | ||
686 | tbd->len & TX_DESC_C ? "C" : ".", | ||
687 | tbd->len & TX_DESC_E ? "E" : "."); | ||
688 | |||
689 | } | ||
690 | |||
691 | void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb) | ||
692 | { | ||
693 | struct ob_mac_tso_iocb_req *ob_mac_tso_iocb = | ||
694 | (struct ob_mac_tso_iocb_req *)ob_mac_iocb; | ||
695 | struct tx_buf_desc *tbd; | ||
696 | u16 frame_len; | ||
697 | |||
698 | printk(KERN_ERR PFX "%s\n", __func__); | ||
699 | printk(KERN_ERR PFX "opcode = %s\n", | ||
700 | (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO"); | ||
701 | printk(KERN_ERR PFX "flags1 = %s %s %s %s %s\n", | ||
702 | ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "", | ||
703 | ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "", | ||
704 | ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "", | ||
705 | ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "", | ||
706 | ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : ""); | ||
707 | printk(KERN_ERR PFX "flags2 = %s %s %s\n", | ||
708 | ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "", | ||
709 | ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "", | ||
710 | ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : ""); | ||
711 | printk(KERN_ERR PFX "flags3 = %s %s %s \n", | ||
712 | ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "", | ||
713 | ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "", | ||
714 | ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : ""); | ||
715 | printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid); | ||
716 | printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx); | ||
717 | printk(KERN_ERR PFX "vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci); | ||
718 | if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) { | ||
719 | printk(KERN_ERR PFX "frame_len = %d\n", | ||
720 | le32_to_cpu(ob_mac_tso_iocb->frame_len)); | ||
721 | printk(KERN_ERR PFX "mss = %d\n", | ||
722 | le16_to_cpu(ob_mac_tso_iocb->mss)); | ||
723 | printk(KERN_ERR PFX "prot_hdr_len = %d\n", | ||
724 | le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len)); | ||
725 | printk(KERN_ERR PFX "hdr_offset = 0x%.04x\n", | ||
726 | le16_to_cpu(ob_mac_tso_iocb->net_trans_offset)); | ||
727 | frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len); | ||
728 | } else { | ||
729 | printk(KERN_ERR PFX "frame_len = %d\n", | ||
730 | le16_to_cpu(ob_mac_iocb->frame_len)); | ||
731 | frame_len = le16_to_cpu(ob_mac_iocb->frame_len); | ||
732 | } | ||
733 | tbd = &ob_mac_iocb->tbd[0]; | ||
734 | ql_dump_tx_desc(tbd); | ||
735 | } | ||
736 | |||
737 | void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp) | ||
738 | { | ||
739 | printk(KERN_ERR PFX "%s\n", __func__); | ||
740 | printk(KERN_ERR PFX "opcode = %d\n", ob_mac_rsp->opcode); | ||
741 | printk(KERN_ERR PFX "flags = %s %s %s %s %s %s %s\n", | ||
742 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".", | ||
743 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".", | ||
744 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".", | ||
745 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".", | ||
746 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".", | ||
747 | ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".", | ||
748 | ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : "."); | ||
749 | printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid); | ||
750 | } | ||
751 | #endif | ||
752 | |||
753 | #ifdef QL_IB_DUMP | ||
754 | void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) | ||
755 | { | ||
756 | printk(KERN_ERR PFX "%s\n", __func__); | ||
757 | printk(KERN_ERR PFX "opcode = 0x%x\n", ib_mac_rsp->opcode); | ||
758 | printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n", | ||
759 | ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "", | ||
760 | ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "", | ||
761 | ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "", | ||
762 | ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "", | ||
763 | ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "", | ||
764 | ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : ""); | ||
765 | |||
766 | if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) | ||
767 | printk(KERN_ERR PFX "%s%s%s Multicast.\n", | ||
768 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | ||
769 | IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "", | ||
770 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | ||
771 | IB_MAC_IOCB_RSP_M_REG ? "Registered" : "", | ||
772 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | ||
773 | IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); | ||
774 | |||
775 | printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n", | ||
776 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "", | ||
777 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "", | ||
778 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "", | ||
779 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "", | ||
780 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : ""); | ||
781 | |||
782 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) | ||
783 | printk(KERN_ERR PFX "%s%s%s%s%s error.\n", | ||
784 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == | ||
785 | IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "", | ||
786 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == | ||
787 | IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "", | ||
788 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == | ||
789 | IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "", | ||
790 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == | ||
791 | IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "", | ||
792 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == | ||
793 | IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : ""); | ||
794 | |||
795 | printk(KERN_ERR PFX "flags3 = %s%s.\n", | ||
796 | ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "", | ||
797 | ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : ""); | ||
798 | |||
799 | if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) | ||
800 | printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n", | ||
801 | ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == | ||
802 | IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "", | ||
803 | ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == | ||
804 | IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "", | ||
805 | ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == | ||
806 | IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "", | ||
807 | ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == | ||
808 | IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : ""); | ||
809 | |||
810 | printk(KERN_ERR PFX "data_len = %d\n", | ||
811 | le32_to_cpu(ib_mac_rsp->data_len)); | ||
812 | printk(KERN_ERR PFX "data_addr_hi = 0x%x\n", | ||
813 | le32_to_cpu(ib_mac_rsp->data_addr_hi)); | ||
814 | printk(KERN_ERR PFX "data_addr_lo = 0x%x\n", | ||
815 | le32_to_cpu(ib_mac_rsp->data_addr_lo)); | ||
816 | if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) | ||
817 | printk(KERN_ERR PFX "rss = %x\n", | ||
818 | le32_to_cpu(ib_mac_rsp->rss)); | ||
819 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) | ||
820 | printk(KERN_ERR PFX "vlan_id = %x\n", | ||
821 | le16_to_cpu(ib_mac_rsp->vlan_id)); | ||
822 | |||
823 | printk(KERN_ERR PFX "flags4 = %s%s%s.\n", | ||
824 | le32_to_cpu(ib_mac_rsp-> | ||
825 | flags4) & IB_MAC_IOCB_RSP_HV ? "HV " : "", | ||
826 | le32_to_cpu(ib_mac_rsp-> | ||
827 | flags4) & IB_MAC_IOCB_RSP_HS ? "HS " : "", | ||
828 | le32_to_cpu(ib_mac_rsp-> | ||
829 | flags4) & IB_MAC_IOCB_RSP_HL ? "HL " : ""); | ||
830 | |||
831 | if (le32_to_cpu(ib_mac_rsp->flags4) & IB_MAC_IOCB_RSP_HV) { | ||
832 | printk(KERN_ERR PFX "hdr length = %d.\n", | ||
833 | le32_to_cpu(ib_mac_rsp->hdr_len)); | ||
834 | printk(KERN_ERR PFX "hdr addr_hi = 0x%x.\n", | ||
835 | le32_to_cpu(ib_mac_rsp->hdr_addr_hi)); | ||
836 | printk(KERN_ERR PFX "hdr addr_lo = 0x%x.\n", | ||
837 | le32_to_cpu(ib_mac_rsp->hdr_addr_lo)); | ||
838 | } | ||
839 | } | ||
840 | #endif | ||
841 | |||
842 | #ifdef QL_ALL_DUMP | ||
843 | void ql_dump_all(struct ql_adapter *qdev) | ||
844 | { | ||
845 | int i; | ||
846 | |||
847 | QL_DUMP_REGS(qdev); | ||
848 | QL_DUMP_QDEV(qdev); | ||
849 | for (i = 0; i < qdev->tx_ring_count; i++) { | ||
850 | QL_DUMP_TX_RING(&qdev->tx_ring[i]); | ||
851 | QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]); | ||
852 | } | ||
853 | for (i = 0; i < qdev->rx_ring_count; i++) { | ||
854 | QL_DUMP_RX_RING(&qdev->rx_ring[i]); | ||
855 | QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]); | ||
856 | } | ||
857 | } | ||
858 | #endif | ||
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c new file mode 100644 index 000000000000..6457f8c4fdaa --- /dev/null +++ b/drivers/net/qlge/qlge_ethtool.c | |||
@@ -0,0 +1,415 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/init.h> | ||
3 | #include <linux/types.h> | ||
4 | #include <linux/module.h> | ||
5 | #include <linux/list.h> | ||
6 | #include <linux/pci.h> | ||
7 | #include <linux/dma-mapping.h> | ||
8 | #include <linux/pagemap.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <linux/dmapool.h> | ||
12 | #include <linux/mempool.h> | ||
13 | #include <linux/spinlock.h> | ||
14 | #include <linux/kthread.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/ioport.h> | ||
18 | #include <linux/in.h> | ||
19 | #include <linux/ip.h> | ||
20 | #include <linux/ipv6.h> | ||
21 | #include <net/ipv6.h> | ||
22 | #include <linux/tcp.h> | ||
23 | #include <linux/udp.h> | ||
24 | #include <linux/if_arp.h> | ||
25 | #include <linux/if_ether.h> | ||
26 | #include <linux/netdevice.h> | ||
27 | #include <linux/etherdevice.h> | ||
28 | #include <linux/ethtool.h> | ||
29 | #include <linux/skbuff.h> | ||
30 | #include <linux/rtnetlink.h> | ||
31 | #include <linux/if_vlan.h> | ||
33 | #include <linux/delay.h> | ||
34 | #include <linux/mm.h> | ||
35 | #include <linux/vmalloc.h> | ||
36 | |||
37 | #include <linux/version.h> | ||
38 | |||
39 | #include "qlge.h" | ||
40 | |||
41 | static int ql_update_ring_coalescing(struct ql_adapter *qdev) | ||
42 | { | ||
43 | int i, status = 0; | ||
44 | struct rx_ring *rx_ring; | ||
45 | struct cqicb *cqicb; | ||
46 | |||
47 | if (!netif_running(qdev->ndev)) | ||
48 | return status; | ||
49 | |||
50 | spin_lock(&qdev->hw_lock); | ||
51 | /* Skip the default queue, and update the outbound handler | ||
52 | * queues if they changed. | ||
53 | */ | ||
54 | cqicb = (struct cqicb *)&qdev->rx_ring[1]; | ||
55 | if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs || | ||
56 | le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) { | ||
57 | for (i = 1; i < qdev->rss_ring_first_cq_id; i++) { | ||
58 | rx_ring = &qdev->rx_ring[i]; | ||
59 | cqicb = (struct cqicb *)rx_ring; | ||
60 | cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); | ||
61 | cqicb->pkt_delay = | ||
62 | cpu_to_le16(qdev->tx_max_coalesced_frames); | ||
63 | cqicb->flags = FLAGS_LI; | ||
64 | status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), | ||
65 | CFG_LCQ, rx_ring->cq_id); | ||
66 | if (status) { | ||
67 | QPRINTK(qdev, IFUP, ERR, | ||
68 | "Failed to load CQICB.\n"); | ||
69 | goto exit; | ||
70 | } | ||
71 | } | ||
72 | } | ||
73 | |||
74 | /* Update the inbound (RSS) handler queues if they changed. */ | ||
75 | cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_first_cq_id]; | ||
76 | if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs || | ||
77 | le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) { | ||
78 | for (i = qdev->rss_ring_first_cq_id; | ||
79 | i < qdev->rss_ring_first_cq_id + qdev->rss_ring_count; | ||
80 | i++) { | ||
81 | rx_ring = &qdev->rx_ring[i]; | ||
82 | cqicb = (struct cqicb *)rx_ring; | ||
83 | cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); | ||
84 | cqicb->pkt_delay = | ||
85 | cpu_to_le16(qdev->rx_max_coalesced_frames); | ||
86 | cqicb->flags = FLAGS_LI; | ||
87 | status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), | ||
88 | CFG_LCQ, rx_ring->cq_id); | ||
89 | if (status) { | ||
90 | QPRINTK(qdev, IFUP, ERR, | ||
91 | "Failed to load CQICB.\n"); | ||
92 | goto exit; | ||
93 | } | ||
94 | } | ||
95 | } | ||
96 | exit: | ||
97 | spin_unlock(&qdev->hw_lock); | ||
98 | return status; | ||
99 | } | ||
100 | |||
101 | void ql_update_stats(struct ql_adapter *qdev) | ||
102 | { | ||
103 | u32 i; | ||
104 | u64 data; | ||
105 | u64 *iter = &qdev->nic_stats.tx_pkts; | ||
106 | |||
107 | spin_lock(&qdev->stats_lock); | ||
108 | if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { | ||
109 | QPRINTK(qdev, DRV, ERR, | ||
110 | "Couldn't get xgmac sem.\n"); | ||
111 | goto quit; | ||
112 | } | ||
113 | /* | ||
114 | * Get TX statistics. | ||
115 | */ | ||
116 | for (i = 0x200; i < 0x280; i += 8) { | ||
117 | if (ql_read_xgmac_reg64(qdev, i, &data)) { | ||
118 | QPRINTK(qdev, DRV, ERR, | ||
119 | "Error reading status register 0x%.04x.\n", i); | ||
120 | goto end; | ||
121 | } else | ||
122 | *iter = data; | ||
123 | iter++; | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * Get RX statistics. | ||
128 | */ | ||
129 | for (i = 0x300; i < 0x3d0; i += 8) { | ||
130 | if (ql_read_xgmac_reg64(qdev, i, &data)) { | ||
131 | QPRINTK(qdev, DRV, ERR, | ||
132 | "Error reading status register 0x%.04x.\n", i); | ||
133 | goto end; | ||
134 | } else | ||
135 | *iter = data; | ||
136 | iter++; | ||
137 | } | ||
138 | |||
139 | end: | ||
140 | ql_sem_unlock(qdev, qdev->xg_sem_mask); | ||
141 | quit: | ||
142 | spin_unlock(&qdev->stats_lock); | ||
143 | |||
144 | QL_DUMP_STAT(qdev); | ||
145 | |||
146 | return; | ||
147 | } | ||
148 | |||
149 | static char ql_stats_str_arr[][ETH_GSTRING_LEN] = { | ||
150 | {"tx_pkts"}, | ||
151 | {"tx_bytes"}, | ||
152 | {"tx_mcast_pkts"}, | ||
153 | {"tx_bcast_pkts"}, | ||
154 | {"tx_ucast_pkts"}, | ||
155 | {"tx_ctl_pkts"}, | ||
156 | {"tx_pause_pkts"}, | ||
157 | {"tx_64_pkts"}, | ||
158 | {"tx_65_to_127_pkts"}, | ||
159 | {"tx_128_to_255_pkts"}, | ||
160 | {"tx_256_511_pkts"}, | ||
161 | {"tx_512_to_1023_pkts"}, | ||
162 | {"tx_1024_to_1518_pkts"}, | ||
163 | {"tx_1519_to_max_pkts"}, | ||
164 | {"tx_undersize_pkts"}, | ||
165 | {"tx_oversize_pkts"}, | ||
166 | {"rx_bytes"}, | ||
167 | {"rx_bytes_ok"}, | ||
168 | {"rx_pkts"}, | ||
169 | {"rx_pkts_ok"}, | ||
170 | {"rx_bcast_pkts"}, | ||
171 | {"rx_mcast_pkts"}, | ||
172 | {"rx_ucast_pkts"}, | ||
173 | {"rx_undersize_pkts"}, | ||
174 | {"rx_oversize_pkts"}, | ||
175 | {"rx_jabber_pkts"}, | ||
176 | {"rx_undersize_fcerr_pkts"}, | ||
177 | {"rx_drop_events"}, | ||
178 | {"rx_fcerr_pkts"}, | ||
179 | {"rx_align_err"}, | ||
180 | {"rx_symbol_err"}, | ||
181 | {"rx_mac_err"}, | ||
182 | {"rx_ctl_pkts"}, | ||
183 | {"rx_pause_pkts"}, | ||
184 | {"rx_64_pkts"}, | ||
185 | {"rx_65_to_127_pkts"}, | ||
186 | {"rx_128_255_pkts"}, | ||
187 | {"rx_256_511_pkts"}, | ||
188 | {"rx_512_to_1023_pkts"}, | ||
189 | {"rx_1024_to_1518_pkts"}, | ||
190 | {"rx_1519_to_max_pkts"}, | ||
191 | {"rx_len_err_pkts"}, | ||
192 | }; | ||
193 | |||
194 | static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf) | ||
195 | { | ||
196 | switch (stringset) { | ||
197 | case ETH_SS_STATS: | ||
198 | memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr)); | ||
199 | break; | ||
200 | } | ||
201 | } | ||
202 | |||
203 | static int ql_get_sset_count(struct net_device *dev, int sset) | ||
204 | { | ||
205 | switch (sset) { | ||
206 | case ETH_SS_STATS: | ||
207 | return ARRAY_SIZE(ql_stats_str_arr); | ||
208 | default: | ||
209 | return -EOPNOTSUPP; | ||
210 | } | ||
211 | } | ||
212 | |||
213 | static void | ||
214 | ql_get_ethtool_stats(struct net_device *ndev, | ||
215 | struct ethtool_stats *stats, u64 *data) | ||
216 | { | ||
217 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
218 | struct nic_stats *s = &qdev->nic_stats; | ||
219 | |||
220 | ql_update_stats(qdev); | ||
221 | |||
222 | *data++ = s->tx_pkts; | ||
223 | *data++ = s->tx_bytes; | ||
224 | *data++ = s->tx_mcast_pkts; | ||
225 | *data++ = s->tx_bcast_pkts; | ||
226 | *data++ = s->tx_ucast_pkts; | ||
227 | *data++ = s->tx_ctl_pkts; | ||
228 | *data++ = s->tx_pause_pkts; | ||
229 | *data++ = s->tx_64_pkt; | ||
230 | *data++ = s->tx_65_to_127_pkt; | ||
231 | *data++ = s->tx_128_to_255_pkt; | ||
232 | *data++ = s->tx_256_511_pkt; | ||
233 | *data++ = s->tx_512_to_1023_pkt; | ||
234 | *data++ = s->tx_1024_to_1518_pkt; | ||
235 | *data++ = s->tx_1519_to_max_pkt; | ||
236 | *data++ = s->tx_undersize_pkt; | ||
237 | *data++ = s->tx_oversize_pkt; | ||
238 | *data++ = s->rx_bytes; | ||
239 | *data++ = s->rx_bytes_ok; | ||
240 | *data++ = s->rx_pkts; | ||
241 | *data++ = s->rx_pkts_ok; | ||
242 | *data++ = s->rx_bcast_pkts; | ||
243 | *data++ = s->rx_mcast_pkts; | ||
244 | *data++ = s->rx_ucast_pkts; | ||
245 | *data++ = s->rx_undersize_pkts; | ||
246 | *data++ = s->rx_oversize_pkts; | ||
247 | *data++ = s->rx_jabber_pkts; | ||
248 | *data++ = s->rx_undersize_fcerr_pkts; | ||
249 | *data++ = s->rx_drop_events; | ||
250 | *data++ = s->rx_fcerr_pkts; | ||
251 | *data++ = s->rx_align_err; | ||
252 | *data++ = s->rx_symbol_err; | ||
253 | *data++ = s->rx_mac_err; | ||
254 | *data++ = s->rx_ctl_pkts; | ||
255 | *data++ = s->rx_pause_pkts; | ||
256 | *data++ = s->rx_64_pkts; | ||
257 | *data++ = s->rx_65_to_127_pkts; | ||
258 | *data++ = s->rx_128_255_pkts; | ||
259 | *data++ = s->rx_256_511_pkts; | ||
260 | *data++ = s->rx_512_to_1023_pkts; | ||
261 | *data++ = s->rx_1024_to_1518_pkts; | ||
262 | *data++ = s->rx_1519_to_max_pkts; | ||
263 | *data++ = s->rx_len_err_pkts; | ||
264 | } | ||
265 | |||
266 | static int ql_get_settings(struct net_device *ndev, | ||
267 | struct ethtool_cmd *ecmd) | ||
268 | { | ||
269 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
270 | |||
271 | ecmd->supported = SUPPORTED_10000baseT_Full; | ||
272 | ecmd->advertising = ADVERTISED_10000baseT_Full; | ||
273 | ecmd->autoneg = AUTONEG_ENABLE; | ||
274 | ecmd->transceiver = XCVR_EXTERNAL; | ||
275 | if ((qdev->link_status & LINK_TYPE_MASK) == LINK_TYPE_10GBASET) { | ||
276 | ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); | ||
277 | ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); | ||
278 | ecmd->port = PORT_TP; | ||
279 | } else { | ||
280 | ecmd->supported |= SUPPORTED_FIBRE; | ||
281 | ecmd->advertising |= ADVERTISED_FIBRE; | ||
282 | ecmd->port = PORT_FIBRE; | ||
283 | } | ||
284 | |||
285 | ecmd->speed = SPEED_10000; | ||
286 | ecmd->duplex = DUPLEX_FULL; | ||
287 | |||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | static void ql_get_drvinfo(struct net_device *ndev, | ||
292 | struct ethtool_drvinfo *drvinfo) | ||
293 | { | ||
294 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
295 | strncpy(drvinfo->driver, qlge_driver_name, 32); | ||
296 | strncpy(drvinfo->version, qlge_driver_version, 32); | ||
297 | strncpy(drvinfo->fw_version, "N/A", 32); | ||
298 | strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); | ||
299 | drvinfo->n_stats = 0; | ||
300 | drvinfo->testinfo_len = 0; | ||
301 | drvinfo->regdump_len = 0; | ||
302 | drvinfo->eedump_len = 0; | ||
303 | } | ||
304 | |||
305 | static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) | ||
306 | { | ||
307 | struct ql_adapter *qdev = netdev_priv(dev); | ||
308 | |||
309 | c->rx_coalesce_usecs = qdev->rx_coalesce_usecs; | ||
310 | c->tx_coalesce_usecs = qdev->tx_coalesce_usecs; | ||
311 | |||
312 | /* This chip coalesces as follows: | ||
313 | * When a packet arrives, hold off interrupts until | ||
314 | * cqicb->irq_delay expires, but if no other packets arrive, | ||
315 | * don't wait longer than cqicb->pkt_delay. However, ethtool | ||
316 | * doesn't use a timer to coalesce on a frame basis, so we take | ||
317 | * ethtool's max_coalesced_frames value and convert it to a delay | ||
318 | * in microseconds. We do this by assuming a basic throughput of | ||
319 | * 1,000,000 1024-byte frames per second, i.e. one frame per usec, | ||
320 | * which gives a simple one-to-one ratio. | ||
321 | */ | ||
322 | c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames; | ||
323 | c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames; | ||
324 | |||
325 | return 0; | ||
326 | } | ||
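
The comment above reduces to a trivial conversion: at roughly 1,000,000 1024-byte frames per second, one frame occupies about one microsecond, so a frame count maps directly to a microsecond delay. A minimal sketch of that conversion (frames_to_usecs is illustrative, not a helper that exists in this driver):

	/* One 1024-byte frame per usec at 10Gb/s line rate, per the
	 * comment above, so frames-to-usecs is an identity mapping.
	 */
	static inline u16 frames_to_usecs(u32 max_coalesced_frames)
	{
		return (u16) max_coalesced_frames;	/* 1:1 ratio */
	}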
327 | |||
328 | static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c) | ||
329 | { | ||
330 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
331 | |||
332 | /* Validate user parameters. */ | ||
333 | if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2) | ||
334 | return -EINVAL; | ||
335 | /* Don't wait for more than MAX_INTER_FRAME_WAIT frames (one frame per usec). */ | ||
336 | if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT) | ||
337 | return -EINVAL; | ||
338 | if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2) | ||
339 | return -EINVAL; | ||
340 | if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT) | ||
341 | return -EINVAL; | ||
342 | |||
343 | /* Verify a change took place before updating the hardware. */ | ||
344 | if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs && | ||
345 | qdev->tx_coalesce_usecs == c->tx_coalesce_usecs && | ||
346 | qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames && | ||
347 | qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames) | ||
348 | return 0; | ||
349 | |||
350 | qdev->rx_coalesce_usecs = c->rx_coalesce_usecs; | ||
351 | qdev->tx_coalesce_usecs = c->tx_coalesce_usecs; | ||
352 | qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames; | ||
353 | qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames; | ||
354 | |||
355 | return ql_update_ring_coalescing(qdev); | ||
356 | } | ||
357 | |||
358 | static u32 ql_get_rx_csum(struct net_device *netdev) | ||
359 | { | ||
360 | struct ql_adapter *qdev = netdev_priv(netdev); | ||
361 | return qdev->rx_csum; | ||
362 | } | ||
363 | |||
364 | static int ql_set_rx_csum(struct net_device *netdev, u32 data) | ||
365 | { | ||
366 | struct ql_adapter *qdev = netdev_priv(netdev); | ||
367 | qdev->rx_csum = data; | ||
368 | return 0; | ||
369 | } | ||
370 | |||
371 | static int ql_set_tso(struct net_device *ndev, u32 data) | ||
372 | { | ||
373 | |||
374 | if (data) { | ||
375 | ndev->features |= NETIF_F_TSO; | ||
376 | ndev->features |= NETIF_F_TSO6; | ||
377 | } else { | ||
378 | ndev->features &= ~NETIF_F_TSO; | ||
379 | ndev->features &= ~NETIF_F_TSO6; | ||
380 | } | ||
381 | return 0; | ||
382 | } | ||
383 | |||
384 | static u32 ql_get_msglevel(struct net_device *ndev) | ||
385 | { | ||
386 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
387 | return qdev->msg_enable; | ||
388 | } | ||
389 | |||
390 | static void ql_set_msglevel(struct net_device *ndev, u32 value) | ||
391 | { | ||
392 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
393 | qdev->msg_enable = value; | ||
394 | } | ||
395 | |||
396 | const struct ethtool_ops qlge_ethtool_ops = { | ||
397 | .get_settings = ql_get_settings, | ||
398 | .get_drvinfo = ql_get_drvinfo, | ||
399 | .get_msglevel = ql_get_msglevel, | ||
400 | .set_msglevel = ql_set_msglevel, | ||
401 | .get_link = ethtool_op_get_link, | ||
402 | .get_rx_csum = ql_get_rx_csum, | ||
403 | .set_rx_csum = ql_set_rx_csum, | ||
404 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
405 | .get_sg = ethtool_op_get_sg, | ||
406 | .set_sg = ethtool_op_set_sg, | ||
407 | .get_tso = ethtool_op_get_tso, | ||
408 | .set_tso = ql_set_tso, | ||
409 | .get_coalesce = ql_get_coalesce, | ||
410 | .set_coalesce = ql_set_coalesce, | ||
411 | .get_sset_count = ql_get_sset_count, | ||
412 | .get_strings = ql_get_strings, | ||
413 | .get_ethtool_stats = ql_get_ethtool_stats, | ||
414 | }; | ||
415 | |||
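The ops table above still has to be attached to the net_device during probe. A minimal sketch of that hookup, assuming the SET_ETHTOOL_OPS macro of this kernel era (the actual wiring for qlge lives in qlge_main.c's probe path, below):

	/* Sketch: attach the ethtool ops table to a freshly allocated
	 * netdev; ndev is assumed to come from alloc_etherdev() in the
	 * probe routine.
	 */
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);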
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c new file mode 100644 index 000000000000..ad878e2b9ded --- /dev/null +++ b/drivers/net/qlge/qlge_main.c | |||
@@ -0,0 +1,3954 @@ | |||
1 | /* | ||
2 | * QLogic qlge NIC HBA Driver | ||
3 | * Copyright (c) 2003-2008 QLogic Corporation | ||
4 | * See LICENSE.qlge for copyright and licensing details. | ||
5 | * Author: Linux qlge network device driver by | ||
6 | * Ron Mercer <ron.mercer@qlogic.com> | ||
7 | */ | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/list.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <linux/dma-mapping.h> | ||
15 | #include <linux/pagemap.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/dmapool.h> | ||
19 | #include <linux/mempool.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/kthread.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/ioport.h> | ||
25 | #include <linux/in.h> | ||
26 | #include <linux/ip.h> | ||
27 | #include <linux/ipv6.h> | ||
28 | #include <net/ipv6.h> | ||
29 | #include <linux/tcp.h> | ||
30 | #include <linux/udp.h> | ||
31 | #include <linux/if_arp.h> | ||
32 | #include <linux/if_ether.h> | ||
33 | #include <linux/netdevice.h> | ||
34 | #include <linux/etherdevice.h> | ||
35 | #include <linux/ethtool.h> | ||
36 | #include <linux/skbuff.h> | ||
37 | #include <linux/rtnetlink.h> | ||
38 | #include <linux/if_vlan.h> | ||
40 | #include <linux/delay.h> | ||
41 | #include <linux/mm.h> | ||
42 | #include <linux/vmalloc.h> | ||
43 | |||
44 | #include "qlge.h" | ||
45 | |||
46 | char qlge_driver_name[] = DRV_NAME; | ||
47 | const char qlge_driver_version[] = DRV_VERSION; | ||
48 | |||
49 | MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>"); | ||
50 | MODULE_DESCRIPTION(DRV_STRING " "); | ||
51 | MODULE_LICENSE("GPL"); | ||
52 | MODULE_VERSION(DRV_VERSION); | ||
53 | |||
54 | static const u32 default_msg = | ||
55 | NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | | ||
56 | /* NETIF_MSG_TIMER | */ | ||
57 | NETIF_MSG_IFDOWN | | ||
58 | NETIF_MSG_IFUP | | ||
59 | NETIF_MSG_RX_ERR | | ||
60 | NETIF_MSG_TX_ERR | | ||
61 | NETIF_MSG_TX_QUEUED | | ||
62 | NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | | ||
63 | /* NETIF_MSG_PKTDATA | */ | ||
64 | NETIF_MSG_HW | NETIF_MSG_WOL | 0; | ||
65 | |||
66 | static int debug = 0x00007fff; /* defaults above */ | ||
67 | module_param(debug, int, 0); | ||
68 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | ||
69 | |||
70 | #define MSIX_IRQ 0 | ||
71 | #define MSI_IRQ 1 | ||
72 | #define LEG_IRQ 2 | ||
73 | static int irq_type = MSIX_IRQ; | ||
74 | module_param(irq_type, int, 0); | ||
75 | MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); | ||
76 | |||
77 | static struct pci_device_id qlge_pci_tbl[] __devinitdata = { | ||
78 | {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)}, | ||
79 | {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)}, | ||
80 | /* required last entry */ | ||
81 | {0,} | ||
82 | }; | ||
83 | |||
84 | MODULE_DEVICE_TABLE(pci, qlge_pci_tbl); | ||
85 | |||
86 | /* This hardware semaphore provides exclusive access to | ||
87 | * resources shared between the NIC driver, MPI firmware, | ||
88 | * FCOE firmware and the FC driver. | ||
89 | */ | ||
90 | static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask) | ||
91 | { | ||
92 | u32 sem_bits = 0; | ||
93 | |||
94 | switch (sem_mask) { | ||
95 | case SEM_XGMAC0_MASK: | ||
96 | sem_bits = SEM_SET << SEM_XGMAC0_SHIFT; | ||
97 | break; | ||
98 | case SEM_XGMAC1_MASK: | ||
99 | sem_bits = SEM_SET << SEM_XGMAC1_SHIFT; | ||
100 | break; | ||
101 | case SEM_ICB_MASK: | ||
102 | sem_bits = SEM_SET << SEM_ICB_SHIFT; | ||
103 | break; | ||
104 | case SEM_MAC_ADDR_MASK: | ||
105 | sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT; | ||
106 | break; | ||
107 | case SEM_FLASH_MASK: | ||
108 | sem_bits = SEM_SET << SEM_FLASH_SHIFT; | ||
109 | break; | ||
110 | case SEM_PROBE_MASK: | ||
111 | sem_bits = SEM_SET << SEM_PROBE_SHIFT; | ||
112 | break; | ||
113 | case SEM_RT_IDX_MASK: | ||
114 | sem_bits = SEM_SET << SEM_RT_IDX_SHIFT; | ||
115 | break; | ||
116 | case SEM_PROC_REG_MASK: | ||
117 | sem_bits = SEM_SET << SEM_PROC_REG_SHIFT; | ||
118 | break; | ||
119 | default: | ||
120 | QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n"); | ||
121 | return -EINVAL; | ||
122 | } | ||
123 | |||
124 | ql_write32(qdev, SEM, sem_bits | sem_mask); | ||
125 | return !(ql_read32(qdev, SEM) & sem_bits); | ||
126 | } | ||
127 | |||
128 | int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) | ||
129 | { | ||
130 | unsigned int seconds = 3; | ||
131 | do { | ||
132 | if (!ql_sem_trylock(qdev, sem_mask)) | ||
133 | return 0; | ||
134 | ssleep(1); | ||
135 | } while (--seconds); | ||
136 | return -ETIMEDOUT; | ||
137 | } | ||
138 | |||
139 | void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask) | ||
140 | { | ||
141 | ql_write32(qdev, SEM, sem_mask); | ||
142 | ql_read32(qdev, SEM); /* flush */ | ||
143 | } | ||
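
Together, ql_sem_spinlock() and ql_sem_unlock() bracket any access to a shared hardware resource. A minimal usage sketch, mirroring what ql_get_flash_params() does further down in this file:

	/* Sketch: acquire the flash semaphore, touch the shared
	 * registers, then release. ql_sem_spinlock() gives up after
	 * about 3 seconds.
	 */
	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;
	/* ... access FLASH_ADDR/FLASH_DATA here ... */
	ql_sem_unlock(qdev, SEM_FLASH_MASK);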
144 | |||
145 | /* This function waits for a specific bit to come ready | ||
146 | * in a given register. It is used mostly during the initialization | ||
147 | * process, but is also called from kernel thread context by netdev | ||
148 | * callbacks such as set_multi, set_mac_address and vlan_rx_add_vid. | ||
149 | */ | ||
150 | int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) | ||
151 | { | ||
152 | u32 temp; | ||
153 | int count = UDELAY_COUNT; | ||
154 | |||
155 | while (count) { | ||
156 | temp = ql_read32(qdev, reg); | ||
157 | |||
158 | /* check for errors */ | ||
159 | if (temp & err_bit) { | ||
160 | QPRINTK(qdev, PROBE, ALERT, | ||
161 | "register 0x%.08x access error, value = 0x%.08x!.\n", | ||
162 | reg, temp); | ||
163 | return -EIO; | ||
164 | } else if (temp & bit) | ||
165 | return 0; | ||
166 | udelay(UDELAY_DELAY); | ||
167 | count--; | ||
168 | } | ||
169 | QPRINTK(qdev, PROBE, ALERT, | ||
170 | "Timed out waiting for reg %x to come ready.\n", reg); | ||
171 | return -ETIMEDOUT; | ||
172 | } | ||
173 | |||
174 | /* The CFG register is used to download TX and RX control blocks | ||
175 | * to the chip. This function waits for an operation to complete. | ||
176 | */ | ||
177 | static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit) | ||
178 | { | ||
179 | int count = UDELAY_COUNT; | ||
180 | u32 temp; | ||
181 | |||
182 | while (count) { | ||
183 | temp = ql_read32(qdev, CFG); | ||
184 | if (temp & CFG_LE) | ||
185 | return -EIO; | ||
186 | if (!(temp & bit)) | ||
187 | return 0; | ||
188 | udelay(UDELAY_DELAY); | ||
189 | count--; | ||
190 | } | ||
191 | return -ETIMEDOUT; | ||
192 | } | ||
193 | |||
194 | |||
195 | /* Used to issue init control blocks to hw. Maps control block, | ||
196 | * sets address, triggers download, waits for completion. | ||
197 | */ | ||
198 | int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, | ||
199 | u16 q_id) | ||
200 | { | ||
201 | u64 map; | ||
202 | int status = 0; | ||
203 | int direction; | ||
204 | u32 mask; | ||
205 | u32 value; | ||
206 | |||
207 | direction = | ||
208 | (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE : | ||
209 | PCI_DMA_FROMDEVICE; | ||
210 | |||
211 | map = pci_map_single(qdev->pdev, ptr, size, direction); | ||
212 | if (pci_dma_mapping_error(qdev->pdev, map)) { | ||
213 | QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n"); | ||
214 | return -ENOMEM; | ||
215 | } | ||
216 | |||
217 | status = ql_wait_cfg(qdev, bit); | ||
218 | if (status) { | ||
219 | QPRINTK(qdev, IFUP, ERR, | ||
220 | "Timed out waiting for CFG to come ready.\n"); | ||
221 | goto exit; | ||
222 | } | ||
223 | |||
224 | status = ql_sem_spinlock(qdev, SEM_ICB_MASK); | ||
225 | if (status) | ||
226 | goto exit; | ||
227 | ql_write32(qdev, ICB_L, (u32) map); | ||
228 | ql_write32(qdev, ICB_H, (u32) (map >> 32)); | ||
229 | ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */ | ||
230 | |||
231 | mask = CFG_Q_MASK | (bit << 16); | ||
232 | value = bit | (q_id << CFG_Q_SHIFT); | ||
233 | ql_write32(qdev, CFG, (mask | value)); | ||
234 | |||
235 | /* | ||
236 | * Wait for the bit to clear after signaling hw. | ||
237 | */ | ||
238 | status = ql_wait_cfg(qdev, bit); | ||
239 | exit: | ||
240 | pci_unmap_single(qdev->pdev, map, size, direction); | ||
241 | return status; | ||
242 | } | ||
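
A typical caller hands ql_write_cfg() a control block, its size, the CFG bit selecting the block type, and the target queue id. A minimal sketch, mirroring the CQICB download made from ql_update_ring_coalescing() in qlge_ethtool.c:

	/* Sketch: push an updated completion-queue control block
	 * to the chip.
	 */
	status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			      CFG_LCQ, rx_ring->cq_id);
	if (status)
		QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");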
243 | |||
244 | /* Get a specific MAC address from the CAM. Used for debug and reg dump. */ | ||
245 | int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, | ||
246 | u32 *value) | ||
247 | { | ||
248 | u32 offset = 0; | ||
249 | int status; | ||
250 | |||
251 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); | ||
252 | if (status) | ||
253 | return status; | ||
254 | switch (type) { | ||
255 | case MAC_ADDR_TYPE_MULTI_MAC: | ||
256 | case MAC_ADDR_TYPE_CAM_MAC: | ||
257 | { | ||
258 | status = | ||
259 | ql_wait_reg_rdy(qdev, | ||
260 | MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E); | ||
261 | if (status) | ||
262 | goto exit; | ||
263 | ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ | ||
264 | (index << MAC_ADDR_IDX_SHIFT) | /* index */ | ||
265 | MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ | ||
266 | status = | ||
267 | ql_wait_reg_rdy(qdev, | ||
268 | MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E); | ||
269 | if (status) | ||
270 | goto exit; | ||
271 | *value++ = ql_read32(qdev, MAC_ADDR_DATA); | ||
272 | status = | ||
273 | ql_wait_reg_rdy(qdev, | ||
274 | MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E); | ||
275 | if (status) | ||
276 | goto exit; | ||
277 | ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ | ||
278 | (index << MAC_ADDR_IDX_SHIFT) | /* index */ | ||
279 | MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ | ||
280 | status = | ||
281 | ql_wait_reg_rdy(qdev, | ||
282 | MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E); | ||
283 | if (status) | ||
284 | goto exit; | ||
285 | *value++ = ql_read32(qdev, MAC_ADDR_DATA); | ||
286 | if (type == MAC_ADDR_TYPE_CAM_MAC) { | ||
287 | status = | ||
288 | ql_wait_reg_rdy(qdev, | ||
289 | MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E); | ||
290 | if (status) | ||
291 | goto exit; | ||
292 | ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ | ||
293 | (index << MAC_ADDR_IDX_SHIFT) | /* index */ | ||
294 | MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ | ||
295 | status = | ||
296 | ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, | ||
297 | MAC_ADDR_MR, MAC_ADDR_E); | ||
298 | if (status) | ||
299 | goto exit; | ||
300 | *value++ = ql_read32(qdev, MAC_ADDR_DATA); | ||
301 | } | ||
302 | break; | ||
303 | } | ||
304 | case MAC_ADDR_TYPE_VLAN: | ||
305 | case MAC_ADDR_TYPE_MULTI_FLTR: | ||
306 | default: | ||
307 | QPRINTK(qdev, IFUP, CRIT, | ||
308 | "Address type %d not yet supported.\n", type); | ||
309 | status = -EPERM; | ||
310 | } | ||
311 | exit: | ||
312 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | ||
313 | return status; | ||
314 | } | ||
315 | |||
316 | /* Set up a MAC, multicast or VLAN address for the | ||
317 | * inbound frame matching. | ||
318 | */ | ||
319 | static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, | ||
320 | u16 index) | ||
321 | { | ||
322 | u32 offset = 0; | ||
323 | int status = 0; | ||
324 | |||
325 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); | ||
326 | if (status) | ||
327 | return status; | ||
328 | switch (type) { | ||
329 | case MAC_ADDR_TYPE_MULTI_MAC: | ||
330 | case MAC_ADDR_TYPE_CAM_MAC: | ||
331 | { | ||
332 | u32 cam_output; | ||
333 | u32 upper = (addr[0] << 8) | addr[1]; | ||
334 | u32 lower = | ||
335 | (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | | ||
336 | (addr[5]); | ||
337 | |||
338 | QPRINTK(qdev, IFUP, INFO, | ||
339 | "Adding %s address %02x:%02x:%02x:%02x:%02x:%02x" | ||
340 | " at index %d in the CAM.\n", | ||
341 | ((type == | ||
342 | MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" : | ||
343 | "UNICAST"), addr[0], addr[1], addr[2], addr[3], | ||
344 | addr[4], addr[5], index); | ||
345 | |||
346 | status = | ||
347 | ql_wait_reg_rdy(qdev, | ||
348 | MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E); | ||
349 | if (status) | ||
350 | goto exit; | ||
351 | ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ | ||
352 | (index << MAC_ADDR_IDX_SHIFT) | /* index */ | ||
353 | type); /* type */ | ||
354 | ql_write32(qdev, MAC_ADDR_DATA, lower); | ||
355 | status = | ||
356 | ql_wait_reg_rdy(qdev, | ||
357 | MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E); | ||
358 | if (status) | ||
359 | goto exit; | ||
360 | ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ | ||
361 | (index << MAC_ADDR_IDX_SHIFT) | /* index */ | ||
362 | type); /* type */ | ||
363 | ql_write32(qdev, MAC_ADDR_DATA, upper); | ||
364 | status = | ||
365 | ql_wait_reg_rdy(qdev, | ||
366 | MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E); | ||
367 | if (status) | ||
368 | goto exit; | ||
369 | ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */ | ||
370 | (index << MAC_ADDR_IDX_SHIFT) | /* index */ | ||
371 | type); /* type */ | ||
372 | /* This field should also include the queue id | ||
373 | and possibly the function id. Right now we hardcode | ||
374 | the route field to NIC core. | ||
375 | */ | ||
376 | if (type == MAC_ADDR_TYPE_CAM_MAC) { | ||
377 | cam_output = (CAM_OUT_ROUTE_NIC | | ||
378 | (qdev-> | ||
379 | func << CAM_OUT_FUNC_SHIFT) | | ||
380 | (qdev-> | ||
381 | rss_ring_first_cq_id << | ||
382 | CAM_OUT_CQ_ID_SHIFT)); | ||
383 | if (qdev->vlgrp) | ||
384 | cam_output |= CAM_OUT_RV; | ||
385 | /* route to NIC core */ | ||
386 | ql_write32(qdev, MAC_ADDR_DATA, cam_output); | ||
387 | } | ||
388 | break; | ||
389 | } | ||
390 | case MAC_ADDR_TYPE_VLAN: | ||
391 | { | ||
392 | u32 enable_bit = *((u32 *) &addr[0]); | ||
393 | /* For VLAN, the addr actually holds a bit that | ||
394 | * either enables or disables the vlan id we are | ||
395 | * addressing: the MAC_ADDR_E bit (bit 27) is either | ||
396 | * set (enable) or clear (disable). | ||
397 | */ | ||
398 | QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n", | ||
399 | (enable_bit ? "Adding" : "Removing"), | ||
400 | index, (enable_bit ? "to" : "from")); | ||
401 | |||
402 | status = | ||
403 | ql_wait_reg_rdy(qdev, | ||
404 | MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E); | ||
405 | if (status) | ||
406 | goto exit; | ||
407 | ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */ | ||
408 | (index << MAC_ADDR_IDX_SHIFT) | /* index */ | ||
409 | type | /* type */ | ||
410 | enable_bit); /* enable/disable */ | ||
411 | break; | ||
412 | } | ||
413 | case MAC_ADDR_TYPE_MULTI_FLTR: | ||
414 | default: | ||
415 | QPRINTK(qdev, IFUP, CRIT, | ||
416 | "Address type %d not yet supported.\n", type); | ||
417 | status = -EPERM; | ||
418 | } | ||
419 | exit: | ||
420 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | ||
421 | return status; | ||
422 | } | ||
423 | |||
424 | /* Get a specific frame routing value from the CAM. | ||
425 | * Used for debug and reg dump. | ||
426 | */ | ||
427 | int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value) | ||
428 | { | ||
429 | int status = 0; | ||
430 | |||
431 | status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); | ||
432 | if (status) | ||
433 | goto exit; | ||
434 | |||
435 | status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, RT_IDX_E); | ||
436 | if (status) | ||
437 | goto exit; | ||
438 | |||
439 | ql_write32(qdev, RT_IDX, | ||
440 | RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT)); | ||
441 | status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, RT_IDX_E); | ||
442 | if (status) | ||
443 | goto exit; | ||
444 | *value = ql_read32(qdev, RT_DATA); | ||
445 | exit: | ||
446 | ql_sem_unlock(qdev, SEM_RT_IDX_MASK); | ||
447 | return status; | ||
448 | } | ||
449 | |||
450 | /* The NIC function for this chip has 16 routing indexes. Each one can be used | ||
451 | * to route different frame types to various inbound queues. We send broadcast/ | ||
452 | * multicast/error frames to the default queue for slow handling, | ||
453 | * and CAM hit/RSS frames to the fast handling queues. | ||
454 | */ | ||
455 | static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, | ||
456 | int enable) | ||
457 | { | ||
458 | int status; | ||
459 | u32 value = 0; | ||
460 | |||
461 | status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); | ||
462 | if (status) | ||
463 | return status; | ||
464 | |||
465 | QPRINTK(qdev, IFUP, DEBUG, | ||
466 | "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n", | ||
467 | (enable ? "Adding" : "Removing"), | ||
468 | ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""), | ||
469 | ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""), | ||
470 | ((index == | ||
471 | RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""), | ||
472 | ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""), | ||
473 | ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""), | ||
474 | ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""), | ||
475 | ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""), | ||
476 | ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""), | ||
477 | ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""), | ||
478 | ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""), | ||
479 | ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""), | ||
480 | ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""), | ||
481 | ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""), | ||
482 | ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""), | ||
483 | ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""), | ||
484 | ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""), | ||
485 | (enable ? "to" : "from")); | ||
486 | |||
487 | switch (mask) { | ||
488 | case RT_IDX_CAM_HIT: | ||
489 | { | ||
490 | value = RT_IDX_DST_CAM_Q | /* dest */ | ||
491 | RT_IDX_TYPE_NICQ | /* type */ | ||
492 | (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */ | ||
493 | break; | ||
494 | } | ||
495 | case RT_IDX_VALID: /* Promiscuous Mode frames. */ | ||
496 | { | ||
497 | value = RT_IDX_DST_DFLT_Q | /* dest */ | ||
498 | RT_IDX_TYPE_NICQ | /* type */ | ||
499 | (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */ | ||
500 | break; | ||
501 | } | ||
502 | case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */ | ||
503 | { | ||
504 | value = RT_IDX_DST_DFLT_Q | /* dest */ | ||
505 | RT_IDX_TYPE_NICQ | /* type */ | ||
506 | (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */ | ||
507 | break; | ||
508 | } | ||
509 | case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */ | ||
510 | { | ||
511 | value = RT_IDX_DST_DFLT_Q | /* dest */ | ||
512 | RT_IDX_TYPE_NICQ | /* type */ | ||
513 | (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */ | ||
514 | break; | ||
515 | } | ||
516 | case RT_IDX_MCAST: /* Pass up All Multicast frames. */ | ||
517 | { | ||
518 | value = RT_IDX_DST_CAM_Q | /* dest */ | ||
519 | RT_IDX_TYPE_NICQ | /* type */ | ||
520 | (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */ | ||
521 | break; | ||
522 | } | ||
523 | case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */ | ||
524 | { | ||
525 | value = RT_IDX_DST_CAM_Q | /* dest */ | ||
526 | RT_IDX_TYPE_NICQ | /* type */ | ||
527 | (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ | ||
528 | break; | ||
529 | } | ||
530 | case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */ | ||
531 | { | ||
532 | value = RT_IDX_DST_RSS | /* dest */ | ||
533 | RT_IDX_TYPE_NICQ | /* type */ | ||
534 | (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ | ||
535 | break; | ||
536 | } | ||
537 | case 0: /* Clear the E-bit on an entry. */ | ||
538 | { | ||
539 | value = RT_IDX_DST_DFLT_Q | /* dest */ | ||
540 | RT_IDX_TYPE_NICQ | /* type */ | ||
541 | (index << RT_IDX_IDX_SHIFT);/* index */ | ||
542 | break; | ||
543 | } | ||
544 | default: | ||
545 | QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n", | ||
546 | mask); | ||
547 | status = -EPERM; | ||
548 | goto exit; | ||
549 | } | ||
550 | |||
551 | if (value) { | ||
552 | status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); | ||
553 | if (status) | ||
554 | goto exit; | ||
555 | value |= (enable ? RT_IDX_E : 0); | ||
556 | ql_write32(qdev, RT_IDX, value); | ||
557 | ql_write32(qdev, RT_DATA, enable ? mask : 0); | ||
558 | } | ||
559 | exit: | ||
560 | ql_sem_unlock(qdev, SEM_RT_IDX_MASK); | ||
561 | return status; | ||
562 | } | ||
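
Callers pick one of the slot/mask pairs decoded by the switch above. A minimal sketch that steers broadcast frames to the default queue (the error message is illustrative):

	/* Sketch: enable the broadcast routing slot. */
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
				    RT_IDX_BCAST, 1);
	if (status)
		QPRINTK(qdev, IFUP, ERR,
			"Failed to enable broadcast routing.\n");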
563 | |||
564 | static void ql_enable_interrupts(struct ql_adapter *qdev) | ||
565 | { | ||
566 | ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI); | ||
567 | } | ||
568 | |||
569 | static void ql_disable_interrupts(struct ql_adapter *qdev) | ||
570 | { | ||
571 | ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16)); | ||
572 | } | ||
573 | |||
574 | /* If we're running with multiple MSI-X vectors then we enable on the fly. | ||
575 | * Otherwise, we may have multiple outstanding workers and don't want to | ||
576 | * enable until the last one finishes. In this case, the irq_cnt gets | ||
577 | * incremented every time we queue a worker and decremented every time | ||
578 | * a worker finishes. Once it hits zero we enable the interrupt. | ||
579 | */ | ||
580 | void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) | ||
581 | { | ||
582 | if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) | ||
583 | ql_write32(qdev, INTR_EN, | ||
584 | qdev->intr_context[intr].intr_en_mask); | ||
585 | else { | ||
586 | if (qdev->legacy_check) | ||
587 | spin_lock(&qdev->legacy_lock); | ||
588 | if (atomic_dec_and_test(&qdev->intr_context[intr].irq_cnt)) { | ||
589 | QPRINTK(qdev, INTR, ERR, "Enabling interrupt %d.\n", | ||
590 | intr); | ||
591 | ql_write32(qdev, INTR_EN, | ||
592 | qdev->intr_context[intr].intr_en_mask); | ||
593 | } else { | ||
594 | QPRINTK(qdev, INTR, ERR, | ||
595 | "Skip enable, other queue(s) are active.\n"); | ||
596 | } | ||
597 | if (qdev->legacy_check) | ||
598 | spin_unlock(&qdev->legacy_lock); | ||
599 | } | ||
600 | } | ||
601 | |||
602 | static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) | ||
603 | { | ||
604 | u32 var = 0; | ||
605 | |||
606 | if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) | ||
607 | goto exit; | ||
608 | else if (!atomic_read(&qdev->intr_context[intr].irq_cnt)) { | ||
609 | ql_write32(qdev, INTR_EN, | ||
610 | qdev->intr_context[intr].intr_dis_mask); | ||
611 | var = ql_read32(qdev, STS); | ||
612 | } | ||
613 | atomic_inc(&qdev->intr_context[intr].irq_cnt); | ||
614 | exit: | ||
615 | return var; | ||
616 | } | ||
617 | |||
618 | static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev) | ||
619 | { | ||
620 | int i; | ||
621 | for (i = 0; i < qdev->intr_count; i++) { | ||
622 | /* The enable call does a atomic_dec_and_test | ||
623 | * and enables only if the result is zero. | ||
624 | * So we precharge it here. | ||
625 | */ | ||
626 | atomic_set(&qdev->intr_context[i].irq_cnt, 1); | ||
627 | ql_enable_completion_interrupt(qdev, i); | ||
628 | } | ||
629 | |||
630 | } | ||
631 | |||
632 | int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data) | ||
633 | { | ||
634 | int status = 0; | ||
635 | /* wait for reg to come ready */ | ||
636 | status = ql_wait_reg_rdy(qdev, | ||
637 | FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); | ||
638 | if (status) | ||
639 | goto exit; | ||
640 | /* set up for reg read */ | ||
641 | ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset); | ||
642 | /* wait for reg to come ready */ | ||
643 | status = ql_wait_reg_rdy(qdev, | ||
644 | FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); | ||
645 | if (status) | ||
646 | goto exit; | ||
647 | /* get the data */ | ||
648 | *data = ql_read32(qdev, FLASH_DATA); | ||
649 | exit: | ||
650 | return status; | ||
651 | } | ||
652 | |||
653 | static int ql_get_flash_params(struct ql_adapter *qdev) | ||
654 | { | ||
655 | int i; | ||
656 | int status; | ||
657 | u32 *p = (u32 *)&qdev->flash; | ||
658 | |||
659 | if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) | ||
660 | return -ETIMEDOUT; | ||
661 | |||
662 | for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) { | ||
663 | status = ql_read_flash_word(qdev, i, p); | ||
664 | if (status) { | ||
665 | QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n"); | ||
666 | goto exit; | ||
667 | } | ||
668 | |||
669 | } | ||
670 | exit: | ||
671 | ql_sem_unlock(qdev, SEM_FLASH_MASK); | ||
672 | return status; | ||
673 | } | ||
674 | |||
675 | /* xgmac registers are located behind the xgmac_addr and xgmac_data | ||
676 | * register pair. Each read/write requires us to wait for the ready | ||
677 | * bit before reading/writing the data. | ||
678 | */ | ||
679 | static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data) | ||
680 | { | ||
681 | int status; | ||
682 | /* wait for reg to come ready */ | ||
683 | status = ql_wait_reg_rdy(qdev, | ||
684 | XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); | ||
685 | if (status) | ||
686 | return status; | ||
687 | /* write the data to the data reg */ | ||
688 | ql_write32(qdev, XGMAC_DATA, data); | ||
689 | /* trigger the write */ | ||
690 | ql_write32(qdev, XGMAC_ADDR, reg); | ||
691 | return status; | ||
692 | } | ||
693 | |||
694 | /* xgmac registers are located behind the xgmac_addr and xgmac_data | ||
695 | * register pair. Each read/write requires us to wait for the ready | ||
696 | * bit before reading/writing the data. | ||
697 | */ | ||
698 | int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data) | ||
699 | { | ||
700 | int status = 0; | ||
701 | /* wait for reg to come ready */ | ||
702 | status = ql_wait_reg_rdy(qdev, | ||
703 | XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); | ||
704 | if (status) | ||
705 | goto exit; | ||
706 | /* set up for reg read */ | ||
707 | ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R); | ||
708 | /* wait for reg to come ready */ | ||
709 | status = ql_wait_reg_rdy(qdev, | ||
710 | XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); | ||
711 | if (status) | ||
712 | goto exit; | ||
713 | /* get the data */ | ||
714 | *data = ql_read32(qdev, XGMAC_DATA); | ||
715 | exit: | ||
716 | return status; | ||
717 | } | ||
718 | |||
719 | /* This is used for reading the 64-bit statistics regs. */ | ||
720 | int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data) | ||
721 | { | ||
722 | int status = 0; | ||
723 | u32 hi = 0; | ||
724 | u32 lo = 0; | ||
725 | |||
726 | status = ql_read_xgmac_reg(qdev, reg, &lo); | ||
727 | if (status) | ||
728 | goto exit; | ||
729 | |||
730 | status = ql_read_xgmac_reg(qdev, reg + 4, &hi); | ||
731 | if (status) | ||
732 | goto exit; | ||
733 | |||
734 | *data = (u64) lo | ((u64) hi << 32); | ||
735 | |||
736 | exit: | ||
737 | return status; | ||
738 | } | ||
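
ql_update_stats() in qlge_ethtool.c walks the 0x200-0x280 TX counter window with this helper. A minimal sketch of one such read, assuming the caller already holds qdev->xg_sem_mask the way ql_update_stats() does:

	/* Sketch: read the first 64-bit TX statistic (the register
	 * pair at 0x200/0x204).
	 */
	u64 tx_pkts;
	if (ql_read_xgmac_reg64(qdev, 0x200, &tx_pkts) == 0)
		QPRINTK(qdev, DRV, DEBUG, "tx_pkts = %llu.\n",
			(unsigned long long) tx_pkts);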
739 | |||
740 | /* Take the MAC Core out of reset. | ||
741 | * Enable statistics counting. | ||
742 | * Take the transmitter/receiver out of reset. | ||
743 | * This functionality may be done in the MPI firmware at a | ||
744 | * later date. | ||
745 | */ | ||
746 | static int ql_port_initialize(struct ql_adapter *qdev) | ||
747 | { | ||
748 | int status = 0; | ||
749 | u32 data; | ||
750 | |||
751 | if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) { | ||
752 | /* Another function has the semaphore, so | ||
753 | * wait for the port init bit to come ready. | ||
754 | */ | ||
755 | QPRINTK(qdev, LINK, INFO, | ||
756 | "Another function has the semaphore, so wait for the port init bit to come ready.\n"); | ||
757 | status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0); | ||
758 | if (status) { | ||
759 | QPRINTK(qdev, LINK, CRIT, | ||
760 | "Port initialize timed out.\n"); | ||
761 | } | ||
762 | return status; | ||
763 | } | ||
764 | |||
765 | QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n"); | ||
766 | /* Set the core reset. */ | ||
767 | status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data); | ||
768 | if (status) | ||
769 | goto end; | ||
770 | data |= GLOBAL_CFG_RESET; | ||
771 | status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); | ||
772 | if (status) | ||
773 | goto end; | ||
774 | |||
775 | /* Clear the core reset and turn on jumbo for receiver. */ | ||
776 | data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */ | ||
777 | data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */ | ||
778 | data |= GLOBAL_CFG_TX_STAT_EN; | ||
779 | data |= GLOBAL_CFG_RX_STAT_EN; | ||
780 | status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); | ||
781 | if (status) | ||
782 | goto end; | ||
783 | |||
784 | /* Enable the transmitter and clear its reset. */ | ||
785 | status = ql_read_xgmac_reg(qdev, TX_CFG, &data); | ||
786 | if (status) | ||
787 | goto end; | ||
788 | data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */ | ||
789 | data |= TX_CFG_EN; /* Enable the transmitter. */ | ||
790 | status = ql_write_xgmac_reg(qdev, TX_CFG, data); | ||
791 | if (status) | ||
792 | goto end; | ||
793 | |||
794 | /* Enable the receiver and clear its reset. */ | ||
795 | status = ql_read_xgmac_reg(qdev, RX_CFG, &data); | ||
796 | if (status) | ||
797 | goto end; | ||
798 | data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */ | ||
799 | data |= RX_CFG_EN; /* Enable the receiver. */ | ||
800 | status = ql_write_xgmac_reg(qdev, RX_CFG, data); | ||
801 | if (status) | ||
802 | goto end; | ||
803 | |||
804 | /* Set the jumbo frame size for TX and RX (0x2580 = 9600 bytes). */ | ||
805 | status = | ||
806 | ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16)); | ||
807 | if (status) | ||
808 | goto end; | ||
809 | status = | ||
810 | ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580); | ||
811 | if (status) | ||
812 | goto end; | ||
813 | |||
814 | /* Signal to the world that the port is enabled. */ | ||
815 | ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init)); | ||
816 | end: | ||
817 | ql_sem_unlock(qdev, qdev->xg_sem_mask); | ||
818 | return status; | ||
819 | } | ||
820 | |||
821 | /* Get the next large buffer. */ | ||
822 | struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring) | ||
823 | { | ||
824 | struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx]; | ||
825 | rx_ring->lbq_curr_idx++; | ||
826 | if (rx_ring->lbq_curr_idx == rx_ring->lbq_len) | ||
827 | rx_ring->lbq_curr_idx = 0; | ||
828 | rx_ring->lbq_free_cnt++; | ||
829 | return lbq_desc; | ||
830 | } | ||
831 | |||
832 | /* Get the next small buffer. */ | ||
833 | struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) | ||
834 | { | ||
835 | struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx]; | ||
836 | rx_ring->sbq_curr_idx++; | ||
837 | if (rx_ring->sbq_curr_idx == rx_ring->sbq_len) | ||
838 | rx_ring->sbq_curr_idx = 0; | ||
839 | rx_ring->sbq_free_cnt++; | ||
840 | return sbq_desc; | ||
841 | } | ||
842 | |||
843 | /* Update an rx ring index. */ | ||
844 | static void ql_update_cq(struct rx_ring *rx_ring) | ||
845 | { | ||
846 | rx_ring->cnsmr_idx++; | ||
847 | rx_ring->curr_entry++; | ||
848 | if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) { | ||
849 | rx_ring->cnsmr_idx = 0; | ||
850 | rx_ring->curr_entry = rx_ring->cq_base; | ||
851 | } | ||
852 | } | ||
853 | |||
854 | static void ql_write_cq_idx(struct rx_ring *rx_ring) | ||
855 | { | ||
856 | ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); | ||
857 | } | ||
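
These two helpers implement the consume-then-doorbell pattern the completion handlers rely on: advance past each processed entry, then publish the new consumer index to the chip once. A minimal sketch of the pattern (entries_to_process is illustrative):

	/* Sketch: consume completion entries, then ring the
	 * doorbell once.
	 */
	while (entries_to_process--)
		ql_update_cq(rx_ring);
	ql_write_cq_idx(rx_ring);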
858 | |||
859 | /* Process (refill) a large buffer queue. */ | ||
860 | static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) | ||
861 | { | ||
862 | int clean_idx = rx_ring->lbq_clean_idx; | ||
863 | struct bq_desc *lbq_desc; | ||
864 | struct bq_element *bq; | ||
865 | u64 map; | ||
866 | int i; | ||
867 | |||
868 | while (rx_ring->lbq_free_cnt > 16) { | ||
869 | for (i = 0; i < 16; i++) { | ||
870 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
871 | "lbq: try cleaning clean_idx = %d.\n", | ||
872 | clean_idx); | ||
873 | lbq_desc = &rx_ring->lbq[clean_idx]; | ||
874 | bq = lbq_desc->bq; | ||
875 | if (lbq_desc->p.lbq_page == NULL) { | ||
876 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
877 | "lbq: getting new page for index %d.\n", | ||
878 | lbq_desc->index); | ||
879 | lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC); | ||
880 | if (lbq_desc->p.lbq_page == NULL) { | ||
881 | QPRINTK(qdev, RX_STATUS, ERR, | ||
882 | "Couldn't get a page.\n"); | ||
883 | return; | ||
884 | } | ||
885 | map = pci_map_page(qdev->pdev, | ||
886 | lbq_desc->p.lbq_page, | ||
887 | 0, PAGE_SIZE, | ||
888 | PCI_DMA_FROMDEVICE); | ||
889 | if (pci_dma_mapping_error(qdev->pdev, map)) { | ||
890 | QPRINTK(qdev, RX_STATUS, ERR, | ||
891 | "PCI mapping failed.\n"); | ||
892 | return; | ||
893 | } | ||
894 | pci_unmap_addr_set(lbq_desc, mapaddr, map); | ||
895 | pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE); | ||
896 | bq->addr_lo = cpu_to_le32(map); | ||
897 | bq->addr_hi = cpu_to_le32(map >> 32); | ||
900 | } | ||
901 | clean_idx++; | ||
902 | if (clean_idx == rx_ring->lbq_len) | ||
903 | clean_idx = 0; | ||
904 | } | ||
905 | |||
906 | rx_ring->lbq_clean_idx = clean_idx; | ||
907 | rx_ring->lbq_prod_idx += 16; | ||
908 | if (rx_ring->lbq_prod_idx == rx_ring->lbq_len) | ||
909 | rx_ring->lbq_prod_idx = 0; | ||
910 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
911 | "lbq: updating prod idx = %d.\n", | ||
912 | rx_ring->lbq_prod_idx); | ||
913 | ql_write_db_reg(rx_ring->lbq_prod_idx, | ||
914 | rx_ring->lbq_prod_idx_db_reg); | ||
915 | rx_ring->lbq_free_cnt -= 16; | ||
916 | } | ||
917 | } | ||
918 | |||
919 | /* Process (refill) a small buffer queue. */ | ||
920 | static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) | ||
921 | { | ||
922 | int clean_idx = rx_ring->sbq_clean_idx; | ||
923 | struct bq_desc *sbq_desc; | ||
924 | struct bq_element *bq; | ||
925 | u64 map; | ||
926 | int i; | ||
927 | |||
928 | while (rx_ring->sbq_free_cnt > 16) { | ||
929 | for (i = 0; i < 16; i++) { | ||
930 | sbq_desc = &rx_ring->sbq[clean_idx]; | ||
931 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
932 | "sbq: try cleaning clean_idx = %d.\n", | ||
933 | clean_idx); | ||
934 | bq = sbq_desc->bq; | ||
935 | if (sbq_desc->p.skb == NULL) { | ||
936 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
937 | "sbq: getting new skb for index %d.\n", | ||
938 | sbq_desc->index); | ||
939 | sbq_desc->p.skb = | ||
940 | netdev_alloc_skb(qdev->ndev, | ||
941 | rx_ring->sbq_buf_size); | ||
942 | if (sbq_desc->p.skb == NULL) { | ||
943 | QPRINTK(qdev, PROBE, ERR, | ||
944 | "Couldn't get an skb.\n"); | ||
945 | rx_ring->sbq_clean_idx = clean_idx; | ||
946 | return; | ||
947 | } | ||
948 | skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD); | ||
949 | map = pci_map_single(qdev->pdev, | ||
950 | sbq_desc->p.skb->data, | ||
951 | rx_ring->sbq_buf_size / | ||
952 | 2, PCI_DMA_FROMDEVICE); | ||
953 | pci_unmap_addr_set(sbq_desc, mapaddr, map); | ||
954 | pci_unmap_len_set(sbq_desc, maplen, | ||
955 | rx_ring->sbq_buf_size / 2); | ||
956 | bq->addr_lo = cpu_to_le32(map); | ||
957 | bq->addr_hi = cpu_to_le32(map >> 32); | ||
958 | } | ||
959 | |||
960 | clean_idx++; | ||
961 | if (clean_idx == rx_ring->sbq_len) | ||
962 | clean_idx = 0; | ||
963 | } | ||
964 | rx_ring->sbq_clean_idx = clean_idx; | ||
965 | rx_ring->sbq_prod_idx += 16; | ||
966 | if (rx_ring->sbq_prod_idx == rx_ring->sbq_len) | ||
967 | rx_ring->sbq_prod_idx = 0; | ||
968 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
969 | "sbq: updating prod idx = %d.\n", | ||
970 | rx_ring->sbq_prod_idx); | ||
971 | ql_write_db_reg(rx_ring->sbq_prod_idx, | ||
972 | rx_ring->sbq_prod_idx_db_reg); | ||
973 | |||
974 | rx_ring->sbq_free_cnt -= 16; | ||
975 | } | ||
976 | } | ||
977 | |||
978 | static void ql_update_buffer_queues(struct ql_adapter *qdev, | ||
979 | struct rx_ring *rx_ring) | ||
980 | { | ||
981 | ql_update_sbq(qdev, rx_ring); | ||
982 | ql_update_lbq(qdev, rx_ring); | ||
983 | } | ||
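
/*
 * Both refill paths above recycle buffers in chunks of 16 and ring the
 * producer doorbell once per chunk, amortizing the MMIO write. A hedged
 * sketch of that batching with illustrative names (not driver fields);
 * the wrap test by equality assumes the ring length is a multiple of
 * 16, as the code above does for lbq_len and sbq_len.
 */
static void refill_in_chunks(unsigned int *free_cnt, unsigned int *prod_idx,
                             unsigned int ring_len,
                             void (*ring_doorbell)(unsigned int prod))
{
        while (*free_cnt > 16) {
                /* ... replenish 16 buffers here ... */
                *prod_idx += 16;
                if (*prod_idx == ring_len)
                        *prod_idx = 0;
                ring_doorbell(*prod_idx);
                *free_cnt -= 16;
        }
}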
984 | |||
985 | /* Unmaps tx buffers. Can be called from send() if a pci mapping | ||
986 | * fails at some stage, or from the interrupt when a tx completes. | ||
987 | */ | ||
988 | static void ql_unmap_send(struct ql_adapter *qdev, | ||
989 | struct tx_ring_desc *tx_ring_desc, int mapped) | ||
990 | { | ||
991 | int i; | ||
992 | for (i = 0; i < mapped; i++) { | ||
993 | if (i == 0 || (i == 7 && mapped > 7)) { | ||
994 | /* | ||
995 | * Unmap the skb->data area, or the | ||
996 | * external sglist (AKA the Outbound | ||
997 | * Address List (OAL)). | ||
998 | * If it's the zeroth element, then it's | ||
999 | * the skb->data area. If it's the 7th | ||
1000 | * element and there are more than 6 frags, | ||
1001 | * then it's an OAL. | ||
1002 | */ | ||
1003 | if (i == 7) { | ||
1004 | QPRINTK(qdev, TX_DONE, DEBUG, | ||
1005 | "unmapping OAL area.\n"); | ||
1006 | } | ||
1007 | pci_unmap_single(qdev->pdev, | ||
1008 | pci_unmap_addr(&tx_ring_desc->map[i], | ||
1009 | mapaddr), | ||
1010 | pci_unmap_len(&tx_ring_desc->map[i], | ||
1011 | maplen), | ||
1012 | PCI_DMA_TODEVICE); | ||
1013 | } else { | ||
1014 | QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n", | ||
1015 | i); | ||
1016 | pci_unmap_page(qdev->pdev, | ||
1017 | pci_unmap_addr(&tx_ring_desc->map[i], | ||
1018 | mapaddr), | ||
1019 | pci_unmap_len(&tx_ring_desc->map[i], | ||
1020 | maplen), PCI_DMA_TODEVICE); | ||
1021 | } | ||
1022 | } | ||
1023 | |||
1024 | } | ||
1025 | |||
1026 | /* Map the buffers for this transmit. This will return | ||
1027 | * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. | ||
1028 | */ | ||
1029 | static int ql_map_send(struct ql_adapter *qdev, | ||
1030 | struct ob_mac_iocb_req *mac_iocb_ptr, | ||
1031 | struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc) | ||
1032 | { | ||
1033 | int len = skb_headlen(skb); | ||
1034 | dma_addr_t map; | ||
1035 | int frag_idx, err, map_idx = 0; | ||
1036 | struct tx_buf_desc *tbd = mac_iocb_ptr->tbd; | ||
1037 | int frag_cnt = skb_shinfo(skb)->nr_frags; | ||
1038 | |||
1039 | if (frag_cnt) { | ||
1040 | QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt); | ||
1041 | } | ||
1042 | /* | ||
1043 | * Map the skb buffer first. | ||
1044 | */ | ||
1045 | map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); | ||
1046 | |||
1047 | err = pci_dma_mapping_error(qdev->pdev, map); | ||
1048 | if (err) { | ||
1049 | QPRINTK(qdev, TX_QUEUED, ERR, | ||
1050 | "PCI mapping failed with error: %d\n", err); | ||
1051 | |||
1052 | return NETDEV_TX_BUSY; | ||
1053 | } | ||
1054 | |||
1055 | tbd->len = cpu_to_le32(len); | ||
1056 | tbd->addr = cpu_to_le64(map); | ||
1057 | pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); | ||
1058 | pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); | ||
1059 | map_idx++; | ||
1060 | |||
1061 | /* | ||
1062 | * This loop fills the remainder of the 8 address descriptors | ||
1063 | * in the IOCB. If there are more than 7 fragments, then the | ||
1064 | * eighth address desc will point to an external list (OAL). | ||
1065 | * When this happens, the remainder of the frags will be stored | ||
1066 | * in this list. | ||
1067 | */ | ||
1068 | for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) { | ||
1069 | skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx]; | ||
1070 | tbd++; | ||
1071 | if (frag_idx == 6 && frag_cnt > 7) { | ||
1072 | /* Let's tack on an sglist. | ||
1073 | * Our control block will now | ||
1074 | * look like this: | ||
1075 | * iocb->seg[0] = skb->data | ||
1076 | * iocb->seg[1] = frag[0] | ||
1077 | * iocb->seg[2] = frag[1] | ||
1078 | * iocb->seg[3] = frag[2] | ||
1079 | * iocb->seg[4] = frag[3] | ||
1080 | * iocb->seg[5] = frag[4] | ||
1081 | * iocb->seg[6] = frag[5] | ||
1082 | * iocb->seg[7] = ptr to OAL (external sglist) | ||
1083 | * oal->seg[0] = frag[6] | ||
1084 | * oal->seg[1] = frag[7] | ||
1085 | * oal->seg[2] = frag[8] | ||
1086 | * oal->seg[3] = frag[9] | ||
1087 | * oal->seg[4] = frag[10] | ||
1088 | * etc... | ||
1089 | */ | ||
1090 | /* Tack on the OAL in the eighth segment of IOCB. */ | ||
1091 | map = pci_map_single(qdev->pdev, &tx_ring_desc->oal, | ||
1092 | sizeof(struct oal), | ||
1093 | PCI_DMA_TODEVICE); | ||
1094 | err = pci_dma_mapping_error(qdev->pdev, map); | ||
1095 | if (err) { | ||
1096 | QPRINTK(qdev, TX_QUEUED, ERR, | ||
1097 | "PCI mapping outbound address list with error: %d\n", | ||
1098 | err); | ||
1099 | goto map_error; | ||
1100 | } | ||
1101 | |||
1102 | tbd->addr = cpu_to_le64(map); | ||
1103 | /* | ||
1104 | * The length is the number of fragments | ||
1105 | * that remain to be mapped times the size | ||
1106 | * of one descriptor in our sglist (OAL). | ||
1107 | */ | ||
1108 | tbd->len = | ||
1109 | cpu_to_le32((sizeof(struct tx_buf_desc) * | ||
1110 | (frag_cnt - frag_idx)) | TX_DESC_C); | ||
1111 | pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, | ||
1112 | map); | ||
1113 | pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, | ||
1114 | sizeof(struct oal)); | ||
1115 | tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; | ||
1116 | map_idx++; | ||
1117 | } | ||
1118 | |||
1119 | map = | ||
1120 | pci_map_page(qdev->pdev, frag->page, | ||
1121 | frag->page_offset, frag->size, | ||
1122 | PCI_DMA_TODEVICE); | ||
1123 | |||
1124 | err = pci_dma_mapping_error(qdev->pdev, map); | ||
1125 | if (err) { | ||
1126 | QPRINTK(qdev, TX_QUEUED, ERR, | ||
1127 | "PCI mapping frags failed with error: %d.\n", | ||
1128 | err); | ||
1129 | goto map_error; | ||
1130 | } | ||
1131 | |||
1132 | tbd->addr = cpu_to_le64(map); | ||
1133 | tbd->len = cpu_to_le32(frag->size); | ||
1134 | pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); | ||
1135 | pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, | ||
1136 | frag->size); | ||
1137 | |||
1138 | } | ||
1139 | /* Save the number of segments we've mapped. */ | ||
1140 | tx_ring_desc->map_cnt = map_idx; | ||
1141 | /* Terminate the last segment. */ | ||
1142 | tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E); | ||
1143 | return NETDEV_TX_OK; | ||
1144 | |||
1145 | map_error: | ||
1146 | /* | ||
1147 | * If the first frag mapping failed, then map_idx will be | ||
1148 | * one, causing only the skb->data area to be unmapped. | ||
1149 | * Otherwise we pass in the number of segments that mapped | ||
1150 | * successfully so they can be unmapped. | ||
1151 | */ | ||
1152 | ql_unmap_send(qdev, tx_ring_desc, map_idx); | ||
1153 | return NETDEV_TX_BUSY; | ||
1154 | } | ||
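
/*
 * Hedged sketch of the descriptor accounting in ql_map_send() above:
 * segment 0 of the IOCB carries skb->data, frags 0-5 fill segments
 * 1-6, and when more than 7 frags exist, segment 7 points at the OAL,
 * which holds the remaining frags. 'TX_IOCB_SEGS' is an illustrative
 * constant, not a driver define.
 */
enum { TX_IOCB_SEGS = 8 };

static int oal_entries_needed(int nr_frags)
{
        /* 1 head segment + nr_frags frags fit directly in the IOCB... */
        if (1 + nr_frags <= TX_IOCB_SEGS)
                return 0;
        /* ...otherwise frags 6..nr_frags-1 spill into the OAL. */
        return nr_frags - 6;
}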
1155 | |||
1156 | void ql_realign_skb(struct sk_buff *skb, int len) | ||
1157 | { | ||
1158 | void *temp_addr = skb->data; | ||
1159 | |||
1160 | /* Undo the skb_reserve(skb,32) we did before | ||
1161 | * giving to hardware, and realign data on | ||
1162 | * a 2-byte boundary. | ||
1163 | */ | ||
1164 | skb->data -= QLGE_SB_PAD - NET_IP_ALIGN; | ||
1165 | skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN; | ||
1166 | skb_copy_to_linear_data(skb, temp_addr, | ||
1167 | (unsigned int)len); | ||
1168 | } | ||
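
/*
 * Net effect of the shift above, assuming QLGE_SB_PAD == 32 (per the
 * skb_reserve() comment) and the usual NET_IP_ALIGN of 2: skb->data
 * moves back 30 bytes, leaving it 2 bytes past its original alignment,
 * so the IP header that follows the 14-byte Ethernet header lands on
 * a 4-byte boundary.
 */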
1169 | |||
1170 | /* | ||
1171 | * This function builds an skb for the given inbound | ||
1172 | * completion. It will be rewritten for readability in the near | ||
1173 | * future, but for now it works well. | ||
1174 | */ | ||
1175 | static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | ||
1176 | struct rx_ring *rx_ring, | ||
1177 | struct ib_mac_iocb_rsp *ib_mac_rsp) | ||
1178 | { | ||
1179 | struct bq_desc *lbq_desc; | ||
1180 | struct bq_desc *sbq_desc; | ||
1181 | struct sk_buff *skb = NULL; | ||
1182 | u32 length = le32_to_cpu(ib_mac_rsp->data_len); | ||
1183 | u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len); | ||
1184 | |||
1185 | /* | ||
1186 | * Handle the header buffer if present. | ||
1187 | */ | ||
1188 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV && | ||
1189 | ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { | ||
1190 | QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len); | ||
1191 | /* | ||
1192 | * Headers fit nicely into a small buffer. | ||
1193 | */ | ||
1194 | sbq_desc = ql_get_curr_sbuf(rx_ring); | ||
1195 | pci_unmap_single(qdev->pdev, | ||
1196 | pci_unmap_addr(sbq_desc, mapaddr), | ||
1197 | pci_unmap_len(sbq_desc, maplen), | ||
1198 | PCI_DMA_FROMDEVICE); | ||
1199 | skb = sbq_desc->p.skb; | ||
1200 | ql_realign_skb(skb, hdr_len); | ||
1201 | skb_put(skb, hdr_len); | ||
1202 | sbq_desc->p.skb = NULL; | ||
1203 | } | ||
1204 | |||
1205 | /* | ||
1206 | * Handle the data buffer(s). | ||
1207 | */ | ||
1208 | if (unlikely(!length)) { /* Is there data too? */ | ||
1209 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1210 | "No Data buffer in this packet.\n"); | ||
1211 | return skb; | ||
1212 | } | ||
1213 | |||
1214 | if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { | ||
1215 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { | ||
1216 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1217 | "Headers in small, data of %d bytes in small, combine them.\n", length); | ||
1218 | /* | ||
1219 | * Data is less than small buffer size so it's | ||
1220 | * stuffed in a small buffer. | ||
1221 | * For this case we append the data | ||
1222 | * from the "data" small buffer to the "header" small | ||
1223 | * buffer. | ||
1224 | */ | ||
1225 | sbq_desc = ql_get_curr_sbuf(rx_ring); | ||
1226 | pci_dma_sync_single_for_cpu(qdev->pdev, | ||
1227 | pci_unmap_addr | ||
1228 | (sbq_desc, mapaddr), | ||
1229 | pci_unmap_len | ||
1230 | (sbq_desc, maplen), | ||
1231 | PCI_DMA_FROMDEVICE); | ||
1232 | memcpy(skb_put(skb, length), | ||
1233 | sbq_desc->p.skb->data, length); | ||
1234 | pci_dma_sync_single_for_device(qdev->pdev, | ||
1235 | pci_unmap_addr | ||
1236 | (sbq_desc, | ||
1237 | mapaddr), | ||
1238 | pci_unmap_len | ||
1239 | (sbq_desc, | ||
1240 | maplen), | ||
1241 | PCI_DMA_FROMDEVICE); | ||
1242 | } else { | ||
1243 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1244 | "%d bytes in a single small buffer.\n", length); | ||
1245 | sbq_desc = ql_get_curr_sbuf(rx_ring); | ||
1246 | skb = sbq_desc->p.skb; | ||
1247 | ql_realign_skb(skb, length); | ||
1248 | skb_put(skb, length); | ||
1249 | pci_unmap_single(qdev->pdev, | ||
1250 | pci_unmap_addr(sbq_desc, | ||
1251 | mapaddr), | ||
1252 | pci_unmap_len(sbq_desc, | ||
1253 | maplen), | ||
1254 | PCI_DMA_FROMDEVICE); | ||
1255 | sbq_desc->p.skb = NULL; | ||
1256 | } | ||
1257 | } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { | ||
1258 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { | ||
1259 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1260 | "Header in small, %d bytes in large. Chain large to small!\n", length); | ||
1261 | /* | ||
1262 | * The data is in a single large buffer. We | ||
1263 | * chain it to the header buffer's skb and let | ||
1264 | * it rip. | ||
1265 | */ | ||
1266 | lbq_desc = ql_get_curr_lbuf(rx_ring); | ||
1267 | pci_unmap_page(qdev->pdev, | ||
1268 | pci_unmap_addr(lbq_desc, | ||
1269 | mapaddr), | ||
1270 | pci_unmap_len(lbq_desc, maplen), | ||
1271 | PCI_DMA_FROMDEVICE); | ||
1272 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1273 | "Chaining page to skb.\n"); | ||
1274 | skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page, | ||
1275 | 0, length); | ||
1276 | skb->len += length; | ||
1277 | skb->data_len += length; | ||
1278 | skb->truesize += length; | ||
1279 | lbq_desc->p.lbq_page = NULL; | ||
1280 | } else { | ||
1281 | /* | ||
1282 | * The headers and data are in a single large buffer. We | ||
1283 | * copy it to a new skb and let it go. This can happen with | ||
1284 | * jumbo mtu on a non-TCP/UDP frame. | ||
1285 | */ | ||
1286 | lbq_desc = ql_get_curr_lbuf(rx_ring); | ||
1287 | skb = netdev_alloc_skb(qdev->ndev, length); | ||
1288 | if (skb == NULL) { | ||
1289 | QPRINTK(qdev, PROBE, DEBUG, | ||
1290 | "No skb available, drop the packet.\n"); | ||
1291 | return NULL; | ||
1292 | } | ||
1293 | skb_reserve(skb, NET_IP_ALIGN); | ||
1294 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1295 | "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length); | ||
1296 | skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page, | ||
1297 | 0, length); | ||
1298 | skb->len += length; | ||
1299 | skb->data_len += length; | ||
1300 | skb->truesize += length; | ||
1301 | length -= length; | ||
1302 | lbq_desc->p.lbq_page = NULL; | ||
1303 | __pskb_pull_tail(skb, | ||
1304 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? | ||
1305 | VLAN_ETH_HLEN : ETH_HLEN); | ||
1306 | } | ||
1307 | } else { | ||
1308 | /* | ||
1309 | * The data is in a chain of large buffers | ||
1310 | * pointed to by a small buffer. We loop | ||
1311 | * through and chain them to our small header | ||
1312 | * buffer's skb. | ||
1313 | * frags: There are at most 18 frags, and our small | ||
1314 | * buffer will hold 32 of them. In practice | ||
1315 | * we use at most 3 for our 9000-byte jumbo | ||
1316 | * frames. If the MTU goes up we could | ||
1317 | * eventually be in trouble. | ||
1318 | */ | ||
1319 | int size, offset, i = 0; | ||
1320 | struct bq_element *bq, bq_array[8]; | ||
1321 | sbq_desc = ql_get_curr_sbuf(rx_ring); | ||
1322 | pci_unmap_single(qdev->pdev, | ||
1323 | pci_unmap_addr(sbq_desc, mapaddr), | ||
1324 | pci_unmap_len(sbq_desc, maplen), | ||
1325 | PCI_DMA_FROMDEVICE); | ||
1326 | if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { | ||
1327 | /* | ||
1328 | * This is a non-TCP/UDP IP frame, so | ||
1329 | * the headers aren't split into a small | ||
1330 | * buffer. We have to use the small buffer | ||
1331 | * that contains our sg list as our skb to | ||
1332 | * send upstairs. Copy the sg list here to | ||
1333 | * a local buffer and use it to find the | ||
1334 | * pages to chain. | ||
1335 | */ | ||
1336 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1337 | "%d bytes of headers & data in chain of large.\n", length); | ||
1338 | skb = sbq_desc->p.skb; | ||
1339 | bq = &bq_array[0]; | ||
1340 | memcpy(bq, skb->data, sizeof(bq_array)); | ||
1341 | sbq_desc->p.skb = NULL; | ||
1342 | skb_reserve(skb, NET_IP_ALIGN); | ||
1343 | } else { | ||
1344 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1345 | "Headers in small, %d bytes of data in chain of large.\n", length); | ||
1346 | bq = (struct bq_element *)sbq_desc->p.skb->data; | ||
1347 | } | ||
1348 | while (length > 0) { | ||
1349 | lbq_desc = ql_get_curr_lbuf(rx_ring); | ||
1350 | if ((bq->addr_lo & ~BQ_MASK) != lbq_desc->bq->addr_lo) { | ||
1351 | QPRINTK(qdev, RX_STATUS, ERR, | ||
1352 | "Panic!!! bad large buffer address, expected 0x%.08x, got 0x%.08x.\n", | ||
1353 | lbq_desc->bq->addr_lo, bq->addr_lo); | ||
1354 | return NULL; | ||
1355 | } | ||
1356 | pci_unmap_page(qdev->pdev, | ||
1357 | pci_unmap_addr(lbq_desc, | ||
1358 | mapaddr), | ||
1359 | pci_unmap_len(lbq_desc, | ||
1360 | maplen), | ||
1361 | PCI_DMA_FROMDEVICE); | ||
1362 | size = (length < PAGE_SIZE) ? length : PAGE_SIZE; | ||
1363 | offset = 0; | ||
1364 | |||
1365 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1366 | "Adding page %d to skb for %d bytes.\n", | ||
1367 | i, size); | ||
1368 | skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page, | ||
1369 | offset, size); | ||
1370 | skb->len += size; | ||
1371 | skb->data_len += size; | ||
1372 | skb->truesize += size; | ||
1373 | length -= size; | ||
1374 | lbq_desc->p.lbq_page = NULL; | ||
1375 | bq++; | ||
1376 | i++; | ||
1377 | } | ||
1378 | __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? | ||
1379 | VLAN_ETH_HLEN : ETH_HLEN); | ||
1380 | } | ||
1381 | return skb; | ||
1382 | } | ||
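
/*
 * Summary of the cases ql_build_rx_skb() handles above, keyed on the
 * header-split (HS) and data-in-small/large (DS/DL) response flags:
 *   HS && DS : header and data each in a small buffer; the data is
 *              memcpy'd onto the header skb.
 *   !HS && DS: the whole frame fits in one small buffer, used as-is.
 *   HS && DL : header in a small buffer, one large page chained on
 *              as a frag.
 *   !HS && DL: whole frame in one large page, chained to a fresh skb
 *              and the link headers pulled into the linear area.
 *   neither  : a small buffer holds an sglist of large pages, which
 *              are chained one by one.
 */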
1383 | |||
1384 | /* Process an inbound completion from an rx ring. */ | ||
1385 | static void ql_process_mac_rx_intr(struct ql_adapter *qdev, | ||
1386 | struct rx_ring *rx_ring, | ||
1387 | struct ib_mac_iocb_rsp *ib_mac_rsp) | ||
1388 | { | ||
1389 | struct net_device *ndev = qdev->ndev; | ||
1390 | struct sk_buff *skb = NULL; | ||
1391 | |||
1392 | QL_DUMP_IB_MAC_RSP(ib_mac_rsp); | ||
1393 | |||
1394 | skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); | ||
1395 | if (unlikely(!skb)) { | ||
1396 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1397 | "No skb available, drop packet.\n"); | ||
1398 | return; | ||
1399 | } | ||
1400 | |||
1401 | prefetch(skb->data); | ||
1402 | skb->dev = ndev; | ||
1403 | if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { | ||
1404 | QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n", | ||
1405 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | ||
1406 | IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "", | ||
1407 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | ||
1408 | IB_MAC_IOCB_RSP_M_REG ? "Registered" : "", | ||
1409 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | ||
1410 | IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); | ||
1411 | } | ||
1412 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { | ||
1413 | QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n"); | ||
1414 | } | ||
1415 | if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) { | ||
1416 | QPRINTK(qdev, RX_STATUS, ERR, | ||
1417 | "Bad checksum for this %s packet.\n", | ||
1418 | ((ib_mac_rsp-> | ||
1419 | flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP")); | ||
1420 | skb->ip_summed = CHECKSUM_NONE; | ||
1421 | } else if (qdev->rx_csum && | ||
1422 | ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) || | ||
1423 | ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && | ||
1424 | !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) { | ||
1425 | QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n"); | ||
1426 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1427 | } | ||
1428 | qdev->stats.rx_packets++; | ||
1429 | qdev->stats.rx_bytes += skb->len; | ||
1430 | skb->protocol = eth_type_trans(skb, ndev); | ||
1431 | if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) { | ||
1432 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1433 | "Passing a VLAN packet upstream.\n"); | ||
1434 | vlan_hwaccel_rx(skb, qdev->vlgrp, | ||
1435 | le16_to_cpu(ib_mac_rsp->vlan_id)); | ||
1436 | } else { | ||
1437 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1438 | "Passing a normal packet upstream.\n"); | ||
1439 | netif_rx(skb); | ||
1440 | } | ||
1441 | ndev->last_rx = jiffies; | ||
1442 | } | ||
1443 | |||
1444 | /* Process an outbound completion from an rx ring. */ | ||
1445 | static void ql_process_mac_tx_intr(struct ql_adapter *qdev, | ||
1446 | struct ob_mac_iocb_rsp *mac_rsp) | ||
1447 | { | ||
1448 | struct tx_ring *tx_ring; | ||
1449 | struct tx_ring_desc *tx_ring_desc; | ||
1450 | |||
1451 | QL_DUMP_OB_MAC_RSP(mac_rsp); | ||
1452 | tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; | ||
1453 | tx_ring_desc = &tx_ring->q[mac_rsp->tid]; | ||
1454 | ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); | ||
1455 | qdev->stats.tx_bytes += tx_ring_desc->skb->len; | ||
1456 | qdev->stats.tx_packets++; | ||
1457 | dev_kfree_skb(tx_ring_desc->skb); | ||
1458 | tx_ring_desc->skb = NULL; | ||
1459 | |||
1460 | if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | | ||
1461 | OB_MAC_IOCB_RSP_S | | ||
1462 | OB_MAC_IOCB_RSP_L | | ||
1463 | OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) { | ||
1464 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) { | ||
1465 | QPRINTK(qdev, TX_DONE, WARNING, | ||
1466 | "Total descriptor length did not match transfer length.\n"); | ||
1467 | } | ||
1468 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) { | ||
1469 | QPRINTK(qdev, TX_DONE, WARNING, | ||
1470 | "Frame too short to be legal, not sent.\n"); | ||
1471 | } | ||
1472 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) { | ||
1473 | QPRINTK(qdev, TX_DONE, WARNING, | ||
1474 | "Frame too long, but sent anyway.\n"); | ||
1475 | } | ||
1476 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) { | ||
1477 | QPRINTK(qdev, TX_DONE, WARNING, | ||
1478 | "PCI backplane error. Frame not sent.\n"); | ||
1479 | } | ||
1480 | } | ||
1481 | atomic_inc(&tx_ring->tx_count); | ||
1482 | } | ||
1483 | |||
1484 | /* Fire up a handler to reset the MPI processor. */ | ||
1485 | void ql_queue_fw_error(struct ql_adapter *qdev) | ||
1486 | { | ||
1487 | netif_stop_queue(qdev->ndev); | ||
1488 | netif_carrier_off(qdev->ndev); | ||
1489 | queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0); | ||
1490 | } | ||
1491 | |||
1492 | void ql_queue_asic_error(struct ql_adapter *qdev) | ||
1493 | { | ||
1494 | netif_stop_queue(qdev->ndev); | ||
1495 | netif_carrier_off(qdev->ndev); | ||
1496 | ql_disable_interrupts(qdev); | ||
1497 | queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0); | ||
1498 | } | ||
1499 | |||
1500 | static void ql_process_chip_ae_intr(struct ql_adapter *qdev, | ||
1501 | struct ib_ae_iocb_rsp *ib_ae_rsp) | ||
1502 | { | ||
1503 | switch (ib_ae_rsp->event) { | ||
1504 | case MGMT_ERR_EVENT: | ||
1505 | QPRINTK(qdev, RX_ERR, ERR, | ||
1506 | "Management Processor Fatal Error.\n"); | ||
1507 | ql_queue_fw_error(qdev); | ||
1508 | return; | ||
1509 | |||
1510 | case CAM_LOOKUP_ERR_EVENT: | ||
1511 | QPRINTK(qdev, LINK, ERR, | ||
1512 | "Multiple CAM hits lookup occurred.\n"); | ||
1513 | QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n"); | ||
1514 | ql_queue_asic_error(qdev); | ||
1515 | return; | ||
1516 | |||
1517 | case SOFT_ECC_ERROR_EVENT: | ||
1518 | QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n"); | ||
1519 | ql_queue_asic_error(qdev); | ||
1520 | break; | ||
1521 | |||
1522 | case PCI_ERR_ANON_BUF_RD: | ||
1523 | QPRINTK(qdev, RX_ERR, ERR, | ||
1524 | "PCI error occurred when reading anonymous buffers from rx_ring %d.\n", | ||
1525 | ib_ae_rsp->q_id); | ||
1526 | ql_queue_asic_error(qdev); | ||
1527 | break; | ||
1528 | |||
1529 | default: | ||
1530 | QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n", | ||
1531 | ib_ae_rsp->event); | ||
1532 | ql_queue_asic_error(qdev); | ||
1533 | break; | ||
1534 | } | ||
1535 | } | ||
1536 | |||
1537 | static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) | ||
1538 | { | ||
1539 | struct ql_adapter *qdev = rx_ring->qdev; | ||
1540 | u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); | ||
1541 | struct ob_mac_iocb_rsp *net_rsp = NULL; | ||
1542 | int count = 0; | ||
1543 | |||
1544 | /* While there are entries in the completion queue. */ | ||
1545 | while (prod != rx_ring->cnsmr_idx) { | ||
1546 | |||
1547 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1548 | "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id, | ||
1549 | prod, rx_ring->cnsmr_idx); | ||
1550 | |||
1551 | net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; | ||
1552 | rmb(); | ||
1553 | switch (net_rsp->opcode) { | ||
1554 | |||
1555 | case OPCODE_OB_MAC_TSO_IOCB: | ||
1556 | case OPCODE_OB_MAC_IOCB: | ||
1557 | ql_process_mac_tx_intr(qdev, net_rsp); | ||
1558 | break; | ||
1559 | default: | ||
1560 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1561 | "Hit default case, not handled! dropping the packet, opcode = %x.\n", | ||
1562 | net_rsp->opcode); | ||
1563 | } | ||
1564 | count++; | ||
1565 | ql_update_cq(rx_ring); | ||
1566 | prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); | ||
1567 | } | ||
1568 | ql_write_cq_idx(rx_ring); | ||
1569 | if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) { | ||
1570 | struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; | ||
1571 | if (atomic_read(&tx_ring->queue_stopped) && | ||
1572 | (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) | ||
1573 | /* | ||
1574 | * The queue got stopped because the tx_ring was full. | ||
1575 | * Wake it up, because it's now at least 25% empty. | ||
1576 | */ | ||
1577 | netif_wake_queue(qdev->ndev); | ||
1578 | } | ||
1579 | |||
1580 | return count; | ||
1581 | } | ||
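
/*
 * A sketch of the wake hysteresis used above: the queue is stopped
 * (see qlge_send() below) when descriptors run out, and is only woken
 * once more than a quarter of the work queue is free again, so a
 * single completion cannot bounce the queue on and off.
 */
static int tx_should_wake(int free_descs, int wq_len)
{
        return free_descs > wq_len / 4;
}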
1582 | |||
1583 | static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) | ||
1584 | { | ||
1585 | struct ql_adapter *qdev = rx_ring->qdev; | ||
1586 | u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); | ||
1587 | struct ql_net_rsp_iocb *net_rsp; | ||
1588 | int count = 0; | ||
1589 | |||
1590 | /* While there are entries in the completion queue. */ | ||
1591 | while (prod != rx_ring->cnsmr_idx) { | ||
1592 | |||
1593 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1594 | "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id, | ||
1595 | prod, rx_ring->cnsmr_idx); | ||
1596 | |||
1597 | net_rsp = rx_ring->curr_entry; | ||
1598 | rmb(); | ||
1599 | switch (net_rsp->opcode) { | ||
1600 | case OPCODE_IB_MAC_IOCB: | ||
1601 | ql_process_mac_rx_intr(qdev, rx_ring, | ||
1602 | (struct ib_mac_iocb_rsp *) | ||
1603 | net_rsp); | ||
1604 | break; | ||
1605 | |||
1606 | case OPCODE_IB_AE_IOCB: | ||
1607 | ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *) | ||
1608 | net_rsp); | ||
1609 | break; | ||
1610 | default: | ||
1611 | { | ||
1612 | QPRINTK(qdev, RX_STATUS, DEBUG, | ||
1613 | "Hit default case, not handled! dropping the packet, opcode = %x.\n", | ||
1614 | net_rsp->opcode); | ||
1615 | } | ||
1616 | } | ||
1617 | count++; | ||
1618 | ql_update_cq(rx_ring); | ||
1619 | prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); | ||
1620 | if (count == budget) | ||
1621 | break; | ||
1622 | } | ||
1623 | ql_update_buffer_queues(qdev, rx_ring); | ||
1624 | ql_write_cq_idx(rx_ring); | ||
1625 | return count; | ||
1626 | } | ||
1627 | |||
1628 | static int ql_napi_poll_msix(struct napi_struct *napi, int budget) | ||
1629 | { | ||
1630 | struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi); | ||
1631 | struct ql_adapter *qdev = rx_ring->qdev; | ||
1632 | int work_done = ql_clean_inbound_rx_ring(rx_ring, budget); | ||
1633 | |||
1634 | QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n", | ||
1635 | rx_ring->cq_id); | ||
1636 | |||
1637 | if (work_done < budget) { | ||
1638 | __netif_rx_complete(qdev->ndev, napi); | ||
1639 | ql_enable_completion_interrupt(qdev, rx_ring->irq); | ||
1640 | } | ||
1641 | return work_done; | ||
1642 | } | ||
1643 | |||
1644 | static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp) | ||
1645 | { | ||
1646 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
1647 | |||
1648 | qdev->vlgrp = grp; | ||
1649 | if (grp) { | ||
1650 | QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n"); | ||
1651 | ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | | ||
1652 | NIC_RCV_CFG_VLAN_MATCH_AND_NON); | ||
1653 | } else { | ||
1654 | QPRINTK(qdev, IFUP, DEBUG, | ||
1655 | "Turning off VLAN in NIC_RCV_CFG.\n"); | ||
1656 | ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); | ||
1657 | } | ||
1658 | } | ||
1659 | |||
1660 | static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid) | ||
1661 | { | ||
1662 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
1663 | u32 enable_bit = MAC_ADDR_E; | ||
1664 | |||
1665 | spin_lock(&qdev->hw_lock); | ||
1666 | if (ql_set_mac_addr_reg | ||
1667 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { | ||
1668 | QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n"); | ||
1669 | } | ||
1670 | spin_unlock(&qdev->hw_lock); | ||
1671 | } | ||
1672 | |||
1673 | static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) | ||
1674 | { | ||
1675 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
1676 | u32 enable_bit = 0; | ||
1677 | |||
1678 | spin_lock(&qdev->hw_lock); | ||
1679 | if (ql_set_mac_addr_reg | ||
1680 | (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { | ||
1681 | QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n"); | ||
1682 | } | ||
1683 | spin_unlock(&qdev->hw_lock); | ||
1684 | |||
1685 | } | ||
1686 | |||
1687 | /* Worker thread to process a given rx_ring that is dedicated | ||
1688 | * to outbound completions. | ||
1689 | */ | ||
1690 | static void ql_tx_clean(struct work_struct *work) | ||
1691 | { | ||
1692 | struct rx_ring *rx_ring = | ||
1693 | container_of(work, struct rx_ring, rx_work.work); | ||
1694 | ql_clean_outbound_rx_ring(rx_ring); | ||
1695 | ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq); | ||
1696 | |||
1697 | } | ||
1698 | |||
1699 | /* Worker thread to process a given rx_ring that is dedicated | ||
1700 | * to inbound completions. | ||
1701 | */ | ||
1702 | static void ql_rx_clean(struct work_struct *work) | ||
1703 | { | ||
1704 | struct rx_ring *rx_ring = | ||
1705 | container_of(work, struct rx_ring, rx_work.work); | ||
1706 | ql_clean_inbound_rx_ring(rx_ring, 64); | ||
1707 | ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq); | ||
1708 | } | ||
1709 | |||
1710 | /* MSI-X Multiple Vector Interrupt Handler for outbound completions. */ | ||
1711 | static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id) | ||
1712 | { | ||
1713 | struct rx_ring *rx_ring = dev_id; | ||
1714 | queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue, | ||
1715 | &rx_ring->rx_work, 0); | ||
1716 | return IRQ_HANDLED; | ||
1717 | } | ||
1718 | |||
1719 | /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */ | ||
1720 | static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) | ||
1721 | { | ||
1722 | struct rx_ring *rx_ring = dev_id; | ||
1723 | struct ql_adapter *qdev = rx_ring->qdev; | ||
1724 | netif_rx_schedule(qdev->ndev, &rx_ring->napi); | ||
1725 | return IRQ_HANDLED; | ||
1726 | } | ||
1727 | |||
1728 | /* We check here to see if we're already handling a legacy | ||
1729 | * interrupt. If we are, then it must belong to another | ||
1730 | * chip with which we're sharing the interrupt line. | ||
1731 | */ | ||
1732 | int ql_legacy_check(struct ql_adapter *qdev) | ||
1733 | { | ||
1734 | int err; | ||
1735 | spin_lock(&qdev->legacy_lock); | ||
1736 | err = atomic_read(&qdev->intr_context[0].irq_cnt); | ||
1737 | spin_unlock(&qdev->legacy_lock); | ||
1738 | return err; | ||
1739 | } | ||
1740 | |||
1741 | /* This handles a fatal error, MPI activity, and the default | ||
1742 | * rx_ring in an MSI-X multiple vector environment. | ||
1743 | * In an MSI/legacy environment it also processes the rest of | ||
1744 | * the rx_rings. | ||
1745 | */ | ||
1746 | static irqreturn_t qlge_isr(int irq, void *dev_id) | ||
1747 | { | ||
1748 | struct rx_ring *rx_ring = dev_id; | ||
1749 | struct ql_adapter *qdev = rx_ring->qdev; | ||
1750 | struct intr_context *intr_context = &qdev->intr_context[0]; | ||
1751 | u32 var; | ||
1752 | int i; | ||
1753 | int work_done = 0; | ||
1754 | |||
1755 | if (qdev->legacy_check && qdev->legacy_check(qdev)) { | ||
1756 | QPRINTK(qdev, INTR, INFO, "Already busy, not our interrupt.\n"); | ||
1757 | return IRQ_NONE; /* Not our interrupt */ | ||
1758 | } | ||
1759 | |||
1760 | var = ql_read32(qdev, STS); | ||
1761 | |||
1762 | /* | ||
1763 | * Check for fatal error. | ||
1764 | */ | ||
1765 | if (var & STS_FE) { | ||
1766 | ql_queue_asic_error(qdev); | ||
1767 | QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var); | ||
1768 | var = ql_read32(qdev, ERR_STS); | ||
1769 | QPRINTK(qdev, INTR, ERR, | ||
1770 | "Resetting chip. Error Status Register = 0x%x\n", var); | ||
1771 | return IRQ_HANDLED; | ||
1772 | } | ||
1773 | |||
1774 | /* | ||
1775 | * Check MPI processor activity. | ||
1776 | */ | ||
1777 | if (var & STS_PI) { | ||
1778 | /* | ||
1779 | * We've got an async event or mailbox completion. | ||
1780 | * Handle it and clear the source of the interrupt. | ||
1781 | */ | ||
1782 | QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n"); | ||
1783 | ql_disable_completion_interrupt(qdev, intr_context->intr); | ||
1784 | queue_delayed_work_on(smp_processor_id(), qdev->workqueue, | ||
1785 | &qdev->mpi_work, 0); | ||
1786 | work_done++; | ||
1787 | } | ||
1788 | |||
1789 | /* | ||
1790 | * Check the default queue and wake handler if active. | ||
1791 | */ | ||
1792 | rx_ring = &qdev->rx_ring[0]; | ||
1793 | if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) { | ||
1794 | QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n"); | ||
1795 | ql_disable_completion_interrupt(qdev, intr_context->intr); | ||
1796 | queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue, | ||
1797 | &rx_ring->rx_work, 0); | ||
1798 | work_done++; | ||
1799 | } | ||
1800 | |||
1801 | if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) { | ||
1802 | /* | ||
1803 | * Start the DPC for each active queue. | ||
1804 | */ | ||
1805 | for (i = 1; i < qdev->rx_ring_count; i++) { | ||
1806 | rx_ring = &qdev->rx_ring[i]; | ||
1807 | if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != | ||
1808 | rx_ring->cnsmr_idx) { | ||
1809 | QPRINTK(qdev, INTR, INFO, | ||
1810 | "Waking handler for rx_ring[%d].\n", i); | ||
1811 | ql_disable_completion_interrupt(qdev, | ||
1812 | intr_context-> | ||
1813 | intr); | ||
1814 | if (i < qdev->rss_ring_first_cq_id) | ||
1815 | queue_delayed_work_on(rx_ring->cpu, | ||
1816 | qdev->q_workqueue, | ||
1817 | &rx_ring->rx_work, | ||
1818 | 0); | ||
1819 | else | ||
1820 | netif_rx_schedule(qdev->ndev, | ||
1821 | &rx_ring->napi); | ||
1822 | work_done++; | ||
1823 | } | ||
1824 | } | ||
1825 | } | ||
1826 | return work_done ? IRQ_HANDLED : IRQ_NONE; | ||
1827 | } | ||
1828 | |||
1829 | static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) | ||
1830 | { | ||
1831 | |||
1832 | if (skb_is_gso(skb)) { | ||
1833 | int err; | ||
1834 | if (skb_header_cloned(skb)) { | ||
1835 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | ||
1836 | if (err) | ||
1837 | return err; | ||
1838 | } | ||
1839 | |||
1840 | mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; | ||
1841 | mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC; | ||
1842 | mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); | ||
1843 | mac_iocb_ptr->total_hdrs_len = | ||
1844 | cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb)); | ||
1845 | mac_iocb_ptr->net_trans_offset = | ||
1846 | cpu_to_le16(skb_network_offset(skb) | | ||
1847 | skb_transport_offset(skb) | ||
1848 | << OB_MAC_TRANSPORT_HDR_SHIFT); | ||
1849 | mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | ||
1850 | mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO; | ||
1851 | if (likely(skb->protocol == htons(ETH_P_IP))) { | ||
1852 | struct iphdr *iph = ip_hdr(skb); | ||
1853 | iph->check = 0; | ||
1854 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; | ||
1855 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | ||
1856 | iph->daddr, 0, | ||
1857 | IPPROTO_TCP, | ||
1858 | 0); | ||
1859 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | ||
1860 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6; | ||
1861 | tcp_hdr(skb)->check = | ||
1862 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | ||
1863 | &ipv6_hdr(skb)->daddr, | ||
1864 | 0, IPPROTO_TCP, 0); | ||
1865 | } | ||
1866 | return 1; | ||
1867 | } | ||
1868 | return 0; | ||
1869 | } | ||
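
/*
 * What the csum_tcpudp_magic() seeding above achieves: tcp->check is
 * preloaded with the ones'-complement sum of the pseudo-header (with
 * a zero length, which the hardware fills in per segment), so the NIC
 * only has to extend that sum over the TCP header and payload. A
 * simplified user-space sketch, assuming the addresses are passed as
 * host-order 32-bit values rather than the kernel's __be32:
 */
#include <stdint.h>

static uint16_t pseudo_hdr_seed(uint32_t saddr, uint32_t daddr,
                                uint8_t proto, uint16_t len)
{
        uint32_t sum = 0;

        sum += (saddr >> 16) + (saddr & 0xffff);
        sum += (daddr >> 16) + (daddr & 0xffff);
        sum += proto + len;
        while (sum >> 16)               /* fold carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}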
1870 | |||
1871 | static void ql_hw_csum_setup(struct sk_buff *skb, | ||
1872 | struct ob_mac_tso_iocb_req *mac_iocb_ptr) | ||
1873 | { | ||
1874 | int len; | ||
1875 | struct iphdr *iph = ip_hdr(skb); | ||
1876 | u16 *check; | ||
1877 | mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; | ||
1878 | mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); | ||
1879 | mac_iocb_ptr->net_trans_offset = | ||
1880 | cpu_to_le16(skb_network_offset(skb) | | ||
1881 | skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT); | ||
1882 | |||
1883 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; | ||
1884 | len = (ntohs(iph->tot_len) - (iph->ihl << 2)); | ||
1885 | if (likely(iph->protocol == IPPROTO_TCP)) { | ||
1886 | check = &(tcp_hdr(skb)->check); | ||
1887 | mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC; | ||
1888 | mac_iocb_ptr->total_hdrs_len = | ||
1889 | cpu_to_le16(skb_transport_offset(skb) + | ||
1890 | (tcp_hdr(skb)->doff << 2)); | ||
1891 | } else { | ||
1892 | check = &(udp_hdr(skb)->check); | ||
1893 | mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC; | ||
1894 | mac_iocb_ptr->total_hdrs_len = | ||
1895 | cpu_to_le16(skb_transport_offset(skb) + | ||
1896 | sizeof(struct udphdr)); | ||
1897 | } | ||
1898 | *check = ~csum_tcpudp_magic(iph->saddr, | ||
1899 | iph->daddr, len, iph->protocol, 0); | ||
1900 | } | ||
1901 | |||
1902 | static int qlge_send(struct sk_buff *skb, struct net_device *ndev) | ||
1903 | { | ||
1904 | struct tx_ring_desc *tx_ring_desc; | ||
1905 | struct ob_mac_iocb_req *mac_iocb_ptr; | ||
1906 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
1907 | int tso; | ||
1908 | struct tx_ring *tx_ring; | ||
1909 | u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb); | ||
1910 | |||
1911 | tx_ring = &qdev->tx_ring[tx_ring_idx]; | ||
1912 | |||
1913 | if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { | ||
1914 | QPRINTK(qdev, TX_QUEUED, INFO, | ||
1915 | "%s: shutting down tx queue %d du to lack of resources.\n", | ||
1916 | __func__, tx_ring_idx); | ||
1917 | netif_stop_queue(ndev); | ||
1918 | atomic_inc(&tx_ring->queue_stopped); | ||
1919 | return NETDEV_TX_BUSY; | ||
1920 | } | ||
1921 | tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; | ||
1922 | mac_iocb_ptr = tx_ring_desc->queue_entry; | ||
1923 | memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr)); | ||
1924 | if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) { | ||
1925 | QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n"); | ||
1926 | return NETDEV_TX_BUSY; | ||
1927 | } | ||
1928 | |||
1929 | mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB; | ||
1930 | mac_iocb_ptr->tid = tx_ring_desc->index; | ||
1931 | /* We use the upper 32-bits to store the tx queue for this IO. | ||
1932 | * When we get the completion we can use it to establish the context. | ||
1933 | */ | ||
1934 | mac_iocb_ptr->txq_idx = tx_ring_idx; | ||
1935 | tx_ring_desc->skb = skb; | ||
1936 | |||
1937 | mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); | ||
1938 | |||
1939 | if (qdev->vlgrp && vlan_tx_tag_present(skb)) { | ||
1940 | QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n", | ||
1941 | vlan_tx_tag_get(skb)); | ||
1942 | mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; | ||
1943 | mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb)); | ||
1944 | } | ||
1945 | tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); | ||
1946 | if (tso < 0) { | ||
1947 | dev_kfree_skb_any(skb); | ||
1948 | return NETDEV_TX_OK; | ||
1949 | } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) { | ||
1950 | ql_hw_csum_setup(skb, | ||
1951 | (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); | ||
1952 | } | ||
1953 | QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); | ||
1954 | tx_ring->prod_idx++; | ||
1955 | if (tx_ring->prod_idx == tx_ring->wq_len) | ||
1956 | tx_ring->prod_idx = 0; | ||
1957 | wmb(); | ||
1958 | |||
1959 | ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); | ||
1960 | ndev->trans_start = jiffies; | ||
1961 | QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n", | ||
1962 | tx_ring->prod_idx, skb->len); | ||
1963 | |||
1964 | atomic_dec(&tx_ring->tx_count); | ||
1965 | return NETDEV_TX_OK; | ||
1966 | } | ||
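
/*
 * Ordering note for qlge_send() above: the IOCB is fully built in host
 * memory, wmb() forces those stores out before the doorbell write, and
 * only then does ql_write_db_reg() publish prod_idx to the chip, so
 * the hardware can never fetch a half-written request.
 */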
1967 | |||
1968 | static void ql_free_shadow_space(struct ql_adapter *qdev) | ||
1969 | { | ||
1970 | if (qdev->rx_ring_shadow_reg_area) { | ||
1971 | pci_free_consistent(qdev->pdev, | ||
1972 | PAGE_SIZE, | ||
1973 | qdev->rx_ring_shadow_reg_area, | ||
1974 | qdev->rx_ring_shadow_reg_dma); | ||
1975 | qdev->rx_ring_shadow_reg_area = NULL; | ||
1976 | } | ||
1977 | if (qdev->tx_ring_shadow_reg_area) { | ||
1978 | pci_free_consistent(qdev->pdev, | ||
1979 | PAGE_SIZE, | ||
1980 | qdev->tx_ring_shadow_reg_area, | ||
1981 | qdev->tx_ring_shadow_reg_dma); | ||
1982 | qdev->tx_ring_shadow_reg_area = NULL; | ||
1983 | } | ||
1984 | } | ||
1985 | |||
1986 | static int ql_alloc_shadow_space(struct ql_adapter *qdev) | ||
1987 | { | ||
1988 | qdev->rx_ring_shadow_reg_area = | ||
1989 | pci_alloc_consistent(qdev->pdev, | ||
1990 | PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma); | ||
1991 | if (qdev->rx_ring_shadow_reg_area == NULL) { | ||
1992 | QPRINTK(qdev, IFUP, ERR, | ||
1993 | "Allocation of RX shadow space failed.\n"); | ||
1994 | return -ENOMEM; | ||
1995 | } | ||
1996 | qdev->tx_ring_shadow_reg_area = | ||
1997 | pci_alloc_consistent(qdev->pdev, PAGE_SIZE, | ||
1998 | &qdev->tx_ring_shadow_reg_dma); | ||
1999 | if (qdev->tx_ring_shadow_reg_area == NULL) { | ||
2000 | QPRINTK(qdev, IFUP, ERR, | ||
2001 | "Allocation of TX shadow space failed.\n"); | ||
2002 | goto err_wqp_sh_area; | ||
2003 | } | ||
2004 | return 0; | ||
2005 | |||
2006 | err_wqp_sh_area: | ||
2007 | pci_free_consistent(qdev->pdev, | ||
2008 | PAGE_SIZE, | ||
2009 | qdev->rx_ring_shadow_reg_area, | ||
2010 | qdev->rx_ring_shadow_reg_dma); | ||
2011 | return -ENOMEM; | ||
2012 | } | ||
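
/*
 * The "shadow" pages allocated above are ordinary coherent DMA memory:
 * the chip DMA-writes its completion-queue producer indices into them,
 * so the fast path (the ql_read_sh_reg() callers above) polls a cheap
 * memory read instead of an MMIO register read.
 */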
2013 | |||
2014 | static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) | ||
2015 | { | ||
2016 | struct tx_ring_desc *tx_ring_desc; | ||
2017 | int i; | ||
2018 | struct ob_mac_iocb_req *mac_iocb_ptr; | ||
2019 | |||
2020 | mac_iocb_ptr = tx_ring->wq_base; | ||
2021 | tx_ring_desc = tx_ring->q; | ||
2022 | for (i = 0; i < tx_ring->wq_len; i++) { | ||
2023 | tx_ring_desc->index = i; | ||
2024 | tx_ring_desc->skb = NULL; | ||
2025 | tx_ring_desc->queue_entry = mac_iocb_ptr; | ||
2026 | mac_iocb_ptr++; | ||
2027 | tx_ring_desc++; | ||
2028 | } | ||
2029 | atomic_set(&tx_ring->tx_count, tx_ring->wq_len); | ||
2030 | atomic_set(&tx_ring->queue_stopped, 0); | ||
2031 | } | ||
2032 | |||
2033 | static void ql_free_tx_resources(struct ql_adapter *qdev, | ||
2034 | struct tx_ring *tx_ring) | ||
2035 | { | ||
2036 | if (tx_ring->wq_base) { | ||
2037 | pci_free_consistent(qdev->pdev, tx_ring->wq_size, | ||
2038 | tx_ring->wq_base, tx_ring->wq_base_dma); | ||
2039 | tx_ring->wq_base = NULL; | ||
2040 | } | ||
2041 | kfree(tx_ring->q); | ||
2042 | tx_ring->q = NULL; | ||
2043 | } | ||
2044 | |||
2045 | static int ql_alloc_tx_resources(struct ql_adapter *qdev, | ||
2046 | struct tx_ring *tx_ring) | ||
2047 | { | ||
2048 | tx_ring->wq_base = | ||
2049 | pci_alloc_consistent(qdev->pdev, tx_ring->wq_size, | ||
2050 | &tx_ring->wq_base_dma); | ||
2051 | |||
2052 | if ((tx_ring->wq_base == NULL) | ||
2053 | || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) { | ||
2054 | QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n"); | ||
2055 | return -ENOMEM; | ||
2056 | } | ||
2057 | tx_ring->q = | ||
2058 | kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL); | ||
2059 | if (tx_ring->q == NULL) | ||
2060 | goto err; | ||
2061 | |||
2062 | return 0; | ||
2063 | err: | ||
2064 | pci_free_consistent(qdev->pdev, tx_ring->wq_size, | ||
2065 | tx_ring->wq_base, tx_ring->wq_base_dma); | ||
2066 | return -ENOMEM; | ||
2067 | } | ||
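
/*
 * Note on the check above: (wq_base_dma & (wq_size - 1)) tests that
 * the queue's DMA address is naturally aligned to its own size, which
 * only works if wq_size is a power of two; an unaligned allocation is
 * rejected, presumably a hardware constraint on work queue placement.
 */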
2068 | |||
2069 | void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) | ||
2070 | { | ||
2071 | int i; | ||
2072 | struct bq_desc *lbq_desc; | ||
2073 | |||
2074 | for (i = 0; i < rx_ring->lbq_len; i++) { | ||
2075 | lbq_desc = &rx_ring->lbq[i]; | ||
2076 | if (lbq_desc->p.lbq_page) { | ||
2077 | pci_unmap_page(qdev->pdev, | ||
2078 | pci_unmap_addr(lbq_desc, mapaddr), | ||
2079 | pci_unmap_len(lbq_desc, maplen), | ||
2080 | PCI_DMA_FROMDEVICE); | ||
2081 | |||
2082 | put_page(lbq_desc->p.lbq_page); | ||
2083 | lbq_desc->p.lbq_page = NULL; | ||
2084 | } | ||
2085 | lbq_desc->bq->addr_lo = 0; | ||
2086 | lbq_desc->bq->addr_hi = 0; | ||
2087 | } | ||
2088 | } | ||
2089 | |||
2090 | /* | ||
2091 | * Allocate and map a page for each element of the lbq. | ||
2092 | */ | ||
2093 | static int ql_alloc_lbq_buffers(struct ql_adapter *qdev, | ||
2094 | struct rx_ring *rx_ring) | ||
2095 | { | ||
2096 | int i; | ||
2097 | struct bq_desc *lbq_desc; | ||
2098 | u64 map; | ||
2099 | struct bq_element *bq = rx_ring->lbq_base; | ||
2100 | |||
2101 | for (i = 0; i < rx_ring->lbq_len; i++) { | ||
2102 | lbq_desc = &rx_ring->lbq[i]; | ||
2103 | memset(lbq_desc, 0, sizeof(*lbq_desc)); | ||
2104 | lbq_desc->bq = bq; | ||
2105 | lbq_desc->index = i; | ||
2106 | lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC); | ||
2107 | if (unlikely(!lbq_desc->p.lbq_page)) { | ||
2108 | QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n"); | ||
2109 | goto mem_error; | ||
2110 | } else { | ||
2111 | map = pci_map_page(qdev->pdev, | ||
2112 | lbq_desc->p.lbq_page, | ||
2113 | 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); | ||
2114 | if (pci_dma_mapping_error(qdev->pdev, map)) { | ||
2115 | QPRINTK(qdev, IFUP, ERR, | ||
2116 | "PCI mapping failed.\n"); | ||
2117 | goto mem_error; | ||
2118 | } | ||
2119 | pci_unmap_addr_set(lbq_desc, mapaddr, map); | ||
2120 | pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE); | ||
2121 | bq->addr_lo = cpu_to_le32(map); | ||
2122 | bq->addr_hi = cpu_to_le32(map >> 32); | ||
2123 | } | ||
2124 | bq++; | ||
2125 | } | ||
2126 | return 0; | ||
2127 | mem_error: | ||
2128 | ql_free_lbq_buffers(qdev, rx_ring); | ||
2129 | return -ENOMEM; | ||
2130 | } | ||
2131 | |||
2132 | void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) | ||
2133 | { | ||
2134 | int i; | ||
2135 | struct bq_desc *sbq_desc; | ||
2136 | |||
2137 | for (i = 0; i < rx_ring->sbq_len; i++) { | ||
2138 | sbq_desc = &rx_ring->sbq[i]; | ||
2139 | if (sbq_desc == NULL) { | ||
2140 | QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i); | ||
2141 | return; | ||
2142 | } | ||
2143 | if (sbq_desc->p.skb) { | ||
2144 | pci_unmap_single(qdev->pdev, | ||
2145 | pci_unmap_addr(sbq_desc, mapaddr), | ||
2146 | pci_unmap_len(sbq_desc, maplen), | ||
2147 | PCI_DMA_FROMDEVICE); | ||
2148 | dev_kfree_skb(sbq_desc->p.skb); | ||
2149 | sbq_desc->p.skb = NULL; | ||
2150 | } | ||
2151 | if (sbq_desc->bq == NULL) { | ||
2152 | QPRINTK(qdev, IFUP, ERR, "sbq_desc->bq %d is NULL.\n", | ||
2153 | i); | ||
2154 | return; | ||
2155 | } | ||
2156 | sbq_desc->bq->addr_lo = 0; | ||
2157 | sbq_desc->bq->addr_hi = 0; | ||
2158 | } | ||
2159 | } | ||
2160 | |||
2161 | /* Allocate and map an skb for each element of the sbq. */ | ||
2162 | static int ql_alloc_sbq_buffers(struct ql_adapter *qdev, | ||
2163 | struct rx_ring *rx_ring) | ||
2164 | { | ||
2165 | int i; | ||
2166 | struct bq_desc *sbq_desc; | ||
2167 | struct sk_buff *skb; | ||
2168 | u64 map; | ||
2169 | struct bq_element *bq = rx_ring->sbq_base; | ||
2170 | |||
2171 | for (i = 0; i < rx_ring->sbq_len; i++) { | ||
2172 | sbq_desc = &rx_ring->sbq[i]; | ||
2173 | memset(sbq_desc, 0, sizeof(*sbq_desc)); | ||
2174 | sbq_desc->index = i; | ||
2175 | sbq_desc->bq = bq; | ||
2176 | skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size); | ||
2177 | if (unlikely(!skb)) { | ||
2178 | /* Better luck next round */ | ||
2179 | QPRINTK(qdev, IFUP, ERR, | ||
2180 | "small buff alloc failed for %d bytes at index %d.\n", | ||
2181 | rx_ring->sbq_buf_size, i); | ||
2182 | goto mem_err; | ||
2183 | } | ||
2184 | skb_reserve(skb, QLGE_SB_PAD); | ||
2185 | sbq_desc->p.skb = skb; | ||
2186 | /* | ||
2187 | * Map only half the buffer, because the | ||
2188 | * other half may get some data copied to it | ||
2189 | * when the completion arrives. | ||
2190 | */ | ||
2191 | map = pci_map_single(qdev->pdev, | ||
2192 | skb->data, | ||
2193 | rx_ring->sbq_buf_size / 2, | ||
2194 | PCI_DMA_FROMDEVICE); | ||
2195 | if (pci_dma_mapping_error(qdev->pdev, map)) { | ||
2196 | QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n"); | ||
2197 | goto mem_err; | ||
2198 | } | ||
2199 | pci_unmap_addr_set(sbq_desc, mapaddr, map); | ||
2200 | pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2); | ||
2201 | bq->addr_lo = cpu_to_le32(map); | ||
2202 | bq->addr_hi = cpu_to_le32(map >> 32); | ||
2205 | bq++; | ||
2206 | } | ||
2207 | return 0; | ||
2208 | mem_err: | ||
2209 | ql_free_sbq_buffers(qdev, rx_ring); | ||
2210 | return -ENOMEM; | ||
2211 | } | ||
2212 | |||
2213 | static void ql_free_rx_resources(struct ql_adapter *qdev, | ||
2214 | struct rx_ring *rx_ring) | ||
2215 | { | ||
2216 | if (rx_ring->sbq_len) | ||
2217 | ql_free_sbq_buffers(qdev, rx_ring); | ||
2218 | if (rx_ring->lbq_len) | ||
2219 | ql_free_lbq_buffers(qdev, rx_ring); | ||
2220 | |||
2221 | /* Free the small buffer queue. */ | ||
2222 | if (rx_ring->sbq_base) { | ||
2223 | pci_free_consistent(qdev->pdev, | ||
2224 | rx_ring->sbq_size, | ||
2225 | rx_ring->sbq_base, rx_ring->sbq_base_dma); | ||
2226 | rx_ring->sbq_base = NULL; | ||
2227 | } | ||
2228 | |||
2229 | /* Free the small buffer queue control blocks. */ | ||
2230 | kfree(rx_ring->sbq); | ||
2231 | rx_ring->sbq = NULL; | ||
2232 | |||
2233 | /* Free the large buffer queue. */ | ||
2234 | if (rx_ring->lbq_base) { | ||
2235 | pci_free_consistent(qdev->pdev, | ||
2236 | rx_ring->lbq_size, | ||
2237 | rx_ring->lbq_base, rx_ring->lbq_base_dma); | ||
2238 | rx_ring->lbq_base = NULL; | ||
2239 | } | ||
2240 | |||
2241 | /* Free the large buffer queue control blocks. */ | ||
2242 | kfree(rx_ring->lbq); | ||
2243 | rx_ring->lbq = NULL; | ||
2244 | |||
2245 | /* Free the rx queue. */ | ||
2246 | if (rx_ring->cq_base) { | ||
2247 | pci_free_consistent(qdev->pdev, | ||
2248 | rx_ring->cq_size, | ||
2249 | rx_ring->cq_base, rx_ring->cq_base_dma); | ||
2250 | rx_ring->cq_base = NULL; | ||
2251 | } | ||
2252 | } | ||
2253 | |||
2254 | /* Allocate queues and buffers for this completions queue based | ||
2255 | * on the values in the parameter structure. */ | ||
2256 | static int ql_alloc_rx_resources(struct ql_adapter *qdev, | ||
2257 | struct rx_ring *rx_ring) | ||
2258 | { | ||
2259 | |||
2260 | /* | ||
2261 | * Allocate the completion queue for this rx_ring. | ||
2262 | */ | ||
2263 | rx_ring->cq_base = | ||
2264 | pci_alloc_consistent(qdev->pdev, rx_ring->cq_size, | ||
2265 | &rx_ring->cq_base_dma); | ||
2266 | |||
2267 | if (rx_ring->cq_base == NULL) { | ||
2268 | QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n"); | ||
2269 | return -ENOMEM; | ||
2270 | } | ||
2271 | |||
2272 | if (rx_ring->sbq_len) { | ||
2273 | /* | ||
2274 | * Allocate small buffer queue. | ||
2275 | */ | ||
2276 | rx_ring->sbq_base = | ||
2277 | pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size, | ||
2278 | &rx_ring->sbq_base_dma); | ||
2279 | |||
2280 | if (rx_ring->sbq_base == NULL) { | ||
2281 | QPRINTK(qdev, IFUP, ERR, | ||
2282 | "Small buffer queue allocation failed.\n"); | ||
2283 | goto err_mem; | ||
2284 | } | ||
2285 | |||
2286 | /* | ||
2287 | * Allocate small buffer queue control blocks. | ||
2288 | */ | ||
2289 | rx_ring->sbq = | ||
2290 | kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc), | ||
2291 | GFP_KERNEL); | ||
2292 | if (rx_ring->sbq == NULL) { | ||
2293 | QPRINTK(qdev, IFUP, ERR, | ||
2294 | "Small buffer queue control block allocation failed.\n"); | ||
2295 | goto err_mem; | ||
2296 | } | ||
2297 | |||
2298 | if (ql_alloc_sbq_buffers(qdev, rx_ring)) { | ||
2299 | QPRINTK(qdev, IFUP, ERR, | ||
2300 | "Small buffer allocation failed.\n"); | ||
2301 | goto err_mem; | ||
2302 | } | ||
2303 | } | ||
2304 | |||
2305 | if (rx_ring->lbq_len) { | ||
2306 | /* | ||
2307 | * Allocate large buffer queue. | ||
2308 | */ | ||
2309 | rx_ring->lbq_base = | ||
2310 | pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size, | ||
2311 | &rx_ring->lbq_base_dma); | ||
2312 | |||
2313 | if (rx_ring->lbq_base == NULL) { | ||
2314 | QPRINTK(qdev, IFUP, ERR, | ||
2315 | "Large buffer queue allocation failed.\n"); | ||
2316 | goto err_mem; | ||
2317 | } | ||
2318 | /* | ||
2319 | * Allocate large buffer queue control blocks. | ||
2320 | */ | ||
2321 | rx_ring->lbq = | ||
2322 | kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc), | ||
2323 | GFP_KERNEL); | ||
2324 | if (rx_ring->lbq == NULL) { | ||
2325 | QPRINTK(qdev, IFUP, ERR, | ||
2326 | "Large buffer queue control block allocation failed.\n"); | ||
2327 | goto err_mem; | ||
2328 | } | ||
2329 | |||
2330 | /* | ||
2331 | * Allocate the buffers. | ||
2332 | */ | ||
2333 | if (ql_alloc_lbq_buffers(qdev, rx_ring)) { | ||
2334 | QPRINTK(qdev, IFUP, ERR, | ||
2335 | "Large buffer allocation failed.\n"); | ||
2336 | goto err_mem; | ||
2337 | } | ||
2338 | } | ||
2339 | |||
2340 | return 0; | ||
2341 | |||
2342 | err_mem: | ||
2343 | ql_free_rx_resources(qdev, rx_ring); | ||
2344 | return -ENOMEM; | ||
2345 | } | ||
2346 | |||
2347 | static void ql_tx_ring_clean(struct ql_adapter *qdev) | ||
2348 | { | ||
2349 | struct tx_ring *tx_ring; | ||
2350 | struct tx_ring_desc *tx_ring_desc; | ||
2351 | int i, j; | ||
2352 | |||
2353 | /* | ||
2354 | * Loop through all queues and free | ||
2355 | * any resources. | ||
2356 | */ | ||
2357 | for (j = 0; j < qdev->tx_ring_count; j++) { | ||
2358 | tx_ring = &qdev->tx_ring[j]; | ||
2359 | for (i = 0; i < tx_ring->wq_len; i++) { | ||
2360 | tx_ring_desc = &tx_ring->q[i]; | ||
2361 | if (tx_ring_desc && tx_ring_desc->skb) { | ||
2362 | QPRINTK(qdev, IFDOWN, ERR, | ||
2363 | "Freeing lost SKB %p, from queue %d, index %d.\n", | ||
2364 | tx_ring_desc->skb, j, | ||
2365 | tx_ring_desc->index); | ||
2366 | ql_unmap_send(qdev, tx_ring_desc, | ||
2367 | tx_ring_desc->map_cnt); | ||
2368 | dev_kfree_skb(tx_ring_desc->skb); | ||
2369 | tx_ring_desc->skb = NULL; | ||
2370 | } | ||
2371 | } | ||
2372 | } | ||
2373 | } | ||
2374 | |||
2375 | static void ql_free_ring_cb(struct ql_adapter *qdev) | ||
2376 | { | ||
2377 | kfree(qdev->ring_mem); | ||
2378 | } | ||
2379 | |||
2380 | static int ql_alloc_ring_cb(struct ql_adapter *qdev) | ||
2381 | { | ||
2382 | /* Allocate space for tx/rx ring control blocks. */ | ||
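| /* A single allocation holds both arrays: rx_ring[] comes | ||
| * first, with tx_ring[] placed immediately after it. | ||
| */ | ||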
2383 | qdev->ring_mem_size = | ||
2384 | (qdev->tx_ring_count * sizeof(struct tx_ring)) + | ||
2385 | (qdev->rx_ring_count * sizeof(struct rx_ring)); | ||
2386 | qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL); | ||
2387 | if (qdev->ring_mem == NULL) { | ||
2388 | return -ENOMEM; | ||
2389 | } else { | ||
2390 | qdev->rx_ring = qdev->ring_mem; | ||
2391 | qdev->tx_ring = qdev->ring_mem + | ||
2392 | (qdev->rx_ring_count * sizeof(struct rx_ring)); | ||
2393 | } | ||
2394 | return 0; | ||
2395 | } | ||
2396 | |||
2397 | static void ql_free_mem_resources(struct ql_adapter *qdev) | ||
2398 | { | ||
2399 | int i; | ||
2400 | |||
2401 | for (i = 0; i < qdev->tx_ring_count; i++) | ||
2402 | ql_free_tx_resources(qdev, &qdev->tx_ring[i]); | ||
2403 | for (i = 0; i < qdev->rx_ring_count; i++) | ||
2404 | ql_free_rx_resources(qdev, &qdev->rx_ring[i]); | ||
2405 | ql_free_shadow_space(qdev); | ||
2406 | } | ||
2407 | |||
2408 | static int ql_alloc_mem_resources(struct ql_adapter *qdev) | ||
2409 | { | ||
2410 | int i; | ||
2411 | |||
2412 | /* Allocate space for our shadow registers and such. */ | ||
2413 | if (ql_alloc_shadow_space(qdev)) | ||
2414 | return -ENOMEM; | ||
2415 | |||
2416 | for (i = 0; i < qdev->rx_ring_count; i++) { | ||
2417 | if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { | ||
2418 | QPRINTK(qdev, IFUP, ERR, | ||
2419 | "RX resource allocation failed.\n"); | ||
2420 | goto err_mem; | ||
2421 | } | ||
2422 | } | ||
2423 | /* Allocate tx queue resources */ | ||
2424 | for (i = 0; i < qdev->tx_ring_count; i++) { | ||
2425 | if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { | ||
2426 | QPRINTK(qdev, IFUP, ERR, | ||
2427 | "TX resource allocation failed.\n"); | ||
2428 | goto err_mem; | ||
2429 | } | ||
2430 | } | ||
2431 | return 0; | ||
2432 | |||
2433 | err_mem: | ||
2434 | ql_free_mem_resources(qdev); | ||
2435 | return -ENOMEM; | ||
2436 | } | ||
2437 | |||
2438 | /* Set up the rx ring control block and pass it to the chip. | ||
2439 | * The control block is defined as | ||
2440 | * "Completion Queue Initialization Control Block", or cqicb. | ||
2441 | */ | ||
2442 | static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) | ||
2443 | { | ||
2444 | struct cqicb *cqicb = &rx_ring->cqicb; | ||
2445 | void *shadow_reg = qdev->rx_ring_shadow_reg_area + | ||
2446 | (rx_ring->cq_id * sizeof(u64) * 4); | ||
2447 | u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma + | ||
2448 | (rx_ring->cq_id * sizeof(u64) * 4); | ||
2449 | void __iomem *doorbell_area = | ||
2450 | qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); | ||
2451 | int err = 0; | ||
2452 | u16 bq_len; | ||
2453 | |||
2454 | /* Set up the shadow registers for this ring. */ | ||
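| /* Each CQ owns a 4 * sizeof(u64) slice of the shadow area: | ||
| * one word for the completion producer index, one for the lbq | ||
| * indirect base, one for the sbq indirect base; the fourth | ||
| * word is spare. | ||
| */ | ||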
2455 | rx_ring->prod_idx_sh_reg = shadow_reg; | ||
2456 | rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; | ||
2457 | shadow_reg += sizeof(u64); | ||
2458 | shadow_reg_dma += sizeof(u64); | ||
2459 | rx_ring->lbq_base_indirect = shadow_reg; | ||
2460 | rx_ring->lbq_base_indirect_dma = shadow_reg_dma; | ||
2461 | shadow_reg += sizeof(u64); | ||
2462 | shadow_reg_dma += sizeof(u64); | ||
2463 | rx_ring->sbq_base_indirect = shadow_reg; | ||
2464 | rx_ring->sbq_base_indirect_dma = shadow_reg_dma; | ||
2465 | |||
2466 | /* PCI doorbell mem area + 0x00 for consumer index register */ | ||
2467 | rx_ring->cnsmr_idx_db_reg = (u32 *) doorbell_area; | ||
2468 | rx_ring->cnsmr_idx = 0; | ||
2469 | rx_ring->curr_entry = rx_ring->cq_base; | ||
2470 | |||
2471 | /* PCI doorbell mem area + 0x04 for valid register */ | ||
2472 | rx_ring->valid_db_reg = doorbell_area + 0x04; | ||
2473 | |||
2474 | /* PCI doorbell mem area + 0x18 for large buffer consumer */ | ||
2475 | rx_ring->lbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x18); | ||
2476 | |||
2477 | /* PCI doorbell mem area + 0x1c */ | ||
2478 | rx_ring->sbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x1c); | ||
2479 | |||
2480 | memset((void *)cqicb, 0, sizeof(struct cqicb)); | ||
2481 | cqicb->msix_vect = rx_ring->irq; | ||
2482 | |||
2483 | cqicb->len = cpu_to_le16(rx_ring->cq_len | LEN_V | LEN_CPP_CONT); | ||
2484 | |||
2485 | cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma); | ||
2486 | cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32); | ||
2487 | |||
2488 | cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma); | ||
2489 | cqicb->prod_idx_addr_hi = | ||
2490 | cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32); | ||
2491 | |||
2492 | /* | ||
2493 | * Set up the control block load flags. | ||
2494 | */ | ||
2495 | cqicb->flags = FLAGS_LC | /* Load queue base address */ | ||
2496 | FLAGS_LV | /* Load MSI-X vector */ | ||
2497 | FLAGS_LI; /* Load irq delay values */ | ||
2498 | if (rx_ring->lbq_len) { | ||
2499 | cqicb->flags |= FLAGS_LL; /* Load lbq values */ | ||
2500 | *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma; | ||
2501 | cqicb->lbq_addr_lo = | ||
2502 | cpu_to_le32(rx_ring->lbq_base_indirect_dma); | ||
2503 | cqicb->lbq_addr_hi = | ||
2504 | cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32); | ||
2505 | cqicb->lbq_buf_size = cpu_to_le32(rx_ring->lbq_buf_size); | ||
2506 | bq_len = (u16) rx_ring->lbq_len; | ||
2507 | cqicb->lbq_len = cpu_to_le16(bq_len); | ||
2508 | rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16; | ||
2509 | rx_ring->lbq_curr_idx = 0; | ||
2510 | rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx; | ||
2511 | rx_ring->lbq_free_cnt = 16; | ||
2512 | } | ||
2513 | if (rx_ring->sbq_len) { | ||
2514 | cqicb->flags |= FLAGS_LS; /* Load sbq values */ | ||
2515 | *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma; | ||
2516 | cqicb->sbq_addr_lo = | ||
2517 | cpu_to_le32(rx_ring->sbq_base_indirect_dma); | ||
2518 | cqicb->sbq_addr_hi = | ||
2519 | cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32); | ||
2520 | cqicb->sbq_buf_size = | ||
2521 | cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8); | ||
2522 | bq_len = (u16) rx_ring->sbq_len; | ||
2523 | cqicb->sbq_len = cpu_to_le16(bq_len); | ||
2524 | rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16; | ||
2525 | rx_ring->sbq_curr_idx = 0; | ||
2526 | rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx; | ||
2527 | rx_ring->sbq_free_cnt = 16; | ||
2528 | } | ||
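| /* The producer indices above are seeded at len - 16, leaving | ||
| * 16 free slots for the normal replenish path to fill. | ||
| */ | ||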
2529 | switch (rx_ring->type) { | ||
2530 | case TX_Q: | ||
2531 | /* If there's only one interrupt, then we use | ||
2532 | * worker threads to process the outbound | ||
2533 | * completion handling rx_rings. We do this so | ||
2534 | * they can be run on multiple CPUs. There is | ||
2535 | * room to play with this more where we would only | ||
2536 | * run in a worker if there are more than x number | ||
2537 | * of outbound completions on the queue and more | ||
2538 | * than one queue active. Some threshold that | ||
2539 | * would indicate a benefit in spite of the cost | ||
2540 | * of a context switch. | ||
2541 | * If there's more than one interrupt, then the | ||
2542 | * outbound completions are processed in the ISR. | ||
2543 | */ | ||
2544 | /* Even when MSI-X is enabled we defer to a worker: with | ||
2545 | * all debug warnings on we see a WARN_ON message when we | ||
2546 | * free the skb in interrupt context. | ||
2547 | */ | ||
2548 | INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean); | ||
2552 | cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); | ||
2553 | cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); | ||
2554 | break; | ||
2555 | case DEFAULT_Q: | ||
2556 | INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean); | ||
2557 | cqicb->irq_delay = 0; | ||
2558 | cqicb->pkt_delay = 0; | ||
2559 | break; | ||
2560 | case RX_Q: | ||
2561 | /* Inbound completion handling rx_rings run in | ||
2562 | * separate NAPI contexts. | ||
2563 | */ | ||
2564 | netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix, | ||
2565 | 64); | ||
2566 | cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); | ||
2567 | cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); | ||
2568 | break; | ||
2569 | default: | ||
2570 | QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n", | ||
2571 | rx_ring->type); | ||
2572 | } | ||
2573 | QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n"); | ||
2574 | err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), | ||
2575 | CFG_LCQ, rx_ring->cq_id); | ||
2576 | if (err) { | ||
2577 | QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n"); | ||
2578 | return err; | ||
2579 | } | ||
2580 | QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n"); | ||
2581 | /* | ||
2582 | * Advance the producer index for the buffer queues. | ||
2583 | */ | ||
2584 | wmb(); | ||
2585 | if (rx_ring->lbq_len) | ||
2586 | ql_write_db_reg(rx_ring->lbq_prod_idx, | ||
2587 | rx_ring->lbq_prod_idx_db_reg); | ||
2588 | if (rx_ring->sbq_len) | ||
2589 | ql_write_db_reg(rx_ring->sbq_prod_idx, | ||
2590 | rx_ring->sbq_prod_idx_db_reg); | ||
2591 | return err; | ||
2592 | } | ||
2593 | |||
2594 | static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) | ||
2595 | { | ||
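| /* struct tx_ring is assumed to embed its wqicb as the first | ||
| * member, so the ring pointer doubles as the control block. | ||
| */ | ||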
2596 | struct wqicb *wqicb = (struct wqicb *)tx_ring; | ||
2597 | void __iomem *doorbell_area = | ||
2598 | qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); | ||
2599 | void *shadow_reg = qdev->tx_ring_shadow_reg_area + | ||
2600 | (tx_ring->wq_id * sizeof(u64)); | ||
2601 | u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma + | ||
2602 | (tx_ring->wq_id * sizeof(u64)); | ||
2603 | int err = 0; | ||
2604 | |||
2605 | /* | ||
2606 | * Assign doorbell registers for this tx_ring. | ||
2607 | */ | ||
2608 | /* TX PCI doorbell mem area for tx producer index */ | ||
2609 | tx_ring->prod_idx_db_reg = (u32 *) doorbell_area; | ||
2610 | tx_ring->prod_idx = 0; | ||
2611 | /* TX PCI doorbell mem area + 0x04 */ | ||
2612 | tx_ring->valid_db_reg = doorbell_area + 0x04; | ||
2613 | |||
2614 | /* | ||
2615 | * Assign shadow registers for this tx_ring. | ||
2616 | */ | ||
2617 | tx_ring->cnsmr_idx_sh_reg = shadow_reg; | ||
2618 | tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma; | ||
2619 | |||
2620 | wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT); | ||
2621 | wqicb->flags = cpu_to_le16(Q_FLAGS_LC | | ||
2622 | Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO); | ||
2623 | wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); | ||
2624 | wqicb->rid = 0; | ||
2625 | wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma); | ||
2626 | wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32); | ||
2627 | |||
2628 | wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma); | ||
2629 | wqicb->cnsmr_idx_addr_hi = | ||
2630 | cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32); | ||
2631 | |||
2632 | ql_init_tx_ring(qdev, tx_ring); | ||
2633 | |||
2634 | err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, | ||
2635 | (u16) tx_ring->wq_id); | ||
2636 | if (err) { | ||
2637 | QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n"); | ||
2638 | return err; | ||
2639 | } | ||
2640 | QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n"); | ||
2641 | return err; | ||
2642 | } | ||
2643 | |||
2644 | static void ql_disable_msix(struct ql_adapter *qdev) | ||
2645 | { | ||
2646 | if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { | ||
2647 | pci_disable_msix(qdev->pdev); | ||
2648 | clear_bit(QL_MSIX_ENABLED, &qdev->flags); | ||
2649 | kfree(qdev->msi_x_entry); | ||
2650 | qdev->msi_x_entry = NULL; | ||
2651 | } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) { | ||
2652 | pci_disable_msi(qdev->pdev); | ||
2653 | clear_bit(QL_MSI_ENABLED, &qdev->flags); | ||
2654 | } | ||
2655 | } | ||
2656 | |||
2657 | static void ql_enable_msix(struct ql_adapter *qdev) | ||
2658 | { | ||
2659 | int i; | ||
2660 | |||
2661 | qdev->intr_count = 1; | ||
2662 | /* Get the MSIX vectors. */ | ||
2663 | if (irq_type == MSIX_IRQ) { | ||
2664 | /* Try to alloc space for the msix struct, | ||
2665 | * if it fails then go to MSI/legacy. | ||
2666 | */ | ||
2667 | qdev->msi_x_entry = kcalloc(qdev->rx_ring_count, | ||
2668 | sizeof(struct msix_entry), | ||
2669 | GFP_KERNEL); | ||
2670 | if (!qdev->msi_x_entry) { | ||
2671 | irq_type = MSI_IRQ; | ||
2672 | goto msi; | ||
2673 | } | ||
2674 | |||
2675 | for (i = 0; i < qdev->rx_ring_count; i++) | ||
2676 | qdev->msi_x_entry[i].entry = i; | ||
2677 | |||
2678 | if (!pci_enable_msix | ||
2679 | (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) { | ||
2680 | set_bit(QL_MSIX_ENABLED, &qdev->flags); | ||
2681 | qdev->intr_count = qdev->rx_ring_count; | ||
2682 | QPRINTK(qdev, IFUP, INFO, | ||
2683 | "MSI-X Enabled, got %d vectors.\n", | ||
2684 | qdev->intr_count); | ||
2685 | return; | ||
2686 | } else { | ||
2687 | kfree(qdev->msi_x_entry); | ||
2688 | qdev->msi_x_entry = NULL; | ||
2689 | QPRINTK(qdev, IFUP, WARNING, | ||
2690 | "MSI-X Enable failed, trying MSI.\n"); | ||
2691 | irq_type = MSI_IRQ; | ||
2692 | } | ||
2693 | } | ||
2694 | msi: | ||
2695 | if (irq_type == MSI_IRQ) { | ||
2696 | if (!pci_enable_msi(qdev->pdev)) { | ||
2697 | set_bit(QL_MSI_ENABLED, &qdev->flags); | ||
2698 | QPRINTK(qdev, IFUP, INFO, | ||
2699 | "Running with MSI interrupts.\n"); | ||
2700 | return; | ||
2701 | } | ||
2702 | } | ||
2703 | irq_type = LEG_IRQ; | ||
2704 | spin_lock_init(&qdev->legacy_lock); | ||
2705 | qdev->legacy_check = ql_legacy_check; | ||
2706 | QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n"); | ||
2707 | } | ||
2708 | |||
2709 | /* | ||
2710 | * Here we build the intr_context structures based on | ||
2711 | * our rx_ring count and intr vector count. | ||
2712 | * The intr_context structure is used to hook each vector | ||
2713 | * to possibly different handlers. | ||
2714 | */ | ||
2715 | static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev) | ||
2716 | { | ||
2717 | int i = 0; | ||
2718 | struct intr_context *intr_context = &qdev->intr_context[0]; | ||
2719 | |||
2720 | ql_enable_msix(qdev); | ||
2721 | |||
2722 | if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { | ||
2723 | /* Each rx_ring has its | ||
2724 | * own intr_context since we have separate | ||
2725 | * vectors for each queue. | ||
2726 | * This is only true when MSI-X is enabled. | ||
2727 | */ | ||
2728 | for (i = 0; i < qdev->intr_count; i++, intr_context++) { | ||
2729 | qdev->rx_ring[i].irq = i; | ||
2730 | intr_context->intr = i; | ||
2731 | intr_context->qdev = qdev; | ||
2732 | /* | ||
2733 | * We set up each vector's enable/disable/read bits so | ||
2734 | * there are no bit/mask calculations in the critical path. | ||
2735 | */ | ||
2736 | intr_context->intr_en_mask = | ||
2737 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | | ||
2738 | INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD | ||
2739 | | i; | ||
2740 | intr_context->intr_dis_mask = | ||
2741 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | | ||
2742 | INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK | | ||
2743 | INTR_EN_IHD | i; | ||
2744 | intr_context->intr_read_mask = | ||
2745 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | | ||
2746 | INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD | | ||
2747 | i; | ||
2748 | |||
2749 | if (i == 0) { | ||
2750 | /* | ||
2751 | * Default queue handles bcast/mcast plus | ||
2752 | * async events. Needs buffers. | ||
2753 | */ | ||
2754 | intr_context->handler = qlge_isr; | ||
2755 | sprintf(intr_context->name, "%s-default-queue", | ||
2756 | qdev->ndev->name); | ||
2757 | } else if (i < qdev->rss_ring_first_cq_id) { | ||
2758 | /* | ||
2759 | * Outbound queue is for outbound completions only. | ||
2760 | */ | ||
2761 | intr_context->handler = qlge_msix_tx_isr; | ||
2762 | sprintf(intr_context->name, "%s-txq-%d", | ||
2763 | qdev->ndev->name, i); | ||
2764 | } else { | ||
2765 | /* | ||
2766 | * Inbound queues handle unicast frames only. | ||
2767 | */ | ||
2768 | intr_context->handler = qlge_msix_rx_isr; | ||
2769 | sprintf(intr_context->name, "%s-rxq-%d", | ||
2770 | qdev->ndev->name, i); | ||
2771 | } | ||
2772 | } | ||
2773 | } else { | ||
2774 | /* | ||
2775 | * All rx_rings use the same intr_context since | ||
2776 | * there is only one vector. | ||
2777 | */ | ||
2778 | intr_context->intr = 0; | ||
2779 | intr_context->qdev = qdev; | ||
2780 | /* | ||
2781 | * We set up each vector's enable/disable/read bits so | ||
2782 | * there are no bit/mask calculations in the critical path. | ||
2783 | */ | ||
2784 | intr_context->intr_en_mask = | ||
2785 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE; | ||
2786 | intr_context->intr_dis_mask = | ||
2787 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | | ||
2788 | INTR_EN_TYPE_DISABLE; | ||
2789 | intr_context->intr_read_mask = | ||
2790 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ; | ||
2791 | /* | ||
2792 | * Single interrupt means one handler for all rings. | ||
2793 | */ | ||
2794 | intr_context->handler = qlge_isr; | ||
2795 | sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name); | ||
2796 | for (i = 0; i < qdev->rx_ring_count; i++) | ||
2797 | qdev->rx_ring[i].irq = 0; | ||
2798 | } | ||
2799 | } | ||
2800 | |||
2801 | static void ql_free_irq(struct ql_adapter *qdev) | ||
2802 | { | ||
2803 | int i; | ||
2804 | struct intr_context *intr_context = &qdev->intr_context[0]; | ||
2805 | |||
2806 | for (i = 0; i < qdev->intr_count; i++, intr_context++) { | ||
2807 | if (intr_context->hooked) { | ||
2808 | if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { | ||
2809 | free_irq(qdev->msi_x_entry[i].vector, | ||
2810 | &qdev->rx_ring[i]); | ||
2811 | QPRINTK(qdev, IFDOWN, ERR, | ||
2812 | "freeing msix interrupt %d.\n", i); | ||
2813 | } else { | ||
2814 | free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); | ||
2815 | QPRINTK(qdev, IFDOWN, ERR, | ||
2816 | "freeing msi interrupt %d.\n", i); | ||
2817 | } | ||
2818 | } | ||
2819 | } | ||
2820 | ql_disable_msix(qdev); | ||
2821 | } | ||
2822 | |||
2823 | static int ql_request_irq(struct ql_adapter *qdev) | ||
2824 | { | ||
2825 | int i; | ||
2826 | int status = 0; | ||
2827 | struct pci_dev *pdev = qdev->pdev; | ||
2828 | struct intr_context *intr_context = &qdev->intr_context[0]; | ||
2829 | |||
2830 | ql_resolve_queues_to_irqs(qdev); | ||
2831 | |||
2832 | for (i = 0; i < qdev->intr_count; i++, intr_context++) { | ||
2833 | atomic_set(&intr_context->irq_cnt, 0); | ||
2834 | if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { | ||
2835 | status = request_irq(qdev->msi_x_entry[i].vector, | ||
2836 | intr_context->handler, | ||
2837 | 0, | ||
2838 | intr_context->name, | ||
2839 | &qdev->rx_ring[i]); | ||
2840 | if (status) { | ||
2841 | QPRINTK(qdev, IFUP, ERR, | ||
2842 | "Failed request for MSIX interrupt %d.\n", | ||
2843 | i); | ||
2844 | goto err_irq; | ||
2845 | } else { | ||
2846 | QPRINTK(qdev, IFUP, INFO, | ||
2847 | "Hooked intr %d, queue type %s%s%s, with name %s.\n", | ||
2848 | i, | ||
2849 | qdev->rx_ring[i].type == | ||
2850 | DEFAULT_Q ? "DEFAULT_Q" : "", | ||
2851 | qdev->rx_ring[i].type == | ||
2852 | TX_Q ? "TX_Q" : "", | ||
2853 | qdev->rx_ring[i].type == | ||
2854 | RX_Q ? "RX_Q" : "", intr_context->name); | ||
2855 | } | ||
2856 | } else { | ||
2857 | QPRINTK(qdev, IFUP, DEBUG, | ||
2858 | "trying msi or legacy interrupts.\n"); | ||
2859 | QPRINTK(qdev, IFUP, DEBUG, | ||
2860 | "%s: irq = %d.\n", __func__, pdev->irq); | ||
2861 | QPRINTK(qdev, IFUP, DEBUG, | ||
2862 | "%s: context->name = %s.\n", __func__, | ||
2863 | intr_context->name); | ||
2864 | QPRINTK(qdev, IFUP, DEBUG, | ||
2865 | "%s: dev_id = 0x%p.\n", __func__, | ||
2866 | &qdev->rx_ring[0]); | ||
2867 | status = | ||
2868 | request_irq(pdev->irq, qlge_isr, | ||
2869 | test_bit(QL_MSI_ENABLED, | ||
2870 | &qdev->flags) | ||
2871 | ? 0 : IRQF_SHARED, | ||
2872 | intr_context->name, &qdev->rx_ring[0]); | ||
2873 | if (status) | ||
2874 | goto err_irq; | ||
2875 | |||
2876 | QPRINTK(qdev, IFUP, ERR, | ||
2877 | "Hooked intr %d, queue type %s%s%s, with name %s.\n", | ||
2878 | i, | ||
2879 | qdev->rx_ring[0].type == | ||
2880 | DEFAULT_Q ? "DEFAULT_Q" : "", | ||
2881 | qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "", | ||
2882 | qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "", | ||
2883 | intr_context->name); | ||
2884 | } | ||
2885 | intr_context->hooked = 1; | ||
2886 | } | ||
2887 | return status; | ||
2888 | err_irq: | ||
2889 | QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!\n"); | ||
2890 | ql_free_irq(qdev); | ||
2891 | return status; | ||
2892 | } | ||
2893 | |||
2894 | static int ql_start_rss(struct ql_adapter *qdev) | ||
2895 | { | ||
2896 | struct ricb *ricb = &qdev->ricb; | ||
2897 | int status = 0; | ||
2898 | int i; | ||
2899 | u8 *hash_id = (u8 *) ricb->hash_cq_id; | ||
2900 | |||
2901 | memset((void *)ricb, 0, sizeof(*ricb)); | ||
2902 | |||
2903 | ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K; | ||
2904 | ricb->flags = | ||
2905 | (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 | | ||
2906 | RSS_RT6); | ||
2907 | ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1); | ||
2908 | |||
2909 | /* | ||
2910 | * Fill out the Indirection Table. | ||
2911 | */ | ||
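| /* (The 32 entries alternate 0 and 1, so hashed flows are | ||
| * spread across two completion queues.) | ||
| */ | ||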
2912 | for (i = 0; i < 32; i++) | ||
2913 | hash_id[i] = i & 1; | ||
2914 | |||
2915 | /* | ||
2916 | * Random values for the IPv6 and IPv4 Hash Keys. | ||
2917 | */ | ||
2918 | get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40); | ||
2919 | get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16); | ||
2920 | |||
2921 | QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n"); | ||
2922 | |||
2923 | status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); | ||
2924 | if (status) { | ||
2925 | QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n"); | ||
2926 | return status; | ||
2927 | } | ||
2928 | QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n"); | ||
2929 | return status; | ||
2930 | } | ||
2931 | |||
2932 | /* Initialize the frame-to-queue routing. */ | ||
2933 | static int ql_route_initialize(struct ql_adapter *qdev) | ||
2934 | { | ||
2935 | int status = 0; | ||
2936 | int i; | ||
2937 | |||
2938 | /* Clear all the entries in the routing table. */ | ||
2939 | for (i = 0; i < 16; i++) { | ||
2940 | status = ql_set_routing_reg(qdev, i, 0, 0); | ||
2941 | if (status) { | ||
2942 | QPRINTK(qdev, IFUP, ERR, | ||
2943 | "Failed to init routing register for CAM packets.\n"); | ||
2944 | return status; | ||
2945 | } | ||
2946 | } | ||
2947 | |||
2948 | status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1); | ||
2949 | if (status) { | ||
2950 | QPRINTK(qdev, IFUP, ERR, | ||
2951 | "Failed to init routing register for error packets.\n"); | ||
2952 | return status; | ||
2953 | } | ||
2954 | status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); | ||
2955 | if (status) { | ||
2956 | QPRINTK(qdev, IFUP, ERR, | ||
2957 | "Failed to init routing register for broadcast packets.\n"); | ||
2958 | return status; | ||
2959 | } | ||
2960 | /* If we have more than one inbound queue, then turn on RSS in the | ||
2961 | * routing block. | ||
2962 | */ | ||
2963 | if (qdev->rss_ring_count > 1) { | ||
2964 | status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT, | ||
2965 | RT_IDX_RSS_MATCH, 1); | ||
2966 | if (status) { | ||
2967 | QPRINTK(qdev, IFUP, ERR, | ||
2968 | "Failed to init routing register for MATCH RSS packets.\n"); | ||
2969 | return status; | ||
2970 | } | ||
2971 | } | ||
2972 | |||
2973 | status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, | ||
2974 | RT_IDX_CAM_HIT, 1); | ||
2975 | if (status) { | ||
2976 | QPRINTK(qdev, IFUP, ERR, | ||
2977 | "Failed to init routing register for CAM packets.\n"); | ||
2978 | return status; | ||
2979 | } | ||
2980 | return status; | ||
2981 | } | ||
2982 | |||
2983 | static int ql_adapter_initialize(struct ql_adapter *qdev) | ||
2984 | { | ||
2985 | u32 value, mask; | ||
2986 | int i; | ||
2987 | int status = 0; | ||
2988 | |||
2989 | /* | ||
2990 | * Set up the System register to halt on errors. | ||
2991 | */ | ||
2992 | value = SYS_EFE | SYS_FAE; | ||
2993 | mask = value << 16; | ||
2994 | ql_write32(qdev, SYS, mask | value); | ||
2995 | |||
2996 | /* Set the default queue. */ | ||
2997 | value = NIC_RCV_CFG_DFQ; | ||
2998 | mask = NIC_RCV_CFG_DFQ_MASK; | ||
2999 | ql_write32(qdev, NIC_RCV_CFG, (mask | value)); | ||
3000 | |||
3001 | /* Set the MPI interrupt to enabled. */ | ||
3002 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); | ||
3003 | |||
3004 | /* Enable the function, set pagesize, enable error checking. */ | ||
3005 | value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND | | ||
3006 | FSC_EC | FSC_VM_PAGE_4K | FSC_SH; | ||
3007 | |||
3008 | /* Set/clear header splitting. */ | ||
3009 | mask = FSC_VM_PAGESIZE_MASK | | ||
3010 | FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16); | ||
3011 | ql_write32(qdev, FSC, mask | value); | ||
3012 | |||
3013 | ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP | | ||
3014 | min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE)); | ||
3015 | |||
3016 | /* Start up the rx queues. */ | ||
3017 | for (i = 0; i < qdev->rx_ring_count; i++) { | ||
3018 | status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); | ||
3019 | if (status) { | ||
3020 | QPRINTK(qdev, IFUP, ERR, | ||
3021 | "Failed to start rx ring[%d].\n", i); | ||
3022 | return status; | ||
3023 | } | ||
3024 | } | ||
3025 | |||
3026 | /* If there is more than one inbound completion queue | ||
3027 | * then download a RICB to configure RSS. | ||
3028 | */ | ||
3029 | if (qdev->rss_ring_count > 1) { | ||
3030 | status = ql_start_rss(qdev); | ||
3031 | if (status) { | ||
3032 | QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n"); | ||
3033 | return status; | ||
3034 | } | ||
3035 | } | ||
3036 | |||
3037 | /* Start up the tx queues. */ | ||
3038 | for (i = 0; i < qdev->tx_ring_count; i++) { | ||
3039 | status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); | ||
3040 | if (status) { | ||
3041 | QPRINTK(qdev, IFUP, ERR, | ||
3042 | "Failed to start tx ring[%d].\n", i); | ||
3043 | return status; | ||
3044 | } | ||
3045 | } | ||
3046 | |||
3047 | status = ql_port_initialize(qdev); | ||
3048 | if (status) { | ||
3049 | QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n"); | ||
3050 | return status; | ||
3051 | } | ||
3052 | |||
3053 | status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr, | ||
3054 | MAC_ADDR_TYPE_CAM_MAC, qdev->func); | ||
3055 | if (status) { | ||
3056 | QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n"); | ||
3057 | return status; | ||
3058 | } | ||
3059 | |||
3060 | status = ql_route_initialize(qdev); | ||
3061 | if (status) { | ||
3062 | QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n"); | ||
3063 | return status; | ||
3064 | } | ||
3065 | |||
3066 | /* Start NAPI for the RSS queues. */ | ||
3067 | for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) { | ||
3068 | QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n", | ||
3069 | i); | ||
3070 | napi_enable(&qdev->rx_ring[i].napi); | ||
3071 | } | ||
3072 | |||
3073 | return status; | ||
3074 | } | ||
3075 | |||
3076 | /* Issue soft reset to chip. */ | ||
3077 | static int ql_adapter_reset(struct ql_adapter *qdev) | ||
3078 | { | ||
3079 | u32 value; | ||
3080 | int max_wait_time; | ||
3081 | int status = 0; | ||
3082 | int resetCnt = 0; | ||
3083 | |||
3084 | #define MAX_RESET_CNT 1 | ||
3085 | issueReset: | ||
3086 | resetCnt++; | ||
3087 | QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n"); | ||
3088 | ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); | ||
3089 | /* Wait for reset to complete. */ | ||
3090 | max_wait_time = 3; | ||
3091 | QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n", | ||
3092 | max_wait_time); | ||
3093 | do { | ||
3094 | value = ql_read32(qdev, RST_FO); | ||
3095 | if ((value & RST_FO_FR) == 0) | ||
3096 | break; | ||
3097 | |||
3098 | ssleep(1); | ||
3099 | } while ((--max_wait_time)); | ||
3100 | if (value & RST_FO_FR) { | ||
3101 | QPRINTK(qdev, IFDOWN, ERR, | ||
3102 | "Stuck in SoftReset: FSC_SR:0x%08x\n", value); | ||
3103 | if (resetCnt < MAX_RESET_CNT) | ||
3104 | goto issueReset; | ||
3105 | } | ||
3106 | if (max_wait_time == 0) { | ||
3107 | status = -ETIMEDOUT; | ||
3108 | QPRINTK(qdev, IFDOWN, ERR, | ||
3109 | "ETIMEOUT!!! errored out of resetting the chip!\n"); | ||
3110 | } | ||
3111 | |||
3112 | return status; | ||
3113 | } | ||
3114 | |||
3115 | static void ql_display_dev_info(struct net_device *ndev) | ||
3116 | { | ||
3117 | struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); | ||
3118 | |||
3119 | QPRINTK(qdev, PROBE, INFO, | ||
3120 | "Function #%d, NIC Roll %d, NIC Rev = %d, " | ||
3121 | "XG Roll = %d, XG Rev = %d.\n", | ||
3122 | qdev->func, | ||
3123 | qdev->chip_rev_id & 0x0000000f, | ||
3124 | qdev->chip_rev_id >> 4 & 0x0000000f, | ||
3125 | qdev->chip_rev_id >> 8 & 0x0000000f, | ||
3126 | qdev->chip_rev_id >> 12 & 0x0000000f); | ||
3127 | QPRINTK(qdev, PROBE, INFO, | ||
3128 | "MAC address %02x:%02x:%02x:%02x:%02x:%02x\n", | ||
3129 | ndev->dev_addr[0], ndev->dev_addr[1], | ||
3130 | ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4], | ||
3131 | ndev->dev_addr[5]); | ||
3132 | } | ||
3133 | |||
3134 | static int ql_adapter_down(struct ql_adapter *qdev) | ||
3135 | { | ||
3136 | struct net_device *ndev = qdev->ndev; | ||
3137 | int i, status = 0; | ||
3138 | struct rx_ring *rx_ring; | ||
3139 | |||
3140 | netif_stop_queue(ndev); | ||
3141 | netif_carrier_off(ndev); | ||
3142 | |||
3143 | cancel_delayed_work_sync(&qdev->asic_reset_work); | ||
3144 | cancel_delayed_work_sync(&qdev->mpi_reset_work); | ||
3145 | cancel_delayed_work_sync(&qdev->mpi_work); | ||
3146 | |||
3147 | /* The default queue at index 0 is always processed in | ||
3148 | * a workqueue. | ||
3149 | */ | ||
3150 | cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work); | ||
3151 | |||
3152 | /* The rest of the rx_rings are processed in | ||
3153 | * a workqueue only if it's a single interrupt | ||
3154 | * environment (MSI/Legacy). | ||
3155 | */ | ||
3156 | for (i = 1; i < qdev->rx_ring_count; i++) { | ||
3157 | rx_ring = &qdev->rx_ring[i]; | ||
3158 | /* Only the RSS rings use NAPI on multi irq | ||
3159 | * environment. Outbound completion processing | ||
3160 | * is done in interrupt context. | ||
3161 | */ | ||
3162 | if (i >= qdev->rss_ring_first_cq_id) { | ||
3163 | napi_disable(&rx_ring->napi); | ||
3164 | } else { | ||
3165 | cancel_delayed_work_sync(&rx_ring->rx_work); | ||
3166 | } | ||
3167 | } | ||
3168 | |||
3169 | clear_bit(QL_ADAPTER_UP, &qdev->flags); | ||
3170 | |||
3171 | ql_disable_interrupts(qdev); | ||
3172 | |||
3173 | ql_tx_ring_clean(qdev); | ||
3174 | |||
3175 | spin_lock(&qdev->hw_lock); | ||
3176 | status = ql_adapter_reset(qdev); | ||
3177 | if (status) | ||
3178 | QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n", | ||
3179 | qdev->func); | ||
3180 | spin_unlock(&qdev->hw_lock); | ||
3181 | return status; | ||
3182 | } | ||
3183 | |||
3184 | static int ql_adapter_up(struct ql_adapter *qdev) | ||
3185 | { | ||
3186 | int err = 0; | ||
3187 | |||
3188 | spin_lock(&qdev->hw_lock); | ||
3189 | err = ql_adapter_initialize(qdev); | ||
3190 | if (err) { | ||
3191 | QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n"); | ||
3192 | spin_unlock(&qdev->hw_lock); | ||
3193 | goto err_init; | ||
3194 | } | ||
3195 | spin_unlock(&qdev->hw_lock); | ||
3196 | set_bit(QL_ADAPTER_UP, &qdev->flags); | ||
3197 | ql_enable_interrupts(qdev); | ||
3198 | ql_enable_all_completion_interrupts(qdev); | ||
3199 | if ((ql_read32(qdev, STS) & qdev->port_init)) { | ||
3200 | netif_carrier_on(qdev->ndev); | ||
3201 | netif_start_queue(qdev->ndev); | ||
3202 | } | ||
3203 | |||
3204 | return 0; | ||
3205 | err_init: | ||
3206 | ql_adapter_reset(qdev); | ||
3207 | return err; | ||
3208 | } | ||
3209 | |||
3210 | static int ql_cycle_adapter(struct ql_adapter *qdev) | ||
3211 | { | ||
3212 | int status; | ||
3213 | |||
3214 | status = ql_adapter_down(qdev); | ||
3215 | if (status) | ||
3216 | goto error; | ||
3217 | |||
3218 | status = ql_adapter_up(qdev); | ||
3219 | if (status) | ||
3220 | goto error; | ||
3221 | |||
3222 | return status; | ||
3223 | error: | ||
3224 | QPRINTK(qdev, IFUP, ALERT, | ||
3225 | "Driver up/down cycle failed, closing device\n"); | ||
3226 | rtnl_lock(); | ||
3227 | dev_close(qdev->ndev); | ||
3228 | rtnl_unlock(); | ||
3229 | return status; | ||
3230 | } | ||
3231 | |||
3232 | static void ql_release_adapter_resources(struct ql_adapter *qdev) | ||
3233 | { | ||
3234 | ql_free_mem_resources(qdev); | ||
3235 | ql_free_irq(qdev); | ||
3236 | } | ||
3237 | |||
3238 | static int ql_get_adapter_resources(struct ql_adapter *qdev) | ||
3239 | { | ||
3240 | int status = 0; | ||
3241 | |||
3242 | if (ql_alloc_mem_resources(qdev)) { | ||
3243 | QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n"); | ||
3244 | return -ENOMEM; | ||
3245 | } | ||
3246 | status = ql_request_irq(qdev); | ||
3247 | if (status) | ||
3248 | goto err_irq; | ||
3249 | return status; | ||
3250 | err_irq: | ||
3251 | ql_free_mem_resources(qdev); | ||
3252 | return status; | ||
3253 | } | ||
3254 | |||
3255 | static int qlge_close(struct net_device *ndev) | ||
3256 | { | ||
3257 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
3258 | |||
3259 | /* | ||
3260 | * Wait for device to recover from a reset. | ||
3261 | * (Rarely happens, but possible.) | ||
3262 | */ | ||
3263 | while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) | ||
3264 | msleep(1); | ||
3265 | ql_adapter_down(qdev); | ||
3266 | ql_release_adapter_resources(qdev); | ||
3267 | ql_free_ring_cb(qdev); | ||
3268 | return 0; | ||
3269 | } | ||
3270 | |||
3271 | static int ql_configure_rings(struct ql_adapter *qdev) | ||
3272 | { | ||
3273 | int i; | ||
3274 | struct rx_ring *rx_ring; | ||
3275 | struct tx_ring *tx_ring; | ||
3276 | int cpu_cnt = num_online_cpus(); | ||
3277 | |||
3278 | /* | ||
3279 | * For each processor present we allocate one | ||
3280 | * rx_ring for outbound completions, and one | ||
3281 | * rx_ring for inbound completions. Plus there is | ||
3282 | * always the one default queue. For the CPU | ||
3283 | * counts we end up with the following rx_rings: | ||
3284 | * rx_ring count = | ||
3285 | * one default queue + | ||
3286 | * (CPU count * outbound completion rx_ring) + | ||
3287 | * (CPU count * inbound (RSS) completion rx_ring) | ||
3288 | * To keep it simple we limit the total number of | ||
3289 | * queues to < 32, so we cap the CPU count at 8. | ||
3290 | * This limitation can be removed when requested. | ||
3291 | */ | ||
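| /* Example: with 4 online CPUs this yields 4 tx_rings and | ||
| * 1 + 4 + 4 = 9 rx_rings (cq_id 0 is the default queue, 1-4 | ||
| * the outbound completion rings, 5-8 the RSS rings). | ||
| */ | ||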
3292 | |||
3293 | if (cpu_cnt > 8) | ||
3294 | cpu_cnt = 8; | ||
3295 | |||
3296 | /* | ||
3297 | * rx_ring[0] is always the default queue. | ||
3298 | */ | ||
3299 | /* Allocate outbound completion ring for each CPU. */ | ||
3300 | qdev->tx_ring_count = cpu_cnt; | ||
3301 | /* Allocate inbound completion (RSS) ring for each CPU. */ | ||
3302 | qdev->rss_ring_count = cpu_cnt; | ||
3303 | /* cq_id for the first inbound ring handler. */ | ||
3304 | qdev->rss_ring_first_cq_id = cpu_cnt + 1; | ||
3305 | /* | ||
3306 | * qdev->rx_ring_count: | ||
3307 | * Total number of rx_rings. This includes the one | ||
3308 | * default queue, a number of outbound completion | ||
3309 | * handler rx_rings, and the number of inbound | ||
3310 | * completion handler rx_rings. | ||
3311 | */ | ||
3312 | qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1; | ||
3313 | |||
3314 | if (ql_alloc_ring_cb(qdev)) | ||
3315 | return -ENOMEM; | ||
3316 | |||
3317 | for (i = 0; i < qdev->tx_ring_count; i++) { | ||
3318 | tx_ring = &qdev->tx_ring[i]; | ||
3319 | memset((void *)tx_ring, 0, sizeof(*tx_ring)); | ||
3320 | tx_ring->qdev = qdev; | ||
3321 | tx_ring->wq_id = i; | ||
3322 | tx_ring->wq_len = qdev->tx_ring_size; | ||
3323 | tx_ring->wq_size = | ||
3324 | tx_ring->wq_len * sizeof(struct ob_mac_iocb_req); | ||
3325 | |||
3326 | /* | ||
3327 | * The completion queue ID for the tx rings start | ||
3328 | * immediately after the default Q ID, which is zero. | ||
3329 | */ | ||
3330 | tx_ring->cq_id = i + 1; | ||
3331 | } | ||
3332 | |||
3333 | for (i = 0; i < qdev->rx_ring_count; i++) { | ||
3334 | rx_ring = &qdev->rx_ring[i]; | ||
3335 | memset((void *)rx_ring, 0, sizeof(*rx_ring)); | ||
3336 | rx_ring->qdev = qdev; | ||
3337 | rx_ring->cq_id = i; | ||
3338 | rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */ | ||
3339 | if (i == 0) { /* Default queue at index 0. */ | ||
3340 | /* | ||
3341 | * Default queue handles bcast/mcast plus | ||
3342 | * async events. Needs buffers. | ||
3343 | */ | ||
3344 | rx_ring->cq_len = qdev->rx_ring_size; | ||
3345 | rx_ring->cq_size = | ||
3346 | rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); | ||
3347 | rx_ring->lbq_len = NUM_LARGE_BUFFERS; | ||
3348 | rx_ring->lbq_size = | ||
3349 | rx_ring->lbq_len * sizeof(struct bq_element); | ||
3350 | rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE; | ||
3351 | rx_ring->sbq_len = NUM_SMALL_BUFFERS; | ||
3352 | rx_ring->sbq_size = | ||
3353 | rx_ring->sbq_len * sizeof(struct bq_element); | ||
3354 | rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2; | ||
3355 | rx_ring->type = DEFAULT_Q; | ||
3356 | } else if (i < qdev->rss_ring_first_cq_id) { | ||
3357 | /* | ||
3358 | * Outbound queue handles outbound completions only. | ||
3359 | */ | ||
3360 | /* outbound cq is same size as tx_ring it services. */ | ||
3361 | rx_ring->cq_len = qdev->tx_ring_size; | ||
3362 | rx_ring->cq_size = | ||
3363 | rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); | ||
3364 | rx_ring->lbq_len = 0; | ||
3365 | rx_ring->lbq_size = 0; | ||
3366 | rx_ring->lbq_buf_size = 0; | ||
3367 | rx_ring->sbq_len = 0; | ||
3368 | rx_ring->sbq_size = 0; | ||
3369 | rx_ring->sbq_buf_size = 0; | ||
3370 | rx_ring->type = TX_Q; | ||
3371 | } else { /* Inbound completions (RSS) queues */ | ||
3372 | /* | ||
3373 | * Inbound queues handle unicast frames only. | ||
3374 | */ | ||
3375 | rx_ring->cq_len = qdev->rx_ring_size; | ||
3376 | rx_ring->cq_size = | ||
3377 | rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); | ||
3378 | rx_ring->lbq_len = NUM_LARGE_BUFFERS; | ||
3379 | rx_ring->lbq_size = | ||
3380 | rx_ring->lbq_len * sizeof(struct bq_element); | ||
3381 | rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE; | ||
3382 | rx_ring->sbq_len = NUM_SMALL_BUFFERS; | ||
3383 | rx_ring->sbq_size = | ||
3384 | rx_ring->sbq_len * sizeof(struct bq_element); | ||
3385 | rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2; | ||
3386 | rx_ring->type = RX_Q; | ||
3387 | } | ||
3388 | } | ||
3389 | return 0; | ||
3390 | } | ||
3391 | |||
3392 | static int qlge_open(struct net_device *ndev) | ||
3393 | { | ||
3394 | int err = 0; | ||
3395 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
3396 | |||
3397 | err = ql_configure_rings(qdev); | ||
3398 | if (err) | ||
3399 | return err; | ||
3400 | |||
3401 | err = ql_get_adapter_resources(qdev); | ||
3402 | if (err) | ||
3403 | goto error_up; | ||
3404 | |||
3405 | err = ql_adapter_up(qdev); | ||
3406 | if (err) | ||
3407 | goto error_up; | ||
3408 | |||
3409 | return err; | ||
3410 | |||
3411 | error_up: | ||
3412 | ql_release_adapter_resources(qdev); | ||
3413 | ql_free_ring_cb(qdev); | ||
3414 | return err; | ||
3415 | } | ||
3416 | |||
3417 | static int qlge_change_mtu(struct net_device *ndev, int new_mtu) | ||
3418 | { | ||
3419 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
3420 | |||
3421 | if (ndev->mtu == 1500 && new_mtu == 9000) { | ||
3422 | QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n"); | ||
3423 | } else if (ndev->mtu == 9000 && new_mtu == 1500) { | ||
3424 | QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n"); | ||
3425 | } else if ((ndev->mtu == 1500 && new_mtu == 1500) || | ||
3426 | (ndev->mtu == 9000 && new_mtu == 9000)) { | ||
3427 | return 0; | ||
3428 | } else | ||
3429 | return -EINVAL; | ||
3430 | ndev->mtu = new_mtu; | ||
3431 | return 0; | ||
3432 | } | ||
3433 | |||
3434 | static struct net_device_stats *qlge_get_stats(struct net_device *ndev) | ||
3436 | { | ||
3437 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
3438 | return &qdev->stats; | ||
3439 | } | ||
3440 | |||
3441 | static void qlge_set_multicast_list(struct net_device *ndev) | ||
3442 | { | ||
3443 | struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); | ||
3444 | struct dev_mc_list *mc_ptr; | ||
3445 | int i; | ||
3446 | |||
3447 | spin_lock(&qdev->hw_lock); | ||
3448 | /* | ||
3449 | * Set or clear promiscuous mode if a | ||
3450 | * transition is taking place. | ||
3451 | */ | ||
3452 | if (ndev->flags & IFF_PROMISC) { | ||
3453 | if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) { | ||
3454 | if (ql_set_routing_reg | ||
3455 | (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) { | ||
3456 | QPRINTK(qdev, HW, ERR, | ||
3457 | "Failed to set promiscous mode.\n"); | ||
3458 | } else { | ||
3459 | set_bit(QL_PROMISCUOUS, &qdev->flags); | ||
3460 | } | ||
3461 | } | ||
3462 | } else { | ||
3463 | if (test_bit(QL_PROMISCUOUS, &qdev->flags)) { | ||
3464 | if (ql_set_routing_reg | ||
3465 | (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) { | ||
3466 | QPRINTK(qdev, HW, ERR, | ||
3467 | "Failed to clear promiscous mode.\n"); | ||
3468 | } else { | ||
3469 | clear_bit(QL_PROMISCUOUS, &qdev->flags); | ||
3470 | } | ||
3471 | } | ||
3472 | } | ||
3473 | |||
3474 | /* | ||
3475 | * Set or clear all multicast mode if a | ||
3476 | * transition is taking place. | ||
3477 | */ | ||
3478 | if ((ndev->flags & IFF_ALLMULTI) || | ||
3479 | (ndev->mc_count > MAX_MULTICAST_ENTRIES)) { | ||
3480 | if (!test_bit(QL_ALLMULTI, &qdev->flags)) { | ||
3481 | if (ql_set_routing_reg | ||
3482 | (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) { | ||
3483 | QPRINTK(qdev, HW, ERR, | ||
3484 | "Failed to set all-multi mode.\n"); | ||
3485 | } else { | ||
3486 | set_bit(QL_ALLMULTI, &qdev->flags); | ||
3487 | } | ||
3488 | } | ||
3489 | } else { | ||
3490 | if (test_bit(QL_ALLMULTI, &qdev->flags)) { | ||
3491 | if (ql_set_routing_reg | ||
3492 | (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) { | ||
3493 | QPRINTK(qdev, HW, ERR, | ||
3494 | "Failed to clear all-multi mode.\n"); | ||
3495 | } else { | ||
3496 | clear_bit(QL_ALLMULTI, &qdev->flags); | ||
3497 | } | ||
3498 | } | ||
3499 | } | ||
3500 | |||
3501 | if (ndev->mc_count) { | ||
3502 | for (i = 0, mc_ptr = ndev->mc_list; mc_ptr; | ||
3503 | i++, mc_ptr = mc_ptr->next) | ||
3504 | if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr, | ||
3505 | MAC_ADDR_TYPE_MULTI_MAC, i)) { | ||
3506 | QPRINTK(qdev, HW, ERR, | ||
3507 | "Failed to loadmulticast address.\n"); | ||
3508 | goto exit; | ||
3509 | } | ||
3510 | if (ql_set_routing_reg | ||
3511 | (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { | ||
3512 | QPRINTK(qdev, HW, ERR, | ||
3513 | "Failed to set multicast match mode.\n"); | ||
3514 | } else { | ||
3515 | set_bit(QL_ALLMULTI, &qdev->flags); | ||
3516 | } | ||
3517 | } | ||
3518 | exit: | ||
3519 | spin_unlock(&qdev->hw_lock); | ||
3520 | } | ||
3521 | |||
3522 | static int qlge_set_mac_address(struct net_device *ndev, void *p) | ||
3523 | { | ||
3524 | struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); | ||
3525 | struct sockaddr *addr = p; | ||
3526 | |||
3527 | if (netif_running(ndev)) | ||
3528 | return -EBUSY; | ||
3529 | |||
3530 | if (!is_valid_ether_addr(addr->sa_data)) | ||
3531 | return -EADDRNOTAVAIL; | ||
3532 | memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); | ||
3533 | |||
3534 | spin_lock(&qdev->hw_lock); | ||
3535 | if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, | ||
3536 | MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */ | ||
3537 | QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n"); | ||
| spin_unlock(&qdev->hw_lock); | ||
3538 | return -1; | ||
3539 | } | ||
3540 | spin_unlock(&qdev->hw_lock); | ||
3541 | |||
3542 | return 0; | ||
3543 | } | ||
3544 | |||
3545 | static void qlge_tx_timeout(struct net_device *ndev) | ||
3546 | { | ||
3547 | struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); | ||
3548 | queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0); | ||
3549 | } | ||
3550 | |||
3551 | static void ql_asic_reset_work(struct work_struct *work) | ||
3552 | { | ||
3553 | struct ql_adapter *qdev = | ||
3554 | container_of(work, struct ql_adapter, asic_reset_work.work); | ||
3555 | ql_cycle_adapter(qdev); | ||
3556 | } | ||
3557 | |||
3558 | static void ql_get_board_info(struct ql_adapter *qdev) | ||
3559 | { | ||
3560 | qdev->func = | ||
3561 | (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; | ||
3562 | if (qdev->func) { | ||
3563 | qdev->xg_sem_mask = SEM_XGMAC1_MASK; | ||
3564 | qdev->port_link_up = STS_PL1; | ||
3565 | qdev->port_init = STS_PI1; | ||
3566 | qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI; | ||
3567 | qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO; | ||
3568 | } else { | ||
3569 | qdev->xg_sem_mask = SEM_XGMAC0_MASK; | ||
3570 | qdev->port_link_up = STS_PL0; | ||
3571 | qdev->port_init = STS_PI0; | ||
3572 | qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI; | ||
3573 | qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO; | ||
3574 | } | ||
3575 | qdev->chip_rev_id = ql_read32(qdev, REV_ID); | ||
3576 | } | ||
3577 | |||
3578 | static void ql_release_all(struct pci_dev *pdev) | ||
3579 | { | ||
3580 | struct net_device *ndev = pci_get_drvdata(pdev); | ||
3581 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
3582 | |||
3583 | if (qdev->workqueue) { | ||
3584 | destroy_workqueue(qdev->workqueue); | ||
3585 | qdev->workqueue = NULL; | ||
3586 | } | ||
3587 | if (qdev->q_workqueue) { | ||
3588 | destroy_workqueue(qdev->q_workqueue); | ||
3589 | qdev->q_workqueue = NULL; | ||
3590 | } | ||
3591 | if (qdev->reg_base) | ||
3592 | iounmap((void *)qdev->reg_base); | ||
3593 | if (qdev->doorbell_area) | ||
3594 | iounmap(qdev->doorbell_area); | ||
3595 | pci_release_regions(pdev); | ||
3596 | pci_set_drvdata(pdev, NULL); | ||
3597 | } | ||
3598 | |||
3599 | static int __devinit ql_init_device(struct pci_dev *pdev, | ||
3600 | struct net_device *ndev, int cards_found) | ||
3601 | { | ||
3602 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
3603 | int pos, err = 0; | ||
3604 | u16 val16; | ||
3605 | |||
3606 | memset((void *)qdev, 0, sizeof(*qdev)); | ||
3607 | err = pci_enable_device(pdev); | ||
3608 | if (err) { | ||
3609 | dev_err(&pdev->dev, "PCI device enable failed.\n"); | ||
3610 | return err; | ||
3611 | } | ||
3612 | |||
3613 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | ||
3614 | if (pos <= 0) { | ||
3615 | dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " | ||
3616 | "aborting.\n"); | ||
| err = -ENODEV; | ||
3617 | goto err_out; | ||
3618 | } else { | ||
3619 | pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); | ||
3620 | val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; | ||
3621 | val16 |= (PCI_EXP_DEVCTL_CERE | | ||
3622 | PCI_EXP_DEVCTL_NFERE | | ||
3623 | PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE); | ||
3624 | pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); | ||
3625 | } | ||
3626 | |||
3627 | err = pci_request_regions(pdev, DRV_NAME); | ||
3628 | if (err) { | ||
3629 | dev_err(&pdev->dev, "PCI region request failed.\n"); | ||
3630 | goto err_out; | ||
3631 | } | ||
3632 | |||
3633 | pci_set_master(pdev); | ||
3634 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { | ||
3635 | set_bit(QL_DMA64, &qdev->flags); | ||
3636 | err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | ||
3637 | } else { | ||
3638 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
3639 | if (!err) | ||
3640 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | ||
3641 | } | ||
3642 | |||
3643 | if (err) { | ||
3644 | dev_err(&pdev->dev, "No usable DMA configuration.\n"); | ||
3645 | goto err_out; | ||
3646 | } | ||
3647 | |||
3648 | pci_set_drvdata(pdev, ndev); | ||
3649 | qdev->reg_base = | ||
3650 | ioremap_nocache(pci_resource_start(pdev, 1), | ||
3651 | pci_resource_len(pdev, 1)); | ||
3652 | if (!qdev->reg_base) { | ||
3653 | dev_err(&pdev->dev, "Register mapping failed.\n"); | ||
3654 | err = -ENOMEM; | ||
3655 | goto err_out; | ||
3656 | } | ||
3657 | |||
3658 | qdev->doorbell_area_size = pci_resource_len(pdev, 3); | ||
3659 | qdev->doorbell_area = | ||
3660 | ioremap_nocache(pci_resource_start(pdev, 3), | ||
3661 | pci_resource_len(pdev, 3)); | ||
3662 | if (!qdev->doorbell_area) { | ||
3663 | dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); | ||
3664 | err = -ENOMEM; | ||
3665 | goto err_out; | ||
3666 | } | ||
3667 | |||
3668 | ql_get_board_info(qdev); | ||
3669 | qdev->ndev = ndev; | ||
3670 | qdev->pdev = pdev; | ||
3671 | qdev->msg_enable = netif_msg_init(debug, default_msg); | ||
3672 | spin_lock_init(&qdev->hw_lock); | ||
3673 | spin_lock_init(&qdev->stats_lock); | ||
3674 | |||
3675 | /* make sure the EEPROM is good */ | ||
3676 | err = ql_get_flash_params(qdev); | ||
3677 | if (err) { | ||
3678 | dev_err(&pdev->dev, "Invalid FLASH.\n"); | ||
3679 | goto err_out; | ||
3680 | } | ||
3681 | |||
3682 | if (!is_valid_ether_addr(qdev->flash.mac_addr)) { | ||
| dev_err(&pdev->dev, "Invalid MAC address in FLASH.\n"); | ||
| err = -EIO; | ||
3683 | goto err_out; | ||
| } | ||
3684 | |||
3685 | memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len); | ||
3686 | memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); | ||
3687 | |||
3688 | /* Set up the default ring sizes. */ | ||
3689 | qdev->tx_ring_size = NUM_TX_RING_ENTRIES; | ||
3690 | qdev->rx_ring_size = NUM_RX_RING_ENTRIES; | ||
3691 | |||
3692 | /* Set up the coalescing parameters. */ | ||
3693 | qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT; | ||
3694 | qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT; | ||
3695 | qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; | ||
3696 | qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; | ||
3697 | |||
3698 | /* | ||
3699 | * Set up the operating parameters. | ||
3700 | */ | ||
3701 | qdev->rx_csum = 1; | ||
3702 | |||
3703 | qdev->q_workqueue = create_workqueue(ndev->name); | ||
3704 | qdev->workqueue = create_singlethread_workqueue(ndev->name); | ||
3705 | INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); | ||
3706 | INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); | ||
3707 | INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); | ||
3708 | |||
3709 | if (!cards_found) { | ||
3710 | dev_info(&pdev->dev, "%s\n", DRV_STRING); | ||
3711 | dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n", | ||
3712 | DRV_NAME, DRV_VERSION); | ||
3713 | } | ||
3714 | return 0; | ||
3715 | err_out: | ||
3716 | ql_release_all(pdev); | ||
3717 | pci_disable_device(pdev); | ||
3718 | return err; | ||
3719 | } | ||
3720 | |||
3721 | static int __devinit qlge_probe(struct pci_dev *pdev, | ||
3722 | const struct pci_device_id *pci_entry) | ||
3723 | { | ||
3724 | struct net_device *ndev = NULL; | ||
3725 | struct ql_adapter *qdev = NULL; | ||
3726 | static int cards_found = 0; | ||
3727 | int err = 0; | ||
3728 | |||
3729 | ndev = alloc_etherdev(sizeof(struct ql_adapter)); | ||
3730 | if (!ndev) | ||
3731 | return -ENOMEM; | ||
3732 | |||
3733 | err = ql_init_device(pdev, ndev, cards_found); | ||
3734 | if (err < 0) { | ||
3735 | free_netdev(ndev); | ||
3736 | return err; | ||
3737 | } | ||
3738 | |||
3739 | qdev = netdev_priv(ndev); | ||
3740 | SET_NETDEV_DEV(ndev, &pdev->dev); | ||
3741 | ndev->features = (0 | ||
3742 | | NETIF_F_IP_CSUM | ||
3743 | | NETIF_F_SG | ||
3744 | | NETIF_F_TSO | ||
3745 | | NETIF_F_TSO6 | ||
3746 | | NETIF_F_TSO_ECN | ||
3747 | | NETIF_F_HW_VLAN_TX | ||
3748 | | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER); | ||
3749 | |||
3750 | if (test_bit(QL_DMA64, &qdev->flags)) | ||
3751 | ndev->features |= NETIF_F_HIGHDMA; | ||
3752 | |||
3753 | /* | ||
3754 | * Set up net_device structure. | ||
3755 | */ | ||
3756 | ndev->tx_queue_len = qdev->tx_ring_size; | ||
3757 | ndev->irq = pdev->irq; | ||
3758 | ndev->open = qlge_open; | ||
3759 | ndev->stop = qlge_close; | ||
3760 | ndev->hard_start_xmit = qlge_send; | ||
3761 | SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops); | ||
3762 | ndev->change_mtu = qlge_change_mtu; | ||
3763 | ndev->get_stats = qlge_get_stats; | ||
3764 | ndev->set_multicast_list = qlge_set_multicast_list; | ||
3765 | ndev->set_mac_address = qlge_set_mac_address; | ||
3766 | ndev->tx_timeout = qlge_tx_timeout; | ||
3767 | ndev->watchdog_timeo = 10 * HZ; | ||
3768 | ndev->vlan_rx_register = ql_vlan_rx_register; | ||
3769 | ndev->vlan_rx_add_vid = ql_vlan_rx_add_vid; | ||
3770 | ndev->vlan_rx_kill_vid = ql_vlan_rx_kill_vid; | ||
3771 | err = register_netdev(ndev); | ||
3772 | if (err) { | ||
3773 | dev_err(&pdev->dev, "net device registration failed.\n"); | ||
3774 | ql_release_all(pdev); | ||
3775 | pci_disable_device(pdev); | ||
3776 | return err; | ||
3777 | } | ||
3778 | netif_carrier_off(ndev); | ||
3779 | netif_stop_queue(ndev); | ||
3780 | ql_display_dev_info(ndev); | ||
3781 | cards_found++; | ||
3782 | return 0; | ||
3783 | } | ||
3784 | |||
3785 | static void __devexit qlge_remove(struct pci_dev *pdev) | ||
3786 | { | ||
3787 | struct net_device *ndev = pci_get_drvdata(pdev); | ||
3788 | unregister_netdev(ndev); | ||
3789 | ql_release_all(pdev); | ||
3790 | pci_disable_device(pdev); | ||
3791 | free_netdev(ndev); | ||
3792 | } | ||
3793 | |||
3794 | /* | ||
3795 | * This callback is called by the PCI subsystem whenever | ||
3796 | * a PCI bus error is detected. | ||
3797 | */ | ||
3798 | static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, | ||
3799 | enum pci_channel_state state) | ||
3800 | { | ||
3801 | struct net_device *ndev = pci_get_drvdata(pdev); | ||
3802 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
3803 | |||
3804 | if (netif_running(ndev)) | ||
3805 | ql_adapter_down(qdev); | ||
3806 | |||
3807 | pci_disable_device(pdev); | ||
3808 | |||
3809 | /* Request a slot reset. */ | ||
3810 | return PCI_ERS_RESULT_NEED_RESET; | ||
3811 | } | ||
3812 | |||
3813 | /* | ||
3814 | * This callback is called after the PCI bus has been reset. | ||
3815 | * Basically, this tries to restart the card from scratch. | ||
3816 | * This is a shortened version of the device probe/discovery code, | ||
3817 | * it resembles the first half of the probe routine. | ||
3818 | */ | ||
3819 | static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev) | ||
3820 | { | ||
3821 | struct net_device *ndev = pci_get_drvdata(pdev); | ||
3822 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
3823 | |||
3824 | if (pci_enable_device(pdev)) { | ||
3825 | QPRINTK(qdev, IFUP, ERR, | ||
3826 | "Cannot re-enable PCI device after reset.\n"); | ||
3827 | return PCI_ERS_RESULT_DISCONNECT; | ||
3828 | } | ||
3829 | |||
3830 | pci_set_master(pdev); | ||
3831 | |||
3832 | netif_carrier_off(ndev); | ||
3833 | netif_stop_queue(ndev); | ||
3834 | ql_adapter_reset(qdev); | ||
3835 | |||
3836 | /* Make sure the EEPROM is good */ | ||
3837 | memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); | ||
3838 | |||
3839 | if (!is_valid_ether_addr(ndev->perm_addr)) { | ||
3840 | QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n"); | ||
3841 | return PCI_ERS_RESULT_DISCONNECT; | ||
3842 | } | ||
3843 | |||
3844 | return PCI_ERS_RESULT_RECOVERED; | ||
3845 | } | ||
3846 | |||
3847 | static void qlge_io_resume(struct pci_dev *pdev) | ||
3848 | { | ||
3849 | struct net_device *ndev = pci_get_drvdata(pdev); | ||
3850 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
3851 | |||
3852 | pci_set_master(pdev); | ||
3853 | |||
3854 | if (netif_running(ndev)) { | ||
3855 | if (ql_adapter_up(qdev)) { | ||
3856 | QPRINTK(qdev, IFUP, ERR, | ||
3857 | "Device initialization failed after reset.\n"); | ||
3858 | return; | ||
3859 | } | ||
3860 | } | ||
3861 | |||
3862 | netif_device_attach(ndev); | ||
3863 | } | ||
3864 | |||
3865 | static struct pci_error_handlers qlge_err_handler = { | ||
3866 | .error_detected = qlge_io_error_detected, | ||
3867 | .slot_reset = qlge_io_slot_reset, | ||
3868 | .resume = qlge_io_resume, | ||
3869 | }; | ||
3870 | |||
3871 | static int qlge_suspend(struct pci_dev *pdev, pm_message_t state) | ||
3872 | { | ||
3873 | struct net_device *ndev = pci_get_drvdata(pdev); | ||
3874 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
3875 | int err; | ||
3876 | |||
3877 | netif_device_detach(ndev); | ||
3878 | |||
3879 | if (netif_running(ndev)) { | ||
3880 | err = ql_adapter_down(qdev); | ||
3881 | if (err) | ||
3882 | return err; | ||
3883 | } | ||
3884 | |||
3885 | err = pci_save_state(pdev); | ||
3886 | if (err) | ||
3887 | return err; | ||
3888 | |||
3889 | pci_disable_device(pdev); | ||
3890 | |||
3891 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | ||
3892 | |||
3893 | return 0; | ||
3894 | } | ||
3895 | |||
3896 | static int qlge_resume(struct pci_dev *pdev) | ||
3897 | { | ||
3898 | struct net_device *ndev = pci_get_drvdata(pdev); | ||
3899 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
3900 | int err; | ||
3901 | |||
3902 | pci_set_power_state(pdev, PCI_D0); | ||
3903 | pci_restore_state(pdev); | ||
3904 | err = pci_enable_device(pdev); | ||
3905 | if (err) { | ||
3906 | QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n"); | ||
3907 | return err; | ||
3908 | } | ||
3909 | pci_set_master(pdev); | ||
3910 | |||
3911 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
3912 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
3913 | |||
3914 | if (netif_running(ndev)) { | ||
3915 | err = ql_adapter_up(qdev); | ||
3916 | if (err) | ||
3917 | return err; | ||
3918 | } | ||
3919 | |||
3920 | netif_device_attach(ndev); | ||
3921 | |||
3922 | return 0; | ||
3923 | } | ||
3924 | |||
3925 | static void qlge_shutdown(struct pci_dev *pdev) | ||
3926 | { | ||
3927 | qlge_suspend(pdev, PMSG_SUSPEND); | ||
3928 | } | ||
3929 | |||
3930 | static struct pci_driver qlge_driver = { | ||
3931 | .name = DRV_NAME, | ||
3932 | .id_table = qlge_pci_tbl, | ||
3933 | .probe = qlge_probe, | ||
3934 | .remove = __devexit_p(qlge_remove), | ||
3935 | #ifdef CONFIG_PM | ||
3936 | .suspend = qlge_suspend, | ||
3937 | .resume = qlge_resume, | ||
3938 | #endif | ||
3939 | .shutdown = qlge_shutdown, | ||
3940 | .err_handler = &qlge_err_handler | ||
3941 | }; | ||
3942 | |||
3943 | static int __init qlge_init_module(void) | ||
3944 | { | ||
3945 | return pci_register_driver(&qlge_driver); | ||
3946 | } | ||
3947 | |||
3948 | static void __exit qlge_exit(void) | ||
3949 | { | ||
3950 | pci_unregister_driver(&qlge_driver); | ||
3951 | } | ||
3952 | |||
3953 | module_init(qlge_init_module); | ||
3954 | module_exit(qlge_exit); | ||
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c new file mode 100644 index 000000000000..24fe344bcf1f --- /dev/null +++ b/drivers/net/qlge/qlge_mpi.c | |||
@@ -0,0 +1,150 @@ | |||
1 | #include "qlge.h" | ||
2 | |||
3 | static int ql_read_mbox_reg(struct ql_adapter *qdev, u32 reg, u32 *data) | ||
4 | { | ||
5 | int status; | ||
6 | /* wait for reg to come ready */ | ||
7 | status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); | ||
8 | if (status) | ||
9 | goto exit; | ||
10 | /* set up for reg read */ | ||
11 | ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R); | ||
12 | /* wait for reg to come ready */ | ||
13 | status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); | ||
14 | if (status) | ||
15 | goto exit; | ||
16 | /* get the data */ | ||
17 | *data = ql_read32(qdev, PROC_DATA); | ||
18 | exit: | ||
19 | return status; | ||
20 | } | ||
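ql_read_mbox_reg() above covers only the inbound direction. For symmetry, a hypothetical outbound twin might look like the sketch below; it assumes the processor-address interface latches a register write when PROC_DATA is staged and PROC_ADDR is then written without the PROC_ADDR_R flag. This helper is not part of the patch:

static int ql_write_mbox_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for the proc-address interface to come ready */
	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	if (status)
		return status;
	/* stage the data, then trigger the write */
	ql_write32(qdev, PROC_DATA, data);
	ql_write32(qdev, PROC_ADDR, reg);
	/* wait for the hardware to finish the write */
	return ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
}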
21 | |||
22 | int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp) | ||
23 | { | ||
24 | int i, status; | ||
25 | |||
26 | status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); | ||
27 | if (status) | ||
28 | return -EBUSY; | ||
29 | for (i = 0; i < mbcp->out_count; i++) { | ||
30 | status = | ||
31 | ql_read_mbox_reg(qdev, qdev->mailbox_out + i, | ||
32 | &mbcp->mbox_out[i]); | ||
33 | if (status) { | ||
34 | QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n"); | ||
35 | break; | ||
36 | } | ||
37 | } | ||
38 | ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */ | ||
39 | return status; | ||
40 | } | ||
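Callers of ql_get_mb_sts() size out_count to the number of status words the event carries, as the AEN handlers below do. A minimal usage sketch, with field names taken from their use in this file:

	struct mbox_params mbc = { .out_count = 2 };

	if (ql_get_mb_sts(qdev, &mbc) == 0)
		printk(KERN_INFO "mbox[0]=%08x mbox[1]=%08x\n",
		       mbc.mbox_out[0], mbc.mbox_out[1]);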
41 | |||
42 | static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp) | ||
43 | { | ||
44 | mbcp->out_count = 2; | ||
45 | |||
46 | if (ql_get_mb_sts(qdev, mbcp)) | ||
47 | goto exit; | ||
48 | |||
49 | qdev->link_status = mbcp->mbox_out[1]; | ||
50 | QPRINTK(qdev, DRV, ERR, "Link Up.\n"); | ||
51 | QPRINTK(qdev, DRV, INFO, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]); | ||
52 | if (!netif_carrier_ok(qdev->ndev)) { | ||
53 | QPRINTK(qdev, LINK, INFO, "Link is Up.\n"); | ||
54 | netif_carrier_on(qdev->ndev); | ||
55 | netif_wake_queue(qdev->ndev); | ||
56 | } | ||
57 | exit: | ||
58 | /* Clear the MPI firmware status. */ | ||
59 | ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); | ||
60 | } | ||
61 | |||
62 | static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp) | ||
63 | { | ||
64 | mbcp->out_count = 3; | ||
65 | |||
66 | if (ql_get_mb_sts(qdev, mbcp)) { | ||
67 | QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n"); | ||
68 | goto exit; | ||
69 | } | ||
70 | |||
71 | if (netif_carrier_ok(qdev->ndev)) { | ||
72 | QPRINTK(qdev, LINK, INFO, "Link is Down.\n"); | ||
73 | netif_carrier_off(qdev->ndev); | ||
74 | netif_stop_queue(qdev->ndev); | ||
75 | } | ||
76 | QPRINTK(qdev, DRV, ERR, "Link Down.\n"); | ||
77 | QPRINTK(qdev, DRV, ERR, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]); | ||
78 | exit: | ||
79 | /* Clear the MPI firmware status. */ | ||
80 | ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); | ||
81 | } | ||
82 | |||
83 | static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp) | ||
84 | { | ||
85 | mbcp->out_count = 2; | ||
86 | |||
87 | if (ql_get_mb_sts(qdev, mbcp)) { | ||
88 | QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n"); | ||
89 | goto exit; | ||
90 | } | ||
91 | QPRINTK(qdev, DRV, ERR, "Firmware initialized!\n"); | ||
92 | QPRINTK(qdev, DRV, ERR, "Firmware status = 0x%.08x.\n", | ||
93 | mbcp->mbox_out[0]); | ||
94 | QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n", | ||
95 | mbcp->mbox_out[1]); | ||
96 | exit: | ||
97 | /* Clear the MPI firmware status. */ | ||
98 | ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); | ||
99 | } | ||
100 | |||
101 | void ql_mpi_work(struct work_struct *work) | ||
102 | { | ||
103 | struct ql_adapter *qdev = | ||
104 | container_of(work, struct ql_adapter, mpi_work.work); | ||
105 | struct mbox_params mbc; | ||
106 | struct mbox_params *mbcp = &mbc; | ||
107 | mbcp->out_count = 1; | ||
108 | |||
109 | while (ql_read32(qdev, STS) & STS_PI) { | ||
110 | if (ql_get_mb_sts(qdev, mbcp)) { | ||
111 | QPRINTK(qdev, DRV, ERR, | ||
112 | "Could not read MPI, resetting ASIC!\n"); | ||
113 | ql_queue_asic_error(qdev); | ||
114 | } | ||
115 | |||
116 | switch (mbcp->mbox_out[0]) { | ||
117 | case AEN_LINK_UP: | ||
118 | ql_link_up(qdev, mbcp); | ||
119 | break; | ||
120 | case AEN_LINK_DOWN: | ||
121 | ql_link_down(qdev, mbcp); | ||
122 | break; | ||
123 | case AEN_FW_INIT_DONE: | ||
124 | ql_init_fw_done(qdev, mbcp); | ||
125 | break; | ||
126 | case MB_CMD_STS_GOOD: | ||
127 | break; | ||
128 | case AEN_FW_INIT_FAIL: | ||
129 | case AEN_SYS_ERR: | ||
130 | case MB_CMD_STS_ERR: | ||
131 | ql_queue_fw_error(qdev); /* fall through */ | ||
132 | default: | ||
133 | /* Clear the MPI firmware status. */ | ||
134 | ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); | ||
135 | break; | ||
136 | } | ||
137 | } | ||
138 | ql_enable_completion_interrupt(qdev, 0); | ||
139 | } | ||
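ql_mpi_work() only drains events; something must schedule it when the MPI processor raises STS_PI. A sketch of how an interrupt handler might defer to it, assuming qdev->workqueue and the INIT_DELAYED_WORK() setup done at probe time (the handler name is illustrative, not the driver's real ISR):

static irqreturn_t sketch_isr(int irq, void *dev_id)
{
	struct ql_adapter *qdev = dev_id;

	if (ql_read32(qdev, STS) & STS_PI) {
		/* defer mailbox processing to process context */
		queue_delayed_work(qdev->workqueue, &qdev->mpi_work, 0);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}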
140 | |||
141 | void ql_mpi_reset_work(struct work_struct *work) | ||
142 | { | ||
143 | struct ql_adapter *qdev = | ||
144 | container_of(work, struct ql_adapter, mpi_reset_work.work); | ||
145 | QPRINTK(qdev, DRV, ERR, | ||
146 | "Enter, qdev = %p..\n", qdev); | ||
147 | ql_write32(qdev, CSR, CSR_CMD_SET_RST); | ||
148 | msleep(50); | ||
149 | ql_write32(qdev, CSR, CSR_CMD_CLR_RST); | ||
150 | } | ||
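Both work handlers are presumably wired up during probe; the container_of() calls above imply mpi_work and mpi_reset_work are struct delayed_work members of ql_adapter. A minimal sketch of the expected setup (the exact location in qlge_main.c is not shown in this hunk):

	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);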