author     Greg Kroah-Hartman <gregkh@suse.de>    2009-09-15 13:09:27 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>    2009-09-15 15:02:35 -0400
commit     e9d599220b97e7d52311f6011c75ba0cfcb356fe (patch)
tree       1309442e3019c2d0b469e3bafdb4eed2e3cc3c73 /drivers
parent     0b33559a1adb3b9953503c9b55a61c37db34ffc0 (diff)
Staging: remove sxg driver
Unfortunately, the upstream company has abandoned development of this
driver. So it's best to just remove the driver from the tree.
Cc: Christopher Harrer <charrer@alacritech.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/staging/Kconfig               |    2
-rw-r--r--   drivers/staging/Makefile              |    1
-rw-r--r--   drivers/staging/sxg/Kconfig           |   11
-rw-r--r--   drivers/staging/sxg/Makefile          |    3
-rw-r--r--   drivers/staging/sxg/README            |   12
-rw-r--r--   drivers/staging/sxg/sxg.c             | 4543
-rw-r--r--   drivers/staging/sxg/sxg.h             |  787
-rw-r--r--   drivers/staging/sxg/sxg_ethtool.c     |  328
-rw-r--r--   drivers/staging/sxg/sxg_os.h          |  149
-rw-r--r--   drivers/staging/sxg/sxgdbg.h          |  184
-rw-r--r--   drivers/staging/sxg/sxghif.h          | 1014
-rw-r--r--   drivers/staging/sxg/sxghw.h           | 1020
-rw-r--r--   drivers/staging/sxg/sxgphycode-1.2.h  |  130
13 files changed, 0 insertions, 8184 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index e86a6716156c..10d3fcffe91c 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -45,8 +45,6 @@ source "drivers/staging/et131x/Kconfig"
 
 source "drivers/staging/slicoss/Kconfig"
 
-source "drivers/staging/sxg/Kconfig"
-
 source "drivers/staging/go7007/Kconfig"
 
 source "drivers/staging/usbip/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index fa5361664ba8..c30093bae621 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -5,7 +5,6 @@ obj-$(CONFIG_STAGING) += staging.o
 
 obj-$(CONFIG_ET131X) += et131x/
 obj-$(CONFIG_SLICOSS) += slicoss/
-obj-$(CONFIG_SXG) += sxg/
 obj-$(CONFIG_VIDEO_GO7007) += go7007/
 obj-$(CONFIG_USB_IP_COMMON) += usbip/
 obj-$(CONFIG_W35UND) += winbond/
diff --git a/drivers/staging/sxg/Kconfig b/drivers/staging/sxg/Kconfig
deleted file mode 100644
index c5cbdafee4d3..000000000000
--- a/drivers/staging/sxg/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-config SXG
-	tristate "Alacritech SLIC Technology Non-Accelerated 10Gbe support"
-	depends on PCI && NETDEV_10000
-	depends on X86
-	default n
-	help
-	  This driver supports the Alacritech SLIC Technology Non-Accelerated
-	  10Gbe network cards.
-
-	  To compile this driver as a module, choose
-	  M here: the module will be called sxg_nic.
diff --git a/drivers/staging/sxg/Makefile b/drivers/staging/sxg/Makefile
deleted file mode 100644
index 8e053222c2ae..000000000000
--- a/drivers/staging/sxg/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_SXG) += sxg_nic.o
-
-sxg_nic-y := sxg.o sxg_ethtool.o
diff --git a/drivers/staging/sxg/README b/drivers/staging/sxg/README
deleted file mode 100644
index e42f344ea5fa..000000000000
--- a/drivers/staging/sxg/README
+++ /dev/null
@@ -1,12 +0,0 @@
-This is the rough cut at a driver for the Alacritech SLIC Technology
-Non-Accelerated 10Gbe network driver.
-
-TODO:
-	- remove wrappers
-	- checkpatch.pl cleanups
-	- new functionality that the card needs
-	- remove reliance on x86
-
-Please send patches to:
-	Greg Kroah-Hartman <gregkh@suse.de>
-for any cleanups that you do to this driver.
diff --git a/drivers/staging/sxg/sxg.c b/drivers/staging/sxg/sxg.c
deleted file mode 100644
index 395e876c7dc9..000000000000
--- a/drivers/staging/sxg/sxg.c
+++ /dev/null
@@ -1,4543 +0,0 @@
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright (C) 2000-2008 Alacritech, Inc. All rights reserved. | ||
4 | * | ||
5 | * Redistribution and use in source and binary forms, with or without | ||
6 | * modification, are permitted provided that the following conditions | ||
7 | * are met: | ||
8 | * | ||
9 | * 1. Redistributions of source code must retain the above copyright | ||
10 | * notice, this list of conditions and the following disclaimer. | ||
11 | * 2. Redistributions in binary form must reproduce the above | ||
12 | * copyright notice, this list of conditions and the following | ||
13 | * disclaimer in the documentation and/or other materials provided | ||
14 | * with the distribution. | ||
15 | * | ||
16 | * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY | ||
17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | ||
19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR | ||
20 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
23 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
24 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | ||
25 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | ||
26 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
27 | * SUCH DAMAGE. | ||
28 | * | ||
29 | * The views and conclusions contained in the software and documentation | ||
30 | * are those of the authors and should not be interpreted as representing | ||
31 | * official policies, either expressed or implied, of Alacritech, Inc. | ||
32 | * | ||
33 | * Parts developed by LinSysSoft Sahara team | ||
34 | * | ||
35 | **************************************************************************/ | ||
36 | |||
37 | /* | ||
38 | * FILENAME: sxg.c | ||
39 | * | ||
40 | * The SXG driver for Alacritech's 10Gbe products. | ||
41 | * | ||
42 | * NOTE: This is the standard, non-accelerated version of Alacritech's | ||
43 | * IS-NIC driver. | ||
44 | */ | ||
45 | |||
46 | #include <linux/kernel.h> | ||
47 | #include <linux/string.h> | ||
48 | #include <linux/errno.h> | ||
49 | #include <linux/module.h> | ||
50 | #include <linux/moduleparam.h> | ||
51 | #include <linux/firmware.h> | ||
52 | #include <linux/ioport.h> | ||
53 | #include <linux/slab.h> | ||
54 | #include <linux/interrupt.h> | ||
55 | #include <linux/timer.h> | ||
56 | #include <linux/pci.h> | ||
57 | #include <linux/spinlock.h> | ||
58 | #include <linux/init.h> | ||
59 | #include <linux/netdevice.h> | ||
60 | #include <linux/etherdevice.h> | ||
61 | #include <linux/ethtool.h> | ||
62 | #include <linux/skbuff.h> | ||
63 | #include <linux/delay.h> | ||
64 | #include <linux/types.h> | ||
65 | #include <linux/dma-mapping.h> | ||
66 | #include <linux/mii.h> | ||
67 | #include <linux/ip.h> | ||
68 | #include <linux/in.h> | ||
69 | #include <linux/tcp.h> | ||
70 | #include <linux/ipv6.h> | ||
71 | |||
72 | #define SLIC_GET_STATS_ENABLED 0 | ||
73 | #define LINUX_FREES_ADAPTER_RESOURCES 1 | ||
74 | #define SXG_OFFLOAD_IP_CHECKSUM 0 | ||
75 | #define SXG_POWER_MANAGEMENT_ENABLED 0 | ||
76 | #define VPCI 0 | ||
77 | #define ATK_DEBUG 1 | ||
78 | #define SXG_UCODE_DEBUG 0 | ||
79 | |||
80 | |||
81 | #include "sxg_os.h" | ||
82 | #include "sxghw.h" | ||
83 | #include "sxghif.h" | ||
84 | #include "sxg.h" | ||
85 | #include "sxgdbg.h" | ||
86 | #include "sxgphycode-1.2.h" | ||
87 | |||
88 | static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size, | ||
89 | enum sxg_buffer_type BufferType); | ||
90 | static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter, | ||
91 | void *RcvBlock, | ||
92 | dma_addr_t PhysicalAddress, | ||
93 | u32 Length); | ||
94 | static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter, | ||
95 | struct sxg_scatter_gather *SxgSgl, | ||
96 | dma_addr_t PhysicalAddress, | ||
97 | u32 Length); | ||
98 | |||
99 | static void sxg_mcast_init_crc32(void); | ||
100 | static int sxg_entry_open(struct net_device *dev); | ||
101 | static int sxg_second_open(struct net_device * dev); | ||
102 | static int sxg_entry_halt(struct net_device *dev); | ||
103 | static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | ||
104 | static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev); | ||
105 | static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb); | ||
106 | static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, | ||
107 | struct sxg_scatter_gather *SxgSgl); | ||
108 | |||
109 | static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done, | ||
110 | int budget); | ||
111 | static void sxg_interrupt(struct adapter_t *adapter); | ||
112 | static int sxg_poll(struct napi_struct *napi, int budget); | ||
113 | static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId); | ||
114 | static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId, | ||
115 | int *sxg_napi_continue, int *work_done, int budget); | ||
116 | static void sxg_complete_slow_send(struct adapter_t *adapter); | ||
117 | static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, | ||
118 | struct sxg_event *Event); | ||
119 | static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus); | ||
120 | static bool sxg_mac_filter(struct adapter_t *adapter, | ||
121 | struct ether_header *EtherHdr, ushort length); | ||
122 | static struct net_device_stats *sxg_get_stats(struct net_device * dev); | ||
123 | void sxg_free_resources(struct adapter_t *adapter); | ||
124 | void sxg_free_rcvblocks(struct adapter_t *adapter); | ||
125 | void sxg_free_sgl_buffers(struct adapter_t *adapter); | ||
126 | void sxg_unmap_resources(struct adapter_t *adapter); | ||
127 | void sxg_free_mcast_addrs(struct adapter_t *adapter); | ||
128 | void sxg_collect_statistics(struct adapter_t *adapter); | ||
129 | static int sxg_register_interrupt(struct adapter_t *adapter); | ||
130 | static void sxg_remove_isr(struct adapter_t *adapter); | ||
131 | static irqreturn_t sxg_isr(int irq, void *dev_id); | ||
132 | |||
133 | static void sxg_watchdog(unsigned long data); | ||
134 | static void sxg_update_link_status (struct work_struct *work); | ||
135 | |||
136 | #define XXXTODO 0 | ||
137 | |||
138 | #if XXXTODO | ||
139 | static int sxg_mac_set_address(struct net_device *dev, void *ptr); | ||
140 | #endif | ||
141 | static void sxg_mcast_set_list(struct net_device *dev); | ||
142 | |||
143 | static int sxg_adapter_set_hwaddr(struct adapter_t *adapter); | ||
144 | |||
145 | static int sxg_initialize_adapter(struct adapter_t *adapter); | ||
146 | static void sxg_stock_rcv_buffers(struct adapter_t *adapter); | ||
147 | static void sxg_complete_descriptor_blocks(struct adapter_t *adapter, | ||
148 | unsigned char Index); | ||
149 | int sxg_change_mtu (struct net_device *netdev, int new_mtu); | ||
150 | static int sxg_initialize_link(struct adapter_t *adapter); | ||
151 | static int sxg_phy_init(struct adapter_t *adapter); | ||
152 | static void sxg_link_event(struct adapter_t *adapter); | ||
153 | static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter); | ||
154 | static void sxg_link_state(struct adapter_t *adapter, | ||
155 | enum SXG_LINK_STATE LinkState); | ||
156 | static int sxg_write_mdio_reg(struct adapter_t *adapter, | ||
157 | u32 DevAddr, u32 RegAddr, u32 Value); | ||
158 | static int sxg_read_mdio_reg(struct adapter_t *adapter, | ||
159 | u32 DevAddr, u32 RegAddr, u32 *pValue); | ||
160 | static void sxg_set_mcast_addr(struct adapter_t *adapter); | ||
161 | |||
162 | static unsigned int sxg_first_init = 1; | ||
163 | static char *sxg_banner = | ||
164 | "Alacritech SLIC Technology(tm) Server and Storage \ | ||
165 | 10Gbe Accelerator (Non-Accelerated)\n"; | ||
166 | |||
167 | static int sxg_debug = 1; | ||
168 | static int debug = -1; | ||
169 | static struct net_device *head_netdevice = NULL; | ||
170 | |||
171 | static struct sxgbase_driver sxg_global = { | ||
172 | .dynamic_intagg = 1, | ||
173 | }; | ||
174 | static int intagg_delay = 100; | ||
175 | static u32 dynamic_intagg = 0; | ||
176 | |||
177 | char sxg_driver_name[] = "sxg_nic"; | ||
178 | #define DRV_AUTHOR "Alacritech, Inc. Engineering" | ||
179 | #define DRV_DESCRIPTION \ | ||
180 | "Alacritech SLIC Technology(tm) Non-Accelerated 10Gbe Driver" | ||
181 | #define DRV_COPYRIGHT \ | ||
182 | "Copyright 2000-2008 Alacritech, Inc. All rights reserved." | ||
183 | |||
184 | MODULE_AUTHOR(DRV_AUTHOR); | ||
185 | MODULE_DESCRIPTION(DRV_DESCRIPTION); | ||
186 | MODULE_LICENSE("GPL"); | ||
187 | |||
188 | module_param(dynamic_intagg, int, 0); | ||
189 | MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting"); | ||
190 | module_param(intagg_delay, int, 0); | ||
191 | MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay"); | ||
192 | |||
193 | static struct pci_device_id sxg_pci_tbl[] __devinitdata = { | ||
194 | {PCI_DEVICE(SXG_VENDOR_ID, SXG_DEVICE_ID)}, | ||
195 | {0,} | ||
196 | }; | ||
197 | |||
198 | MODULE_DEVICE_TABLE(pci, sxg_pci_tbl); | ||
199 | |||
200 | static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush) | ||
201 | { | ||
202 | writel(value, reg); | ||
203 | if (flush) | ||
204 | mb(); | ||
205 | } | ||
206 | |||
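/*
 * sxg_reg64_write - Write a 64-bit value to a ucode register pair.
 *
 * The upper 32 bits are written to the UcodeRegs[cpu].Upper register and
 * the lower 32 bits to the target register itself.  Both writes are done
 * under Bit64RegLock so two concurrent 64-bit writes cannot interleave
 * their halves.
 */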
207 | static inline void sxg_reg64_write(struct adapter_t *adapter, void __iomem *reg, | ||
208 | u64 value, u32 cpu) | ||
209 | { | ||
210 | u32 value_high = (u32) (value >> 32); | ||
211 | u32 value_low = (u32) (value & 0x00000000FFFFFFFF); | ||
212 | unsigned long flags; | ||
213 | |||
214 | spin_lock_irqsave(&adapter->Bit64RegLock, flags); | ||
215 | writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper)); | ||
216 | writel(value_low, reg); | ||
217 | spin_unlock_irqrestore(&adapter->Bit64RegLock, flags); | ||
218 | } | ||
219 | |||
220 | static void sxg_init_driver(void) | ||
221 | { | ||
222 | if (sxg_first_init) { | ||
223 | DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n", | ||
224 | __func__, jiffies); | ||
225 | sxg_first_init = 0; | ||
226 | spin_lock_init(&sxg_global.driver_lock); | ||
227 | } | ||
228 | } | ||
229 | |||
230 | static void sxg_dbg_macaddrs(struct adapter_t *adapter) | ||
231 | { | ||
232 | DBG_ERROR(" (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", | ||
233 | adapter->netdev->name, adapter->currmacaddr[0], | ||
234 | adapter->currmacaddr[1], adapter->currmacaddr[2], | ||
235 | adapter->currmacaddr[3], adapter->currmacaddr[4], | ||
236 | adapter->currmacaddr[5]); | ||
237 | DBG_ERROR(" (%s) mac %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", | ||
238 | adapter->netdev->name, adapter->macaddr[0], | ||
239 | adapter->macaddr[1], adapter->macaddr[2], | ||
240 | adapter->macaddr[3], adapter->macaddr[4], | ||
241 | adapter->macaddr[5]); | ||
242 | return; | ||
243 | } | ||
244 | |||
245 | /* SXG Globals */ | ||
246 | static struct sxg_driver SxgDriver; | ||
247 | |||
248 | #ifdef ATKDBG | ||
249 | static struct sxg_trace_buffer LSxgTraceBuffer; | ||
250 | #endif /* ATKDBG */ | ||
251 | static struct sxg_trace_buffer *SxgTraceBuffer = NULL; | ||
252 | |||
253 | /* | ||
254 | * MSI Related API's | ||
255 | */ | ||
256 | int sxg_register_intr(struct adapter_t *adapter); | ||
257 | int sxg_enable_msi_x(struct adapter_t *adapter); | ||
258 | int sxg_add_msi_isr(struct adapter_t *adapter); | ||
259 | void sxg_remove_msix_isr(struct adapter_t *adapter); | ||
260 | int sxg_set_interrupt_capability(struct adapter_t *adapter); | ||
261 | |||
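/*
 * sxg_set_interrupt_capability - Probe for MSI-X support.
 *
 * Attempts to enable a single MSI-X vector via sxg_enable_msi_x() and
 * records the result in adapter->msi_enabled.  When MSI-X cannot be
 * enabled, sxg_register_intr() later falls back to the legacy pin
 * interrupt path (sxg_register_interrupt()).
 */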
262 | int sxg_set_interrupt_capability(struct adapter_t *adapter) | ||
263 | { | ||
264 | int ret; | ||
265 | |||
266 | ret = sxg_enable_msi_x(adapter); | ||
267 | if (ret != STATUS_SUCCESS) { | ||
268 | adapter->msi_enabled = FALSE; | ||
269 | DBG_ERROR("sxg_set_interrupt_capability MSI-X Disable\n"); | ||
270 | } else { | ||
271 | adapter->msi_enabled = TRUE; | ||
272 | DBG_ERROR("sxg_set_interrupt_capability MSI-X Enable\n"); | ||
273 | } | ||
274 | return ret; | ||
275 | } | ||
276 | |||
277 | int sxg_register_intr(struct adapter_t *adapter) | ||
278 | { | ||
279 | int ret = 0; | ||
280 | |||
281 | if (adapter->msi_enabled) { | ||
282 | ret = sxg_add_msi_isr(adapter); | ||
283 | } | ||
284 | else { | ||
285 | DBG_ERROR("MSI-X Enable Failed. Using Pin INT\n"); | ||
286 | ret = sxg_register_interrupt(adapter); | ||
287 | if (ret != STATUS_SUCCESS) { | ||
288 | DBG_ERROR("sxg_register_interrupt Failed\n"); | ||
289 | } | ||
290 | } | ||
291 | return ret; | ||
292 | } | ||
293 | |||
294 | int sxg_enable_msi_x(struct adapter_t *adapter) | ||
295 | { | ||
296 | int ret; | ||
297 | |||
298 | adapter->nr_msix_entries = 1; | ||
299 | adapter->msi_entries = kmalloc(adapter->nr_msix_entries * | ||
300 | sizeof(struct msix_entry),GFP_KERNEL); | ||
301 | if (!adapter->msi_entries) { | ||
302 | DBG_ERROR("%s:MSI Entries memory allocation Failed\n",__func__); | ||
303 | return -ENOMEM; | ||
304 | } | ||
305 | memset(adapter->msi_entries, 0, adapter->nr_msix_entries * | ||
306 | sizeof(struct msix_entry)); | ||
307 | |||
308 | ret = pci_enable_msix(adapter->pcidev, adapter->msi_entries, | ||
309 | adapter->nr_msix_entries); | ||
310 | if (ret) { | ||
311 | DBG_ERROR("Enabling MSI-X with %d vectors failed\n", | ||
312 | adapter->nr_msix_entries); | ||
313 | /*Should try with less vector returned.*/ | ||
314 | kfree(adapter->msi_entries); | ||
315 | return STATUS_FAILURE; /*MSI-X Enable failed.*/ | ||
316 | } | ||
317 | return (STATUS_SUCCESS); | ||
318 | } | ||
319 | |||
320 | int sxg_add_msi_isr(struct adapter_t *adapter) | ||
321 | { | ||
322 | int ret,i; | ||
323 | |||
324 | if (!adapter->intrregistered) { | ||
325 | spin_unlock_irqrestore(&sxg_global.driver_lock, | ||
326 | sxg_global.flags); | ||
327 | for (i=0; i<adapter->nr_msix_entries; i++) { | ||
328 | ret = request_irq (adapter->msi_entries[i].vector, | ||
329 | sxg_isr, | ||
330 | IRQF_SHARED, | ||
331 | adapter->netdev->name, | ||
332 | adapter->netdev); | ||
333 | if (ret) { | ||
334 | spin_lock_irqsave(&sxg_global.driver_lock, | ||
335 | sxg_global.flags); | ||
336 | DBG_ERROR("sxg: MSI-X request_irq (%s) " | ||
337 | "FAILED [%x]\n", adapter->netdev->name, | ||
338 | ret); | ||
339 | return (ret); | ||
340 | } | ||
341 | } | ||
342 | } | ||
343 | spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags); | ||
344 | adapter->msi_enabled = TRUE; | ||
345 | adapter->intrregistered = 1; | ||
346 | adapter->IntRegistered = TRUE; | ||
347 | return (STATUS_SUCCESS); | ||
348 | } | ||
349 | |||
350 | void sxg_remove_msix_isr(struct adapter_t *adapter) | ||
351 | { | ||
352 | int i,vector; | ||
353 | struct net_device *netdev = adapter->netdev; | ||
354 | |||
355 | for(i=0; i< adapter->nr_msix_entries;i++) | ||
356 | { | ||
357 | vector = adapter->msi_entries[i].vector; | ||
358 | DBG_ERROR("%s : Freeing IRQ vector#%d\n",__func__,vector); | ||
359 | free_irq(vector,netdev); | ||
360 | } | ||
361 | } | ||
362 | |||
363 | |||
364 | static void sxg_remove_isr(struct adapter_t *adapter) | ||
365 | { | ||
366 | struct net_device *netdev = adapter->netdev; | ||
367 | if (adapter->msi_enabled) | ||
368 | sxg_remove_msix_isr(adapter); | ||
369 | else | ||
370 | free_irq(adapter->netdev->irq, netdev); | ||
371 | } | ||
372 | |||
373 | void sxg_reset_interrupt_capability(struct adapter_t *adapter) | ||
374 | { | ||
375 | if (adapter->msi_enabled) { | ||
376 | pci_disable_msix(adapter->pcidev); | ||
377 | kfree(adapter->msi_entries); | ||
378 | adapter->msi_entries = NULL; | ||
379 | } | ||
380 | return; | ||
381 | } | ||
382 | |||
383 | /* | ||
384 | * sxg_download_microcode | ||
385 | * | ||
386 | * Download Microcode to Sahara adapter using the Linux | ||
387 | * Firmware module to get the ucode.sys file. | ||
388 | * | ||
389 | * Arguments - | ||
390 | * adapter - A pointer to our adapter structure | ||
391 | * UcodeSel - microcode file selection | ||
392 | * | ||
393 | * Return | ||
394 | * int | ||
395 | */ | ||
396 | static bool sxg_download_microcode(struct adapter_t *adapter, | ||
397 | enum SXG_UCODE_SEL UcodeSel) | ||
398 | { | ||
399 | const struct firmware *fw; | ||
400 | const char *file = ""; | ||
401 | struct sxg_hw_regs *HwRegs = adapter->HwRegs; | ||
402 | int ret; | ||
403 | int ucode_start; | ||
404 | u32 Section; | ||
405 | u32 ThisSectionSize; | ||
406 | u32 instruction = 0; | ||
407 | u32 BaseAddress, AddressOffset, Address; | ||
408 | /* u32 Failure; */ | ||
409 | u32 ValueRead; | ||
410 | u32 i; | ||
411 | u32 index = 0; | ||
412 | u32 num_sections = 0; | ||
413 | u32 sectionSize[16]; | ||
414 | u32 sectionStart[16]; | ||
415 | |||
416 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod", | ||
417 | adapter, 0, 0, 0); | ||
418 | |||
419 | /* | ||
420 | * This routine is only implemented to download the microcode | ||
421 | * for the Revision B Sahara chip. Rev A and Diagnostic | ||
422 | * microcode is not supported at this time. If Rev A or | ||
423 | * diagnostic ucode is required, this routine will obviously | ||
424 | * need to change. Also, eventually need to add support for | ||
425 | * Rev B checked version of ucode. That's easy enough once | ||
426 | * the free version of Rev B works. | ||
427 | */ | ||
428 | ASSERT(UcodeSel == SXG_UCODE_SYSTEM); | ||
429 | ASSERT(adapter->asictype == SAHARA_REV_B); | ||
430 | #if SXG_UCODE_DEBUG | ||
431 | file = "sxg/saharadbgdownloadB.sys"; | ||
432 | #else | ||
433 | file = "sxg/saharadownloadB.sys"; | ||
434 | #endif | ||
435 | ret = request_firmware(&fw, file, &adapter->pcidev->dev); | ||
436 | if (ret) { | ||
437 | DBG_ERROR("%s SXG_NIC: Failed to load firmware %s\n", __func__,file); | ||
438 | return ret; | ||
439 | } | ||
440 | |||
441 | /* | ||
442 | * The microcode .sys file starts with a 4 byte word containing | ||
443 | * the number of sections. That is followed by "num_sections" 4 byte | ||
444 | * words containing each "section" size. That is followed by num_sections | ||
445 | * 4 byte words containing each section "start" address. | ||
446 | * | ||
447 | * Following the above header, the .sys file contains the sections | ||
448 | * themselves, each of the size specified above, as newline delineated | ||
449 | * 12 byte microcode instructions. | ||
450 | */ | ||
451 | num_sections = *(u32 *)(fw->data + index); | ||
452 | index += 4; | ||
453 | ASSERT(num_sections <= 3); | ||
454 | for (i = 0; i < num_sections; i++) { | ||
455 | sectionSize[i] = *(u32 *)(fw->data + index); | ||
456 | index += 4; | ||
457 | } | ||
458 | for (i = 0; i < num_sections; i++) { | ||
459 | sectionStart[i] = *(u32 *)(fw->data + index); | ||
460 | index += 4; | ||
461 | } | ||
462 | |||
463 | /* First, reset the card */ | ||
464 | WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH); | ||
465 | udelay(50); | ||
466 | HwRegs = adapter->HwRegs; | ||
467 | |||
468 | /* | ||
469 | * Download each section of the microcode as specified in | ||
470 | * sectionSize[index] to sectionStart[index] address. As | ||
471 | * described above, the .sys file contains 12 byte word | ||
472 | * microcode instructions. The *download.sys file is generated | ||
473 | * using the objtosys.exe utility that was built for Sahara | ||
474 | * microcode. | ||
475 | */ | ||
476 | /* See usage of this below when we read back for parity */ | ||
477 | ucode_start = index; | ||
478 | instruction = *(u32 *)(fw->data + index); | ||
479 | index += 4; | ||
480 | |||
481 | for (Section = 0; Section < num_sections; Section++) { | ||
482 | BaseAddress = sectionStart[Section]; | ||
483 | /* Size in instructions */ | ||
484 | ThisSectionSize = sectionSize[Section] / 12; | ||
485 | for (AddressOffset = 0; AddressOffset < ThisSectionSize; | ||
486 | AddressOffset++) { | ||
487 | u32 first_instr = 0; /* See comment below */ | ||
488 | |||
489 | Address = BaseAddress + AddressOffset; | ||
490 | ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0); | ||
491 | /* Write instruction bits 31 - 0 (low) */ | ||
492 | first_instr = instruction; | ||
493 | WRITE_REG(HwRegs->UcodeDataLow, instruction, FLUSH); | ||
494 | instruction = *(u32 *)(fw->data + index); | ||
495 | index += 4; /* Advance to the "next" instruction */ | ||
496 | |||
497 | /* Write instruction bits 63-32 (middle) */ | ||
498 | WRITE_REG(HwRegs->UcodeDataMiddle, instruction, FLUSH); | ||
499 | instruction = *(u32 *)(fw->data + index); | ||
500 | index += 4; /* Advance to the "next" instruction */ | ||
501 | |||
502 | /* Write instruction bits 95-64 (high) */ | ||
503 | WRITE_REG(HwRegs->UcodeDataHigh, instruction, FLUSH); | ||
504 | instruction = *(u32 *)(fw->data + index); | ||
505 | index += 4; /* Advance to the "next" instruction */ | ||
506 | |||
507 | /* Write instruction address with the WRITE bit set */ | ||
508 | WRITE_REG(HwRegs->UcodeAddr, | ||
509 | (Address | MICROCODE_ADDRESS_WRITE), FLUSH); | ||
510 | /* | ||
511 | * Sahara bug in the ucode download logic - the write to DataLow | ||
512 | * for the next instruction could get corrupted. To avoid this, | ||
513 | * write to DataLow again for this instruction (which may get | ||
514 | * corrupted, but it doesn't matter), then increment the address | ||
515 | * and write the data for the next instruction to DataLow. That | ||
516 | * write should succeed. | ||
517 | */ | ||
518 | WRITE_REG(HwRegs->UcodeDataLow, first_instr, FLUSH); | ||
519 | } | ||
520 | } | ||
521 | /* | ||
522 | * Now repeat the entire operation reading the instruction back and | ||
523 | * checking for parity errors | ||
524 | */ | ||
525 | index = ucode_start; | ||
526 | |||
527 | for (Section = 0; Section < num_sections; Section++) { | ||
528 | BaseAddress = sectionStart[Section]; | ||
529 | /* Size in instructions */ | ||
530 | ThisSectionSize = sectionSize[Section] / 12; | ||
531 | for (AddressOffset = 0; AddressOffset < ThisSectionSize; | ||
532 | AddressOffset++) { | ||
533 | Address = BaseAddress + AddressOffset; | ||
534 | /* Write the address with the READ bit set */ | ||
535 | WRITE_REG(HwRegs->UcodeAddr, | ||
536 | (Address | MICROCODE_ADDRESS_READ), FLUSH); | ||
537 | /* Read it back and check parity bit. */ | ||
538 | READ_REG(HwRegs->UcodeAddr, ValueRead); | ||
539 | if (ValueRead & MICROCODE_ADDRESS_PARITY) { | ||
540 | DBG_ERROR("sxg: %s PARITY ERROR\n", | ||
541 | __func__); | ||
542 | |||
543 | return FALSE; /* Parity error */ | ||
544 | } | ||
545 | ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address); | ||
546 | /* Read the instruction back and compare */ | ||
547 | /* First instruction */ | ||
548 | instruction = *(u32 *)(fw->data + index); | ||
549 | index += 4; | ||
550 | READ_REG(HwRegs->UcodeDataLow, ValueRead); | ||
551 | if (ValueRead != instruction) { | ||
552 | DBG_ERROR("sxg: %s MISCOMPARE LOW\n", | ||
553 | __func__); | ||
554 | return FALSE; /* Miscompare */ | ||
555 | } | ||
556 | instruction = *(u32 *)(fw->data + index); | ||
557 | index += 4; | ||
558 | READ_REG(HwRegs->UcodeDataMiddle, ValueRead); | ||
559 | if (ValueRead != instruction) { | ||
560 | DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n", | ||
561 | __func__); | ||
562 | return FALSE; /* Miscompare */ | ||
563 | } | ||
564 | instruction = *(u32 *)(fw->data + index); | ||
565 | index += 4; | ||
566 | READ_REG(HwRegs->UcodeDataHigh, ValueRead); | ||
567 | if (ValueRead != instruction) { | ||
568 | DBG_ERROR("sxg: %s MISCOMPARE HIGH\n", | ||
569 | __func__); | ||
570 | return FALSE; /* Miscompare */ | ||
571 | } | ||
572 | } | ||
573 | } | ||
574 | |||
575 | /* download finished */ | ||
576 | release_firmware(fw); | ||
577 | /* Everything OK, Go. */ | ||
578 | WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH); | ||
579 | |||
580 | /* | ||
581 | * Poll the CardUp register to wait for microcode to initialize | ||
582 | * Give up after 10,000 attempts (500ms). | ||
583 | */ | ||
584 | for (i = 0; i < 10000; i++) { | ||
585 | udelay(50); | ||
586 | READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead); | ||
587 | if (ValueRead == 0xCAFE) { | ||
588 | break; | ||
589 | } | ||
590 | } | ||
591 | if (i == 10000) { | ||
592 | DBG_ERROR("sxg: %s TIMEOUT bringing up card - verify MICROCODE\n", __func__); | ||
593 | |||
594 | return FALSE; /* Timeout */ | ||
595 | } | ||
596 | /* | ||
597 | * Now write the LoadSync register. This is used to | ||
598 | * synchronize with the card so it can scribble on the memory | ||
599 | * that contained 0xCAFE from the "CardUp" step above | ||
600 | */ | ||
601 | if (UcodeSel == SXG_UCODE_SYSTEM) { | ||
602 | WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH); | ||
603 | } | ||
604 | |||
605 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd", | ||
606 | adapter, 0, 0, 0); | ||
607 | return (TRUE); | ||
608 | } | ||
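/*
 * Illustrative sketch (not part of the original driver): the .sys firmware
 * layout described and parsed inline by sxg_download_microcode() above,
 * pulled out into a stand-alone helper.  The function and variable names
 * here are made up for illustration; the driver itself keeps the parsed
 * values in local sectionSize[16]/sectionStart[16] arrays and asserts
 * num_sections <= 3.
 */
static u32 example_parse_ucode_header(const u8 *data, u32 *sizes, u32 *starts,
				      u32 max_sections)
{
	u32 num_sections = *(const u32 *)data;	/* word 0: section count */
	u32 index = 4;
	u32 i;

	if (num_sections > max_sections)
		return 0;
	for (i = 0; i < num_sections; i++, index += 4)	/* per-section sizes  */
		sizes[i] = *(const u32 *)(data + index);
	for (i = 0; i < num_sections; i++, index += 4)	/* per-section starts */
		starts[i] = *(const u32 *)(data + index);

	/* The 12-byte microcode instructions for each section follow here. */
	return index;
}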
609 | |||
610 | /* | ||
611 | * sxg_allocate_resources - Allocate memory and locks | ||
612 | * | ||
613 | * Arguments - | ||
614 | * adapter - A pointer to our adapter structure | ||
615 | * | ||
616 | * Return - int | ||
617 | */ | ||
618 | static int sxg_allocate_resources(struct adapter_t *adapter) | ||
619 | { | ||
620 | int status = STATUS_SUCCESS; | ||
621 | u32 RssIds, IsrCount; | ||
622 | /* struct sxg_xmt_ring *XmtRing; */ | ||
623 | /* struct sxg_rcv_ring *RcvRing; */ | ||
624 | |||
625 | DBG_ERROR("%s ENTER\n", __func__); | ||
626 | |||
627 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes", | ||
628 | adapter, 0, 0, 0); | ||
629 | |||
630 | /* Windows tells us how many CPUs it plans to use for */ | ||
631 | /* RSS */ | ||
632 | RssIds = SXG_RSS_CPU_COUNT(adapter); | ||
633 | IsrCount = adapter->msi_enabled ? RssIds : 1; | ||
634 | |||
635 | DBG_ERROR("%s Setup the spinlocks\n", __func__); | ||
636 | |||
637 | /* Allocate spinlocks and initialize listheads first. */ | ||
638 | spin_lock_init(&adapter->RcvQLock); | ||
639 | spin_lock_init(&adapter->SglQLock); | ||
640 | spin_lock_init(&adapter->XmtZeroLock); | ||
641 | spin_lock_init(&adapter->Bit64RegLock); | ||
642 | spin_lock_init(&adapter->AdapterLock); | ||
643 | atomic_set(&adapter->pending_allocations, 0); | ||
644 | |||
645 | DBG_ERROR("%s Setup the lists\n", __func__); | ||
646 | |||
647 | InitializeListHead(&adapter->FreeRcvBuffers); | ||
648 | InitializeListHead(&adapter->FreeRcvBlocks); | ||
649 | InitializeListHead(&adapter->AllRcvBlocks); | ||
650 | InitializeListHead(&adapter->FreeSglBuffers); | ||
651 | InitializeListHead(&adapter->AllSglBuffers); | ||
652 | |||
653 | /* | ||
654 | * Mark these basic allocations done. This flags essentially | ||
655 | * tells the SxgFreeResources routine that it can grab spinlocks | ||
656 | * and reference listheads. | ||
657 | */ | ||
658 | adapter->BasicAllocations = TRUE; | ||
659 | /* | ||
660 | * Main allocation loop. Start with the maximum supported by | ||
661 | * the microcode and back off if memory allocation | ||
662 | * fails. If we hit a minimum, fail. | ||
663 | */ | ||
664 | |||
665 | for (;;) { | ||
666 | DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__, | ||
667 | (unsigned int)(sizeof(struct sxg_xmt_ring) * 1)); | ||
668 | |||
669 | /* | ||
670 | * Start with big items first - receive and transmit rings. | ||
671 | * At the moment I'm going to keep the ring size fixed and | ||
672 | * adjust the TCBs if we fail. Later we might | ||
673 | * consider reducing the ring size as well.. | ||
674 | */ | ||
675 | adapter->XmtRings = pci_alloc_consistent(adapter->pcidev, | ||
676 | sizeof(struct sxg_xmt_ring) * | ||
677 | 1, | ||
678 | &adapter->PXmtRings); | ||
679 | DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings); | ||
680 | |||
681 | if (!adapter->XmtRings) { | ||
682 | goto per_tcb_allocation_failed; | ||
683 | } | ||
684 | memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1); | ||
685 | |||
686 | DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__, | ||
687 | (unsigned int)(sizeof(struct sxg_rcv_ring) * 1)); | ||
688 | adapter->RcvRings = | ||
689 | pci_alloc_consistent(adapter->pcidev, | ||
690 | sizeof(struct sxg_rcv_ring) * 1, | ||
691 | &adapter->PRcvRings); | ||
692 | DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings); | ||
693 | if (!adapter->RcvRings) { | ||
694 | goto per_tcb_allocation_failed; | ||
695 | } | ||
696 | memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1); | ||
697 | adapter->ucode_stats = kzalloc(sizeof(struct sxg_ucode_stats), GFP_ATOMIC); | ||
698 | adapter->pucode_stats = pci_map_single(adapter->pcidev, | ||
699 | adapter->ucode_stats, | ||
700 | sizeof(struct sxg_ucode_stats), | ||
701 | PCI_DMA_FROMDEVICE); | ||
702 | // memset(adapter->ucode_stats, 0, sizeof(struct sxg_ucode_stats)); | ||
703 | break; | ||
704 | |||
705 | per_tcb_allocation_failed: | ||
706 | /* an allocation failed. Free any successful allocations. */ | ||
707 | if (adapter->XmtRings) { | ||
708 | pci_free_consistent(adapter->pcidev, | ||
709 | sizeof(struct sxg_xmt_ring) * 1, | ||
710 | adapter->XmtRings, | ||
711 | adapter->PXmtRings); | ||
712 | adapter->XmtRings = NULL; | ||
713 | } | ||
714 | if (adapter->RcvRings) { | ||
715 | pci_free_consistent(adapter->pcidev, | ||
716 | sizeof(struct sxg_rcv_ring) * 1, | ||
717 | adapter->RcvRings, | ||
718 | adapter->PRcvRings); | ||
719 | adapter->RcvRings = NULL; | ||
720 | } | ||
721 | /* Loop around and try again.... */ | ||
722 | if (adapter->ucode_stats) { | ||
723 | pci_unmap_single(adapter->pcidev, | ||
724 | sizeof(struct sxg_ucode_stats), | ||
725 | adapter->pucode_stats, PCI_DMA_FROMDEVICE); | ||
726 | adapter->ucode_stats = NULL; | ||
727 | } | ||
728 | |||
729 | } | ||
730 | |||
731 | DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__); | ||
732 | /* Initialize rcv zero and xmt zero rings */ | ||
733 | SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE); | ||
734 | SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE); | ||
735 | |||
736 | /* Sanity check receive data structure format */ | ||
737 | /* ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) || | ||
738 | (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); */ | ||
739 | ASSERT(sizeof(struct sxg_rcv_descriptor_block) == | ||
740 | SXG_RCV_DESCRIPTOR_BLOCK_SIZE); | ||
741 | |||
742 | DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__, | ||
743 | (unsigned int)(sizeof(struct sxg_event_ring) * RssIds)); | ||
744 | |||
745 | /* Allocate event queues. */ | ||
746 | adapter->EventRings = pci_alloc_consistent(adapter->pcidev, | ||
747 | sizeof(struct sxg_event_ring) * | ||
748 | RssIds, | ||
749 | &adapter->PEventRings); | ||
750 | |||
751 | if (!adapter->EventRings) { | ||
752 | /* Caller will call SxgFreeAdapter to clean up above | ||
753 | * allocations */ | ||
754 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8", | ||
755 | adapter, SXG_MAX_ENTRIES, 0, 0); | ||
756 | status = STATUS_RESOURCES; | ||
757 | goto per_tcb_allocation_failed; | ||
758 | } | ||
759 | memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds); | ||
760 | |||
761 | DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount); | ||
762 | /* Allocate ISR */ | ||
763 | adapter->Isr = pci_alloc_consistent(adapter->pcidev, | ||
764 | IsrCount, &adapter->PIsr); | ||
765 | if (!adapter->Isr) { | ||
766 | /* Caller will call SxgFreeAdapter to clean up above | ||
767 | * allocations */ | ||
768 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9", | ||
769 | adapter, SXG_MAX_ENTRIES, 0, 0); | ||
770 | status = STATUS_RESOURCES; | ||
771 | goto per_tcb_allocation_failed; | ||
772 | } | ||
773 | memset(adapter->Isr, 0, sizeof(u32) * IsrCount); | ||
774 | |||
775 | DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n", | ||
776 | __func__, (unsigned int)sizeof(u32)); | ||
777 | |||
778 | /* Allocate shared XMT ring zero index location */ | ||
779 | adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev, | ||
780 | sizeof(u32), | ||
781 | &adapter-> | ||
782 | PXmtRingZeroIndex); | ||
783 | if (!adapter->XmtRingZeroIndex) { | ||
784 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF10", | ||
785 | adapter, SXG_MAX_ENTRIES, 0, 0); | ||
786 | status = STATUS_RESOURCES; | ||
787 | goto per_tcb_allocation_failed; | ||
788 | } | ||
789 | memset(adapter->XmtRingZeroIndex, 0, sizeof(u32)); | ||
790 | |||
791 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS", | ||
792 | adapter, SXG_MAX_ENTRIES, 0, 0); | ||
793 | |||
794 | return status; | ||
795 | } | ||
796 | |||
797 | /* | ||
798 | * sxg_config_pci - | ||
799 | * | ||
800 | * Set up PCI Configuration space | ||
801 | * | ||
802 | * Arguments - | ||
803 | * pcidev - A pointer to our adapter structure | ||
804 | */ | ||
805 | static void sxg_config_pci(struct pci_dev *pcidev) | ||
806 | { | ||
807 | u16 pci_command; | ||
808 | u16 new_command; | ||
809 | |||
810 | pci_read_config_word(pcidev, PCI_COMMAND, &pci_command); | ||
811 | DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command); | ||
812 | /* Set the command register */ | ||
813 | new_command = pci_command | ( | ||
814 | /* Memory Space Enable */ | ||
815 | PCI_COMMAND_MEMORY | | ||
816 | /* Bus master enable */ | ||
817 | PCI_COMMAND_MASTER | | ||
818 | /* Memory write and invalidate */ | ||
819 | PCI_COMMAND_INVALIDATE | | ||
820 | /* Parity error response */ | ||
821 | PCI_COMMAND_PARITY | | ||
822 | /* System ERR */ | ||
823 | PCI_COMMAND_SERR | | ||
824 | /* Fast back-to-back */ | ||
825 | PCI_COMMAND_FAST_BACK); | ||
826 | if (pci_command != new_command) { | ||
827 | DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n", | ||
828 | __func__, pci_command, new_command); | ||
829 | pci_write_config_word(pcidev, PCI_COMMAND, new_command); | ||
830 | } | ||
831 | } | ||
832 | |||
833 | /* | ||
834 | * sxg_read_config | ||
835 | * @adapter : Pointer to the adapter structure for the card | ||
836 | * This function will read the configuration data from EEPROM/FLASH | ||
837 | */ | ||
838 | static inline int sxg_read_config(struct adapter_t *adapter) | ||
839 | { | ||
840 | /* struct sxg_config data; */ | ||
841 | struct sxg_config *config; | ||
842 | struct sw_cfg_data *data; | ||
843 | dma_addr_t p_addr; | ||
844 | unsigned long status; | ||
845 | unsigned long i; | ||
846 | config = pci_alloc_consistent(adapter->pcidev, | ||
847 | sizeof(struct sxg_config), &p_addr); | ||
848 | |||
849 | if(!config) { | ||
850 | /* | ||
851 | * We can't get even this much memory. Raise hell. | ||
852 | * Get out of here | ||
853 | */ | ||
854 | printk(KERN_ERR"%s : Could not allocate memory for reading \ | ||
855 | EEPROM\n", __func__); | ||
856 | return -ENOMEM; | ||
857 | } | ||
858 | |||
859 | data = &config->SwCfg; | ||
860 | |||
861 | /* Initialize (reflective memory) status register */ | ||
862 | WRITE_REG(adapter->UcodeRegs[0].ConfigStat, SXG_CFG_TIMEOUT, TRUE); | ||
863 | |||
864 | /* Send request to fetch configuration data */ | ||
865 | WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0); | ||
866 | for(i=0; i<1000; i++) { | ||
867 | READ_REG(adapter->UcodeRegs[0].ConfigStat, status); | ||
868 | if (status != SXG_CFG_TIMEOUT) | ||
869 | break; | ||
870 | mdelay(1); /* Do we really need this */ | ||
871 | } | ||
872 | |||
873 | switch(status) { | ||
874 | /* Config read from EEPROM succeeded */ | ||
875 | case SXG_CFG_LOAD_EEPROM: | ||
876 | /* Config read from Flash succeeded */ | ||
877 | case SXG_CFG_LOAD_FLASH: | ||
878 | /* | ||
879 | * Copy the MAC address to adapter structure | ||
880 | * TODO: We are not doing the remaining part : FRU, etc | ||
881 | */ | ||
882 | memcpy(adapter->macaddr, data->MacAddr[0].MacAddr, | ||
883 | sizeof(struct sxg_config_mac)); | ||
884 | break; | ||
885 | case SXG_CFG_TIMEOUT: | ||
886 | case SXG_CFG_LOAD_INVALID: | ||
887 | case SXG_CFG_LOAD_ERROR: | ||
888 | default: /* Fix default handler later */ | ||
889 | printk(KERN_WARNING"%s : We could not read the config \ | ||
890 | word. Status = %ld\n", __func__, status); | ||
891 | break; | ||
892 | } | ||
893 | pci_free_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), data, | ||
894 | p_addr); | ||
895 | if (adapter->netdev) { | ||
896 | memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6); | ||
897 | memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6); | ||
898 | } | ||
899 | sxg_dbg_macaddrs(adapter); | ||
900 | |||
901 | return status; | ||
902 | } | ||
903 | |||
904 | static const struct net_device_ops sxg_netdev_ops = { | ||
905 | .ndo_open = sxg_entry_open, | ||
906 | .ndo_stop = sxg_entry_halt, | ||
907 | .ndo_start_xmit = sxg_send_packets, | ||
908 | .ndo_do_ioctl = sxg_ioctl, | ||
909 | .ndo_change_mtu = sxg_change_mtu, | ||
910 | .ndo_get_stats = sxg_get_stats, | ||
911 | .ndo_set_multicast_list = sxg_mcast_set_list, | ||
912 | .ndo_validate_addr = eth_validate_addr, | ||
913 | #if XXXTODO | ||
914 | .ndo_set_mac_address = sxg_mac_set_address, | ||
915 | #else | ||
916 | .ndo_set_mac_address = eth_mac_addr, | ||
917 | #endif | ||
918 | }; | ||
919 | |||
920 | static int sxg_entry_probe(struct pci_dev *pcidev, | ||
921 | const struct pci_device_id *pci_tbl_entry) | ||
922 | { | ||
923 | static int did_version = 0; | ||
924 | int err; | ||
925 | struct net_device *netdev; | ||
926 | struct adapter_t *adapter; | ||
927 | void __iomem *memmapped_ioaddr; | ||
928 | u32 status = 0; | ||
929 | ulong mmio_start = 0; | ||
930 | ulong mmio_len = 0; | ||
931 | unsigned char revision_id; | ||
932 | |||
933 | DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n", | ||
934 | __func__, jiffies, smp_processor_id()); | ||
935 | |||
936 | /* Initialize trace buffer */ | ||
937 | #ifdef ATKDBG | ||
938 | SxgTraceBuffer = &LSxgTraceBuffer; | ||
939 | SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY); | ||
940 | #endif | ||
941 | |||
942 | sxg_global.dynamic_intagg = dynamic_intagg; | ||
943 | |||
944 | err = pci_enable_device(pcidev); | ||
945 | |||
946 | DBG_ERROR("Call pci_enable_device(%p) status[%x]\n", pcidev, err); | ||
947 | if (err) { | ||
948 | return err; | ||
949 | } | ||
950 | |||
951 | if (sxg_debug > 0 && did_version++ == 0) { | ||
952 | printk(KERN_INFO "%s\n", sxg_banner); | ||
953 | printk(KERN_INFO "%s\n", SXG_DRV_VERSION); | ||
954 | } | ||
955 | |||
956 | pci_read_config_byte(pcidev, PCI_REVISION_ID, &revision_id); | ||
957 | |||
958 | if (!(err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)))) { | ||
959 | DBG_ERROR("pci_set_dma_mask(DMA_BIT_MASK(64)) successful\n"); | ||
960 | } else { | ||
961 | if ((err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)))) { | ||
962 | DBG_ERROR | ||
963 | ("No usable DMA configuration, aborting err[%x]\n", | ||
964 | err); | ||
965 | return err; | ||
966 | } | ||
967 | DBG_ERROR("pci_set_dma_mask(DMA_BIT_MASK(32)) successful\n"); | ||
968 | } | ||
969 | |||
970 | DBG_ERROR("Call pci_request_regions\n"); | ||
971 | |||
972 | err = pci_request_regions(pcidev, sxg_driver_name); | ||
973 | if (err) { | ||
974 | DBG_ERROR("pci_request_regions FAILED err[%x]\n", err); | ||
975 | return err; | ||
976 | } | ||
977 | |||
978 | DBG_ERROR("call pci_set_master\n"); | ||
979 | pci_set_master(pcidev); | ||
980 | |||
981 | DBG_ERROR("call alloc_etherdev\n"); | ||
982 | netdev = alloc_etherdev(sizeof(struct adapter_t)); | ||
983 | if (!netdev) { | ||
984 | err = -ENOMEM; | ||
985 | goto err_out_exit_sxg_probe; | ||
986 | } | ||
987 | DBG_ERROR("alloc_etherdev for slic netdev[%p]\n", netdev); | ||
988 | |||
989 | SET_NETDEV_DEV(netdev, &pcidev->dev); | ||
990 | |||
991 | pci_set_drvdata(pcidev, netdev); | ||
992 | adapter = netdev_priv(netdev); | ||
993 | if (revision_id == 1) { | ||
994 | adapter->asictype = SAHARA_REV_A; | ||
995 | } else if (revision_id == 2) { | ||
996 | adapter->asictype = SAHARA_REV_B; | ||
997 | } else { | ||
998 | ASSERT(0); | ||
999 | DBG_ERROR("%s Unexpected revision ID %x\n", __func__, revision_id); | ||
1000 | goto err_out_exit_sxg_probe; | ||
1001 | } | ||
1002 | adapter->netdev = netdev; | ||
1003 | adapter->pcidev = pcidev; | ||
1004 | |||
1005 | mmio_start = pci_resource_start(pcidev, 0); | ||
1006 | mmio_len = pci_resource_len(pcidev, 0); | ||
1007 | |||
1008 | DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n", | ||
1009 | mmio_start, mmio_len); | ||
1010 | |||
1011 | memmapped_ioaddr = ioremap(mmio_start, mmio_len); | ||
1012 | DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__, | ||
1013 | memmapped_ioaddr); | ||
1014 | if (!memmapped_ioaddr) { | ||
1015 | DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n", | ||
1016 | __func__, mmio_len, mmio_start); | ||
1017 | goto err_out_free_mmio_region_0; | ||
1018 | } | ||
1019 | |||
1020 | DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] \ | ||
1021 | len[%lx], IRQ %d.\n", __func__, memmapped_ioaddr, mmio_start, | ||
1022 | mmio_len, pcidev->irq); | ||
1023 | |||
1024 | adapter->HwRegs = (void *)memmapped_ioaddr; | ||
1025 | adapter->base_addr = memmapped_ioaddr; | ||
1026 | |||
1027 | mmio_start = pci_resource_start(pcidev, 2); | ||
1028 | mmio_len = pci_resource_len(pcidev, 2); | ||
1029 | |||
1030 | DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n", | ||
1031 | mmio_start, mmio_len); | ||
1032 | |||
1033 | memmapped_ioaddr = ioremap(mmio_start, mmio_len); | ||
1034 | DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__, | ||
1035 | memmapped_ioaddr); | ||
1036 | if (!memmapped_ioaddr) { | ||
1037 | DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n", | ||
1038 | __func__, mmio_len, mmio_start); | ||
1039 | goto err_out_free_mmio_region_2; | ||
1040 | } | ||
1041 | |||
1042 | DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, " | ||
1043 | "start[%lx] len[%lx], IRQ %d.\n", __func__, | ||
1044 | memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq); | ||
1045 | |||
1046 | adapter->UcodeRegs = (void *)memmapped_ioaddr; | ||
1047 | |||
1048 | adapter->State = SXG_STATE_INITIALIZING; | ||
1049 | /* | ||
1050 | * Maintain a list of all adapters anchored by | ||
1051 | * the global SxgDriver structure. | ||
1052 | */ | ||
1053 | adapter->Next = SxgDriver.Adapters; | ||
1054 | SxgDriver.Adapters = adapter; | ||
1055 | adapter->AdapterID = ++SxgDriver.AdapterID; | ||
1056 | |||
1057 | /* Initialize CRC table used to determine multicast hash */ | ||
1058 | sxg_mcast_init_crc32(); | ||
1059 | |||
1060 | adapter->JumboEnabled = FALSE; | ||
1061 | adapter->RssEnabled = FALSE; | ||
1062 | if (adapter->JumboEnabled) { | ||
1063 | adapter->FrameSize = JUMBOMAXFRAME; | ||
1064 | adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE; | ||
1065 | } else { | ||
1066 | adapter->FrameSize = ETHERMAXFRAME; | ||
1067 | adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE; | ||
1068 | } | ||
1069 | |||
1070 | /* | ||
1071 | * status = SXG_READ_EEPROM(adapter); | ||
1072 | * if (!status) { | ||
1073 | * goto sxg_init_bad; | ||
1074 | * } | ||
1075 | */ | ||
1076 | |||
1077 | DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__); | ||
1078 | sxg_config_pci(pcidev); | ||
1079 | DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__); | ||
1080 | |||
1081 | DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__); | ||
1082 | sxg_init_driver(); | ||
1083 | DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__); | ||
1084 | |||
1085 | adapter->vendid = pci_tbl_entry->vendor; | ||
1086 | adapter->devid = pci_tbl_entry->device; | ||
1087 | adapter->subsysid = pci_tbl_entry->subdevice; | ||
1088 | adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F); | ||
1089 | adapter->functionnumber = (pcidev->devfn & 0x7); | ||
1090 | adapter->memorylength = pci_resource_len(pcidev, 0); | ||
1091 | adapter->irq = pcidev->irq; | ||
1092 | adapter->next_netdevice = head_netdevice; | ||
1093 | head_netdevice = netdev; | ||
1094 | adapter->port = 0; /*adapter->functionnumber; */ | ||
1095 | |||
1096 | /* Allocate memory and other resources */ | ||
1097 | DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__); | ||
1098 | status = sxg_allocate_resources(adapter); | ||
1099 | DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n", | ||
1100 | __func__, status); | ||
1101 | if (status != STATUS_SUCCESS) { | ||
1102 | goto err_out_unmap; | ||
1103 | } | ||
1104 | |||
1105 | DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__); | ||
1106 | if (sxg_download_microcode(adapter, SXG_UCODE_SYSTEM)) { | ||
1107 | DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n", | ||
1108 | __func__); | ||
1109 | sxg_read_config(adapter); | ||
1110 | status = sxg_adapter_set_hwaddr(adapter); | ||
1111 | } else { | ||
1112 | adapter->state = ADAPT_FAIL; | ||
1113 | adapter->linkstate = LINK_DOWN; | ||
1114 | DBG_ERROR("sxg_download_microcode FAILED status[%x]\n", status); | ||
1115 | } | ||
1116 | |||
1117 | netdev->base_addr = (unsigned long)adapter->base_addr; | ||
1118 | netdev->irq = adapter->irq; | ||
1119 | netdev->netdev_ops = &sxg_netdev_ops; | ||
1120 | SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops); | ||
1121 | netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | ||
1122 | err = sxg_set_interrupt_capability(adapter); | ||
1123 | if (err != STATUS_SUCCESS) | ||
1124 | DBG_ERROR("Cannot enable MSI-X capability\n"); | ||
1125 | |||
1126 | strcpy(netdev->name, "eth%d"); | ||
1127 | /* strcpy(netdev->name, pci_name(pcidev)); */ | ||
1128 | if ((err = register_netdev(netdev))) { | ||
1129 | DBG_ERROR("Cannot register net device, aborting. %s\n", | ||
1130 | netdev->name); | ||
1131 | goto err_out_unmap; | ||
1132 | } | ||
1133 | |||
1134 | netif_napi_add(netdev, &adapter->napi, | ||
1135 | sxg_poll, SXG_NETDEV_WEIGHT); | ||
1136 | netdev->watchdog_timeo = 2 * HZ; | ||
1137 | init_timer(&adapter->watchdog_timer); | ||
1138 | adapter->watchdog_timer.function = &sxg_watchdog; | ||
1139 | adapter->watchdog_timer.data = (unsigned long) adapter; | ||
1140 | INIT_WORK(&adapter->update_link_status, sxg_update_link_status); | ||
1141 | |||
1142 | DBG_ERROR | ||
1143 | ("sxg: %s addr 0x%lx, irq %d, MAC addr \ | ||
1144 | %02X:%02X:%02X:%02X:%02X:%02X\n", | ||
1145 | netdev->name, netdev->base_addr, pcidev->irq, netdev->dev_addr[0], | ||
1146 | netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3], | ||
1147 | netdev->dev_addr[4], netdev->dev_addr[5]); | ||
1148 | |||
1149 | /* sxg_init_bad: */ | ||
1150 | ASSERT(status == FALSE); | ||
1151 | /* sxg_free_adapter(adapter); */ | ||
1152 | |||
1153 | DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__, | ||
1154 | status, jiffies, smp_processor_id()); | ||
1155 | return status; | ||
1156 | |||
1157 | err_out_unmap: | ||
1158 | sxg_free_resources(adapter); | ||
1159 | |||
1160 | err_out_free_mmio_region_2: | ||
1161 | |||
1162 | mmio_start = pci_resource_start(pcidev, 2); | ||
1163 | mmio_len = pci_resource_len(pcidev, 2); | ||
1164 | release_mem_region(mmio_start, mmio_len); | ||
1165 | |||
1166 | err_out_free_mmio_region_0: | ||
1167 | |||
1168 | mmio_start = pci_resource_start(pcidev, 0); | ||
1169 | mmio_len = pci_resource_len(pcidev, 0); | ||
1170 | |||
1171 | release_mem_region(mmio_start, mmio_len); | ||
1172 | |||
1173 | err_out_exit_sxg_probe: | ||
1174 | |||
1175 | DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies, | ||
1176 | smp_processor_id()); | ||
1177 | |||
1178 | pci_disable_device(pcidev); | ||
1179 | DBG_ERROR("sxg: %s deallocate device\n", __func__); | ||
1180 | kfree(netdev); | ||
1181 | printk("Exit %s, Sxg driver loading failed..\n", __func__); | ||
1182 | |||
1183 | return -ENODEV; | ||
1184 | } | ||
1185 | |||
1186 | /* | ||
1187 | * LINE BASED Interrupt routines. | ||
1188 | * | ||
1189 | * sxg_disable_interrupt | ||
1190 | * | ||
1191 | * DisableInterrupt Handler | ||
1192 | * | ||
1193 | * Arguments: | ||
1194 | * | ||
1195 | * adapter: Our adapter structure | ||
1196 | * | ||
1197 | * Return Value: | ||
1198 | * None. | ||
1199 | */ | ||
1200 | static void sxg_disable_interrupt(struct adapter_t *adapter) | ||
1201 | { | ||
1202 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr", | ||
1203 | adapter, adapter->InterruptsEnabled, 0, 0); | ||
1204 | /* For now, RSS is disabled with line based interrupts */ | ||
1205 | ASSERT(adapter->RssEnabled == FALSE); | ||
1206 | /* Turn off interrupts by writing to the icr register. */ | ||
1207 | WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE); | ||
1208 | |||
1209 | adapter->InterruptsEnabled = 0; | ||
1210 | |||
1211 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDisIntr", | ||
1212 | adapter, adapter->InterruptsEnabled, 0, 0); | ||
1213 | } | ||
1214 | |||
1215 | /* | ||
1216 | * sxg_enable_interrupt | ||
1217 | * | ||
1218 | * EnableInterrupt Handler | ||
1219 | * | ||
1220 | * Arguments: | ||
1221 | * | ||
1222 | * adapter: Our adapter structure | ||
1223 | * | ||
1224 | * Return Value: | ||
1225 | * None. | ||
1226 | */ | ||
1227 | static void sxg_enable_interrupt(struct adapter_t *adapter) | ||
1228 | { | ||
1229 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr", | ||
1230 | adapter, adapter->InterruptsEnabled, 0, 0); | ||
1231 | /* For now, RSS is disabled with line based interrupts */ | ||
1232 | ASSERT(adapter->RssEnabled == FALSE); | ||
1233 | /* Turn on interrupts by writing to the icr register. */ | ||
1234 | WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE); | ||
1235 | |||
1236 | adapter->InterruptsEnabled = 1; | ||
1237 | |||
1238 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XEnIntr", | ||
1239 | adapter, 0, 0, 0); | ||
1240 | } | ||
1241 | |||
1242 | /* | ||
1243 | * sxg_isr - Process a line-based interrupt | ||
1244 | * | ||
1245 | * Arguments: | ||
1246 | * Context - Our adapter structure | ||
1247 | * QueueDefault - Output parameter to queue to default CPU | ||
1248 | * TargetCpus - Output bitmap to schedule DPC's | ||
1249 | * | ||
1250 | * Return Value: TRUE if our interrupt | ||
1251 | */ | ||
1252 | static irqreturn_t sxg_isr(int irq, void *dev_id) | ||
1253 | { | ||
1254 | struct net_device *dev = (struct net_device *) dev_id; | ||
1255 | struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); | ||
1256 | |||
1257 | if(adapter->state != ADAPT_UP) | ||
1258 | return IRQ_NONE; | ||
1259 | adapter->Stats.NumInts++; | ||
1260 | if (adapter->Isr[0] == 0) { | ||
1261 | /* | ||
1262 | * The SLIC driver used to experience a number of spurious | ||
1263 | * interrupts due to the delay associated with the masking of | ||
1264 | * the interrupt (we'd bounce back in here). If we see that | ||
1265 | * again with Sahara, add a READ_REG of the Icr register after | ||
1266 | * the WRITE_REG below. | ||
1267 | */ | ||
1268 | adapter->Stats.FalseInts++; | ||
1269 | return IRQ_NONE; | ||
1270 | } | ||
1271 | /* | ||
1272 | * Move the Isr contents and clear the value in | ||
1273 | * shared memory, and mask interrupts | ||
1274 | */ | ||
1275 | /* ASSERT(adapter->IsrDpcsPending == 0); */ | ||
1276 | #if XXXTODO /* RSS Stuff */ | ||
1277 | /* | ||
1278 | * If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then | ||
1279 | * schedule DPC's based on event queues. | ||
1280 | */ | ||
1281 | if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) { | ||
1282 | for (i = 0; | ||
1283 | i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount; | ||
1284 | i++) { | ||
1285 | struct sxg_event_ring *EventRing = | ||
1286 | &adapter->EventRings[i]; | ||
1287 | struct sxg_event *Event = | ||
1288 | &EventRing->Ring[adapter->NextEvent[i]]; | ||
1289 | unsigned char Cpu = | ||
1290 | adapter->RssSystemInfo->RssIdToCpu[i]; | ||
1291 | if (Event->Status & EVENT_STATUS_VALID) { | ||
1292 | adapter->IsrDpcsPending++; | ||
1293 | CpuMask |= (1 << Cpu); | ||
1294 | } | ||
1295 | } | ||
1296 | } | ||
1297 | /* | ||
1298 | * Now, either schedule the CPUs specified by the CpuMask, | ||
1299 | * or queue default | ||
1300 | */ | ||
1301 | if (CpuMask) { | ||
1302 | *QueueDefault = FALSE; | ||
1303 | } else { | ||
1304 | adapter->IsrDpcsPending = 1; | ||
1305 | *QueueDefault = TRUE; | ||
1306 | } | ||
1307 | *TargetCpus = CpuMask; | ||
1308 | #endif | ||
1309 | sxg_interrupt(adapter); | ||
1310 | |||
1311 | return IRQ_HANDLED; | ||
1312 | } | ||
1313 | |||
1314 | static void sxg_interrupt(struct adapter_t *adapter) | ||
1315 | { | ||
1316 | WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE); | ||
1317 | |||
1318 | if (napi_schedule_prep(&adapter->napi)) { | ||
1319 | __napi_schedule(&adapter->napi); | ||
1320 | } | ||
1321 | } | ||
1322 | |||
1323 | static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done, | ||
1324 | int budget) | ||
1325 | { | ||
1326 | /* unsigned char RssId = 0; */ | ||
1327 | u32 NewIsr; | ||
1328 | int sxg_napi_continue = 1; | ||
1329 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr", | ||
1330 | adapter, adapter->IsrCopy[0], 0, 0); | ||
1331 | /* For now, RSS is disabled with line based interrupts */ | ||
1332 | ASSERT(adapter->RssEnabled == FALSE); | ||
1333 | |||
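| /* Snapshot the ISR value shared with the card and clear it for the next interrupt. */ | ||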
1334 | adapter->IsrCopy[0] = adapter->Isr[0]; | ||
1335 | adapter->Isr[0] = 0; | ||
1336 | |||
1337 | /* Always process the event queue. */ | ||
1338 | while (sxg_napi_continue) | ||
1339 | { | ||
1340 | sxg_process_event_queue(adapter, | ||
1341 | (adapter->RssEnabled ? /*RssId */ 0 : 0), | ||
1342 | &sxg_napi_continue, work_done, budget); | ||
1343 | } | ||
1344 | |||
1345 | #if XXXTODO /* RSS stuff */ | ||
1346 | if (--adapter->IsrDpcsPending) { | ||
1347 | /* We're done. */ | ||
1348 | ASSERT(adapter->RssEnabled); | ||
1349 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend", | ||
1350 | adapter, 0, 0, 0); | ||
1351 | return; | ||
1352 | } | ||
1353 | #endif | ||
1354 | /* Last (or only) DPC processes the ISR and clears the interrupt. */ | ||
1355 | NewIsr = sxg_process_isr(adapter, 0); | ||
1356 | /* Reenable interrupts */ | ||
1357 | adapter->IsrCopy[0] = 0; | ||
1358 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr", | ||
1359 | adapter, NewIsr, 0, 0); | ||
1360 | |||
1361 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XHndlInt", | ||
1362 | adapter, 0, 0, 0); | ||
1363 | } | ||
1364 | static int sxg_poll(struct napi_struct *napi, int budget) | ||
1365 | { | ||
1366 | struct adapter_t *adapter = container_of(napi, struct adapter_t, napi); | ||
1367 | int work_done = 0; | ||
1368 | |||
1369 | sxg_handle_interrupt(adapter, &work_done, budget); | ||
1370 | |||
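| /* If we stayed within budget, complete NAPI polling and clear the hardware ISR register. */ | ||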
1371 | if (work_done < budget) { | ||
1372 | napi_complete(napi); | ||
1373 | WRITE_REG(adapter->UcodeRegs[0].Isr, 0, TRUE); | ||
1374 | } | ||
1375 | return work_done; | ||
1376 | } | ||
1377 | |||
1378 | /* | ||
1379 | * sxg_process_isr - Process an interrupt. Called from the line-based and | ||
1380 | * message based interrupt DPC routines | ||
1381 | * | ||
1382 | * Arguments: | ||
1383 | * adapter - Our adapter structure | ||
1384 | * MessageId - The ISR that needs processing | ||
1385 | * | ||
1386 | * Return Value: | ||
1387 | * The new ISR value | ||
1388 | */ | ||
1389 | static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId) | ||
1390 | { | ||
1391 | u32 Isr = adapter->IsrCopy[MessageId]; | ||
1392 | u32 NewIsr = 0; | ||
1393 | |||
1394 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr", | ||
1395 | adapter, Isr, 0, 0); | ||
1396 | |||
1397 | /* Error */ | ||
1398 | if (Isr & SXG_ISR_ERR) { | ||
1399 | if (Isr & SXG_ISR_PDQF) { | ||
1400 | adapter->Stats.PdqFull++; | ||
1401 | DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __func__); | ||
1402 | } | ||
1403 | /* No host buffer */ | ||
1404 | if (Isr & SXG_ISR_RMISS) { | ||
1405 | /* | ||
1406 | * There is a bunch of code in the SLIC driver which | ||
1407 | * attempts to process more receive events per DPC | ||
1408 | * if we start to fall behind. We'll probably | ||
1409 | * need to do something similar here, but hold | ||
1410 | * off for now. I don't want to make the code more | ||
1411 | * complicated than strictly needed. | ||
1412 | */ | ||
1413 | adapter->stats.rx_missed_errors++; | ||
1414 | if (adapter->stats.rx_missed_errors < 5) { | ||
1415 | DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n", | ||
1416 | __func__); | ||
1417 | } | ||
1418 | } | ||
1419 | /* Card crash */ | ||
1420 | if (Isr & SXG_ISR_DEAD) { | ||
1421 | /* | ||
1422 | * Set aside the crash info and set the adapter state | ||
1423 | * to RESET | ||
1424 | */ | ||
1425 | adapter->CrashCpu = (unsigned char) | ||
1426 | ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT); | ||
1427 | adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH); | ||
1428 | adapter->Dead = TRUE; | ||
1429 | DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__, | ||
1430 | adapter->CrashLocation, adapter->CrashCpu); | ||
1431 | } | ||
1432 | /* Event ring full */ | ||
1433 | if (Isr & SXG_ISR_ERFULL) { | ||
1434 | /* | ||
1435 | * Same issue as RMISS, really. This means the | ||
1436 | * host is falling behind the card. Need to increase | ||
1437 | * event ring size, process more events per interrupt, | ||
1438 | * and/or reduce/remove interrupt aggregation. | ||
1439 | */ | ||
1440 | adapter->Stats.EventRingFull++; | ||
1441 | DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n", | ||
1442 | __func__); | ||
1443 | } | ||
1444 | /* Transmit drop - no DRAM buffers or XMT error */ | ||
1445 | if (Isr & SXG_ISR_XDROP) { | ||
1446 | DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __func__); | ||
1447 | } | ||
1448 | } | ||
1449 | /* Slowpath send completions */ | ||
1450 | if (Isr & SXG_ISR_SPSEND) { | ||
1451 | sxg_complete_slow_send(adapter); | ||
1452 | } | ||
1453 | /* Dump */ | ||
1454 | if (Isr & SXG_ISR_UPC) { | ||
1455 | /* Maybe change when debug is added.. */ | ||
1456 | // ASSERT(adapter->DumpCmdRunning); | ||
1457 | adapter->DumpCmdRunning = FALSE; | ||
1458 | } | ||
1459 | /* Link event */ | ||
1460 | if (Isr & SXG_ISR_LINK) { | ||
1461 | if (adapter->state != ADAPT_DOWN) { | ||
1462 | adapter->link_status_changed = 1; | ||
1463 | schedule_work(&adapter->update_link_status); | ||
1464 | } | ||
1465 | } | ||
1466 | /* Debug - breakpoint hit */ | ||
1467 | if (Isr & SXG_ISR_BREAK) { | ||
1468 | /* | ||
1469 | * At the moment AGDB isn't written to support interactive | ||
1470 | * debug sessions. When it is, this interrupt will be used to | ||
1471 | * signal AGDB that it has hit a breakpoint. For now, ASSERT. | ||
1472 | */ | ||
1473 | ASSERT(0); | ||
1474 | } | ||
1475 | /* Heartbeat response */ | ||
1476 | if (Isr & SXG_ISR_PING) { | ||
1477 | adapter->PingOutstanding = FALSE; | ||
1478 | } | ||
1479 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XProcIsr", | ||
1480 | adapter, Isr, NewIsr, 0); | ||
1481 | |||
1482 | return (NewIsr); | ||
1483 | } | ||
1484 | |||
1485 | /* | ||
1486 | * sxg_rcv_checksum - Set the checksum for received packet | ||
1487 | * | ||
1488 | * Arguments: | ||
1489 | * @adapter - Adapter structure on which packet is received | ||
1490 | * @skb - Packet which is received | ||
1491 | * @Event - Event read from hardware | ||
1492 | */ | ||
1493 | |||
1494 | void sxg_rcv_checksum(struct adapter_t *adapter, struct sk_buff *skb, | ||
1495 | struct sxg_event *Event) | ||
1496 | { | ||
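| /* Default to no checksum; upgrade only when the event flags show the hardware validated the headers. */ | ||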
1497 | skb->ip_summed = CHECKSUM_NONE; | ||
1498 | if (likely(adapter->flags & SXG_RCV_IP_CSUM_ENABLED)) { | ||
1499 | if (likely(adapter->flags & SXG_RCV_TCP_CSUM_ENABLED) | ||
1500 | && (Event->Status & EVENT_STATUS_TCPIP)) { | ||
1501 | if(!(Event->Status & EVENT_STATUS_TCPBAD)) | ||
1502 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1503 | if(!(Event->Status & EVENT_STATUS_IPBAD)) | ||
1504 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1505 | } else if(Event->Status & EVENT_STATUS_IPONLY) { | ||
1506 | if(!(Event->Status & EVENT_STATUS_IPBAD)) | ||
1507 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1508 | } | ||
1509 | } | ||
1510 | } | ||
1511 | |||
1512 | /* | ||
1513 | * sxg_process_event_queue - Process our event queue | ||
1514 | * | ||
1515 | * Arguments: | ||
1516 | * - adapter - Adapter structure | ||
1517 | * - RssId - The event queue requiring processing | ||
1518 | * | ||
1519 | * Return Value: | ||
1520 | * None. | ||
1521 | */ | ||
1522 | static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId, | ||
1523 | int *sxg_napi_continue, int *work_done, int budget) | ||
1524 | { | ||
1525 | struct sxg_event_ring *EventRing = &adapter->EventRings[RssId]; | ||
1526 | struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[RssId]]; | ||
1527 | u32 EventsProcessed = 0, Batches = 0; | ||
1528 | struct sk_buff *skb; | ||
1529 | #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS | ||
1530 | struct sk_buff *prev_skb = NULL; | ||
1531 | struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE]; | ||
1532 | u32 Index; | ||
1533 | struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr; | ||
1534 | #endif | ||
1535 | u32 ReturnStatus = 0; | ||
1536 | int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS; | ||
1537 | |||
1538 | ASSERT((adapter->State == SXG_STATE_RUNNING) || | ||
1539 | (adapter->State == SXG_STATE_PAUSING) || | ||
1540 | (adapter->State == SXG_STATE_PAUSED) || | ||
1541 | (adapter->State == SXG_STATE_HALTING)); | ||
1542 | /* | ||
1543 | * We may still have unprocessed events on the queue if | ||
1544 | * the card crashed. Don't process them. | ||
1545 | */ | ||
1546 | if (adapter->Dead) { | ||
1547 | return (0); | ||
1548 | } | ||
1549 | /* | ||
1550 | * In theory there should only be a single processor that | ||
1551 | * accesses this queue, and only at interrupt-DPC time. So | ||
1552 | * we shouldn't need a lock for any of this. | ||
1553 | */ | ||
1554 | while (Event->Status & EVENT_STATUS_VALID) { | ||
1555 | (*sxg_napi_continue) = 1; | ||
1556 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event", | ||
1557 | Event, Event->Code, Event->Status, | ||
1558 | adapter->NextEvent); | ||
1559 | switch (Event->Code) { | ||
1560 | case EVENT_CODE_BUFFERS: | ||
1561 | /* struct sxg_ring_info Head & Tail == unsigned char */ | ||
1562 | ASSERT(!(Event->CommandIndex & 0xFF00)); | ||
1563 | sxg_complete_descriptor_blocks(adapter, | ||
1564 | Event->CommandIndex); | ||
1565 | break; | ||
1566 | case EVENT_CODE_SLOWRCV: | ||
1567 | (*work_done)++; | ||
1568 | --adapter->RcvBuffersOnCard; | ||
1569 | if ((skb = sxg_slow_receive(adapter, Event))) { | ||
1570 | u32 rx_bytes; | ||
1571 | #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS | ||
1572 | /* Add it to our indication list */ | ||
1573 | SXG_ADD_RCV_PACKET(adapter, skb, prev_skb, | ||
1574 | IndicationList, num_skbs); | ||
1575 | /* | ||
1576 | * Linux, we just pass up each skb to the | ||
1577 | * protocol above at this point, there is no | ||
1578 | * capability of an indication list. | ||
1579 | */ | ||
1580 | #else | ||
1581 | /* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */ | ||
1582 | /* (rcvbuf->length & IRHDDR_FLEN_MSK); */ | ||
1583 | rx_bytes = Event->Length; | ||
1584 | adapter->stats.rx_packets++; | ||
1585 | adapter->stats.rx_bytes += rx_bytes; | ||
1586 | sxg_rcv_checksum(adapter, skb, Event); | ||
1587 | skb->dev = adapter->netdev; | ||
1588 | netif_receive_skb(skb); | ||
1589 | #endif | ||
1590 | } | ||
1591 | break; | ||
1592 | default: | ||
1593 | DBG_ERROR("%s: ERROR Invalid EventCode %d\n", | ||
1594 | __func__, Event->Code); | ||
1595 | /* ASSERT(0); */ | ||
1596 | } | ||
1597 | /* | ||
1598 | * See if we need to restock card receive buffers. | ||
1599 | * There are two things to note here: | ||
1600 | * First - This test is not SMP safe. The | ||
1601 | * adapter->BuffersOnCard field is protected via atomic | ||
1602 | * interlocked calls, but we do not protect it with respect | ||
1603 | * to these tests. The only way to do that is with a lock, | ||
1604 | * and I don't want to grab a lock every time we adjust the | ||
1605 | * BuffersOnCard count. Instead, we allow the buffer | ||
1606 | * replenishment to be off once in a while. The worst that | ||
1607 | * can happen is that the card is given one more or one fewer descriptor | ||
1608 | * block than the arbitrary value we've chosen. No big deal. | ||
1609 | * In short, DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard | ||
1610 | * is adjusted. | ||
1611 | * Second - We expect this test to rarely | ||
1612 | * evaluate to true. We attempt to refill descriptor blocks | ||
1613 | * as they are returned to us (sxg_complete_descriptor_blocks) | ||
1614 | * so the only time this should evaluate to true is when | ||
1615 | * sxg_complete_descriptor_blocks failed to allocate | ||
1616 | * receive buffers. | ||
1617 | */ | ||
1618 | if (adapter->JumboEnabled) | ||
1619 | sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS; | ||
1620 | |||
1621 | if (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) { | ||
1622 | sxg_stock_rcv_buffers(adapter); | ||
1623 | } | ||
1624 | /* | ||
1625 | * It's more efficient to just set this to zero. | ||
1626 | * But clearing the top bit saves potential debug info... | ||
1627 | */ | ||
1628 | Event->Status &= ~EVENT_STATUS_VALID; | ||
1629 | /* Advance to the next event */ | ||
1630 | SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE); | ||
1631 | Event = &EventRing->Ring[adapter->NextEvent[RssId]]; | ||
1632 | EventsProcessed++; | ||
1633 | if (EventsProcessed == EVENT_RING_BATCH) { | ||
1634 | /* Release a batch of events back to the card */ | ||
1635 | WRITE_REG(adapter->UcodeRegs[RssId].EventRelease, | ||
1636 | EVENT_RING_BATCH, FALSE); | ||
1637 | EventsProcessed = 0; | ||
1638 | /* | ||
1639 | * If we've processed our batch limit, break out of the | ||
1640 | * loop and return SXG_ISR_EVENT to arrange for us to | ||
1641 | * be called again | ||
1642 | */ | ||
1643 | if (Batches++ == EVENT_BATCH_LIMIT) { | ||
1644 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, | ||
1645 | TRACE_NOISY, "EvtLimit", Batches, | ||
1646 | adapter->NextEvent, 0, 0); | ||
1647 | ReturnStatus = SXG_ISR_EVENT; | ||
1648 | break; | ||
1649 | } | ||
1650 | } | ||
1651 | if (*work_done >= budget) { | ||
1652 | WRITE_REG(adapter->UcodeRegs[RssId].EventRelease, | ||
1653 | EventsProcessed, FALSE); | ||
1654 | EventsProcessed = 0; | ||
1655 | (*sxg_napi_continue) = 0; | ||
1656 | break; | ||
1657 | } | ||
1658 | } | ||
1659 | if (!(Event->Status & EVENT_STATUS_VALID)) | ||
1660 | (*sxg_napi_continue) = 0; | ||
1661 | |||
1662 | #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS | ||
1663 | /* Indicate any received dumb-nic frames */ | ||
1664 | SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs); | ||
1665 | #endif | ||
1666 | /* Release events back to the card. */ | ||
1667 | if (EventsProcessed) { | ||
1668 | WRITE_REG(adapter->UcodeRegs[RssId].EventRelease, | ||
1669 | EventsProcessed, FALSE); | ||
1670 | } | ||
1671 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XPrcEvnt", | ||
1672 | Batches, EventsProcessed, adapter->NextEvent, num_skbs); | ||
1673 | |||
1674 | return (ReturnStatus); | ||
1675 | } | ||
1676 | |||
1677 | /* | ||
1678 | * sxg_complete_slow_send - Complete slowpath or dumb-nic sends | ||
1679 | * | ||
1680 | * Arguments - | ||
1681 | * adapter - A pointer to our adapter structure | ||
1682 | * Return | ||
1683 | * None | ||
1684 | */ | ||
1685 | static void sxg_complete_slow_send(struct adapter_t *adapter) | ||
1686 | { | ||
1687 | struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0]; | ||
1688 | struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo; | ||
1689 | u32 *ContextType; | ||
1690 | struct sxg_cmd *XmtCmd; | ||
1691 | unsigned long flags = 0; | ||
1692 | unsigned long sgl_flags = 0; | ||
1693 | unsigned int processed_count = 0; | ||
1694 | |||
1695 | /* | ||
1696 | * NOTE - This lock is dropped and regrabbed in this loop. | ||
1697 | * This means two different processors can both be running | ||
1698 | * through this loop. Be *very* careful. | ||
1699 | */ | ||
1700 | spin_lock_irqsave(&adapter->XmtZeroLock, flags); | ||
1701 | |||
1702 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds", | ||
1703 | adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0); | ||
1704 | |||
1705 | while ((XmtRingInfo->Tail != *adapter->XmtRingZeroIndex) | ||
1706 | && processed_count++ < SXG_COMPLETE_SLOW_SEND_LIMIT) { | ||
1707 | /* | ||
1708 | * Locate the current Cmd (ring descriptor entry), and | ||
1709 | * associated SGL, and advance the tail | ||
1710 | */ | ||
1711 | SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType); | ||
1712 | ASSERT(ContextType); | ||
1713 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd", | ||
1714 | XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0); | ||
1715 | /* Clear the SGL field. */ | ||
1716 | XmtCmd->Sgl = 0; | ||
1717 | |||
1718 | switch (*ContextType) { | ||
1719 | case SXG_SGL_DUMB: | ||
1720 | { | ||
1721 | struct sk_buff *skb; | ||
1722 | struct sxg_scatter_gather *SxgSgl = | ||
1723 | (struct sxg_scatter_gather *)ContextType; | ||
1724 | dma64_addr_t FirstSgeAddress; | ||
1725 | u32 FirstSgeLength; | ||
1726 | |||
1727 | /* Dumb-nic send. Command context is the dumb-nic SGL */ | ||
1728 | skb = (struct sk_buff *)ContextType; | ||
1729 | skb = SxgSgl->DumbPacket; | ||
1730 | FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress; | ||
1731 | FirstSgeLength = XmtCmd->Buffer.FirstSgeLength; | ||
1732 | /* Complete the send */ | ||
1733 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, | ||
1734 | TRACE_IMPORTANT, "DmSndCmp", skb, 0, | ||
1735 | 0, 0); | ||
1736 | ASSERT(adapter->Stats.XmtQLen); | ||
1737 | /* | ||
1738 | * Now drop the lock and complete the send | ||
1739 | * back to Microsoft. We need to drop the lock | ||
1740 | * because Microsoft can come back with a | ||
1741 | * chimney send, which results in a double trip | ||
1742 | * in SxgTcpOutput | ||
1743 | */ | ||
1744 | spin_unlock_irqrestore( | ||
1745 | &adapter->XmtZeroLock, flags); | ||
1746 | |||
1747 | SxgSgl->DumbPacket = NULL; | ||
1748 | SXG_COMPLETE_DUMB_SEND(adapter, skb, | ||
1749 | FirstSgeAddress, | ||
1750 | FirstSgeLength); | ||
1751 | SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL); | ||
1752 | /* and reacquire.. */ | ||
1753 | spin_lock_irqsave(&adapter->XmtZeroLock, flags); | ||
1754 | } | ||
1755 | break; | ||
1756 | default: | ||
1757 | ASSERT(0); | ||
1758 | } | ||
1759 | } | ||
1760 | spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); | ||
1761 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd", | ||
1762 | adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0); | ||
1763 | } | ||
1764 | |||
1765 | /* | ||
1766 | * sxg_slow_receive | ||
1767 | * | ||
1768 | * Arguments - | ||
1769 | * adapter - A pointer to our adapter structure | ||
1770 | * Event - Receive event | ||
1771 | * | ||
1772 | * Return - skb | ||
1773 | */ | ||
1774 | static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, | ||
1775 | struct sxg_event *Event) | ||
1776 | { | ||
1777 | u32 BufferSize = adapter->ReceiveBufferSize; | ||
1778 | struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr; | ||
1779 | struct sk_buff *Packet; | ||
1780 | static int read_counter = 0; | ||
1781 | |||
1782 | RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle; | ||
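| /* Refresh adapter statistics periodically (every few hundred receives) rather than on every packet. */ | ||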
1783 | if(read_counter++ & 0x100) | ||
1784 | { | ||
1785 | sxg_collect_statistics(adapter); | ||
1786 | read_counter = 0; | ||
1787 | } | ||
1788 | ASSERT(RcvDataBufferHdr); | ||
1789 | ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD); | ||
1790 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event, | ||
1791 | RcvDataBufferHdr, RcvDataBufferHdr->State, | ||
1792 | /*RcvDataBufferHdr->VirtualAddress*/ 0); | ||
1793 | /* Drop rcv frames in non-running state */ | ||
1794 | switch (adapter->State) { | ||
1795 | case SXG_STATE_RUNNING: | ||
1796 | break; | ||
1797 | case SXG_STATE_PAUSING: | ||
1798 | case SXG_STATE_PAUSED: | ||
1799 | case SXG_STATE_HALTING: | ||
1800 | goto drop; | ||
1801 | default: | ||
1802 | ASSERT(0); | ||
1803 | goto drop; | ||
1804 | } | ||
1805 | |||
1806 | /* | ||
1807 | * memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr), | ||
1808 | * RcvDataBufferHdr->VirtualAddress, Event->Length); | ||
1809 | */ | ||
1810 | |||
1811 | /* Change buffer state to UPSTREAM */ | ||
1812 | RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; | ||
1813 | if (Event->Status & EVENT_STATUS_RCVERR) { | ||
1814 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError", | ||
1815 | Event, Event->Status, Event->HostHandle, 0); | ||
1816 | sxg_process_rcv_error(adapter, *(u32 *) | ||
1817 | SXG_RECEIVE_DATA_LOCATION | ||
1818 | (RcvDataBufferHdr)); | ||
1819 | goto drop; | ||
1820 | } | ||
1821 | #if XXXTODO /* VLAN stuff */ | ||
1822 | /* If there's a VLAN tag, extract it and validate it */ | ||
1823 | if (((struct ether_header *) | ||
1824 | (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->EtherType | ||
1825 | == ETHERTYPE_VLAN) { | ||
1826 | if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) != | ||
1827 | STATUS_SUCCESS) { | ||
1828 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, | ||
1829 | "BadVlan", Event, | ||
1830 | SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr), | ||
1831 | Event->Length, 0); | ||
1832 | goto drop; | ||
1833 | } | ||
1834 | } | ||
1835 | #endif | ||
1836 | /* Dumb-nic frame. See if it passes our mac filter and update stats */ | ||
1837 | |||
1838 | if (!sxg_mac_filter(adapter, | ||
1839 | (struct ether_header *)(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)), | ||
1840 | Event->Length)) { | ||
1841 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr", | ||
1842 | Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr), | ||
1843 | Event->Length, 0); | ||
1844 | goto drop; | ||
1845 | } | ||
1846 | |||
1847 | Packet = RcvDataBufferHdr->SxgDumbRcvPacket; | ||
1848 | SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event); | ||
1849 | Packet->protocol = eth_type_trans(Packet, adapter->netdev); | ||
1850 | |||
1851 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv", | ||
1852 | RcvDataBufferHdr, Packet, Event->Length, 0); | ||
1853 | /* Lastly adjust the receive packet length. */ | ||
1854 | RcvDataBufferHdr->SxgDumbRcvPacket = NULL; | ||
1855 | RcvDataBufferHdr->PhysicalAddress = (dma_addr_t)NULL; | ||
1856 | SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize); | ||
1857 | if (RcvDataBufferHdr->skb) | ||
1858 | { | ||
1859 | spin_lock(&adapter->RcvQLock); | ||
1860 | SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr); | ||
1861 | // adapter->RcvBuffersOnCard ++; | ||
1862 | spin_unlock(&adapter->RcvQLock); | ||
1863 | } | ||
1864 | return (Packet); | ||
1865 | |||
1866 | drop: | ||
1867 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DropRcv", | ||
1868 | RcvDataBufferHdr, Event->Length, 0, 0); | ||
1869 | adapter->stats.rx_dropped++; | ||
1870 | // adapter->Stats.RcvDiscards++; | ||
1871 | spin_lock(&adapter->RcvQLock); | ||
1872 | SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr); | ||
1873 | spin_unlock(&adapter->RcvQLock); | ||
1874 | return (NULL); | ||
1875 | } | ||
1876 | |||
1877 | /* | ||
1878 | * sxg_process_rcv_error - process receive error and update | ||
1879 | * stats | ||
1880 | * | ||
1881 | * Arguments: | ||
1882 | * adapter - Adapter structure | ||
1883 | * ErrorStatus - 4-byte receive error status | ||
1884 | * | ||
1885 | * Return Value : None | ||
1886 | */ | ||
1887 | static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus) | ||
1888 | { | ||
1889 | u32 Error; | ||
1890 | |||
1891 | adapter->stats.rx_errors++; | ||
1892 | |||
1893 | if (ErrorStatus & SXG_RCV_STATUS_TRANSPORT_ERROR) { | ||
1894 | Error = ErrorStatus & SXG_RCV_STATUS_TRANSPORT_MASK; | ||
1895 | switch (Error) { | ||
1896 | case SXG_RCV_STATUS_TRANSPORT_CSUM: | ||
1897 | adapter->Stats.TransportCsum++; | ||
1898 | break; | ||
1899 | case SXG_RCV_STATUS_TRANSPORT_UFLOW: | ||
1900 | adapter->Stats.TransportUflow++; | ||
1901 | break; | ||
1902 | case SXG_RCV_STATUS_TRANSPORT_HDRLEN: | ||
1903 | adapter->Stats.TransportHdrLen++; | ||
1904 | break; | ||
1905 | } | ||
1906 | } | ||
1907 | if (ErrorStatus & SXG_RCV_STATUS_NETWORK_ERROR) { | ||
1908 | Error = ErrorStatus & SXG_RCV_STATUS_NETWORK_MASK; | ||
1909 | switch (Error) { | ||
1910 | case SXG_RCV_STATUS_NETWORK_CSUM: | ||
1911 | adapter->Stats.NetworkCsum++; | ||
1912 | break; | ||
1913 | case SXG_RCV_STATUS_NETWORK_UFLOW: | ||
1914 | adapter->Stats.NetworkUflow++; | ||
1915 | break; | ||
1916 | case SXG_RCV_STATUS_NETWORK_HDRLEN: | ||
1917 | adapter->Stats.NetworkHdrLen++; | ||
1918 | break; | ||
1919 | } | ||
1920 | } | ||
1921 | if (ErrorStatus & SXG_RCV_STATUS_PARITY) { | ||
1922 | adapter->Stats.Parity++; | ||
1923 | } | ||
1924 | if (ErrorStatus & SXG_RCV_STATUS_LINK_ERROR) { | ||
1925 | Error = ErrorStatus & SXG_RCV_STATUS_LINK_MASK; | ||
1926 | switch (Error) { | ||
1927 | case SXG_RCV_STATUS_LINK_PARITY: | ||
1928 | adapter->Stats.LinkParity++; | ||
1929 | break; | ||
1930 | case SXG_RCV_STATUS_LINK_EARLY: | ||
1931 | adapter->Stats.LinkEarly++; | ||
1932 | break; | ||
1933 | case SXG_RCV_STATUS_LINK_BUFOFLOW: | ||
1934 | adapter->Stats.LinkBufOflow++; | ||
1935 | break; | ||
1936 | case SXG_RCV_STATUS_LINK_CODE: | ||
1937 | adapter->Stats.LinkCode++; | ||
1938 | break; | ||
1939 | case SXG_RCV_STATUS_LINK_DRIBBLE: | ||
1940 | adapter->Stats.LinkDribble++; | ||
1941 | break; | ||
1942 | case SXG_RCV_STATUS_LINK_CRC: | ||
1943 | adapter->Stats.LinkCrc++; | ||
1944 | break; | ||
1945 | case SXG_RCV_STATUS_LINK_OFLOW: | ||
1946 | adapter->Stats.LinkOflow++; | ||
1947 | break; | ||
1948 | case SXG_RCV_STATUS_LINK_UFLOW: | ||
1949 | adapter->Stats.LinkUflow++; | ||
1950 | break; | ||
1951 | } | ||
1952 | } | ||
1953 | } | ||
1954 | |||
1955 | /* | ||
1956 | * sxg_mac_filter | ||
1957 | * | ||
1958 | * Arguments: | ||
1959 | * adapter - Adapter structure | ||
1960 | * pether - Ethernet header | ||
1961 | * length - Frame length | ||
1962 | * | ||
1963 | * Return Value : TRUE if the frame is to be allowed | ||
1964 | */ | ||
1965 | static bool sxg_mac_filter(struct adapter_t *adapter, | ||
1966 | struct ether_header *EtherHdr, ushort length) | ||
1967 | { | ||
1968 | bool EqualAddr; | ||
1969 | struct net_device *dev = adapter->netdev; | ||
1970 | |||
1971 | if (SXG_MULTICAST_PACKET(EtherHdr)) { | ||
1972 | if (SXG_BROADCAST_PACKET(EtherHdr)) { | ||
1973 | /* broadcast */ | ||
1974 | if (adapter->MacFilter & MAC_BCAST) { | ||
1975 | adapter->Stats.DumbRcvBcastPkts++; | ||
1976 | adapter->Stats.DumbRcvBcastBytes += length; | ||
1977 | return (TRUE); | ||
1978 | } | ||
1979 | } else { | ||
1980 | /* multicast */ | ||
1981 | if (adapter->MacFilter & MAC_ALLMCAST) { | ||
1982 | adapter->Stats.DumbRcvMcastPkts++; | ||
1983 | adapter->Stats.DumbRcvMcastBytes += length; | ||
1984 | return (TRUE); | ||
1985 | } | ||
1986 | if (adapter->MacFilter & MAC_MCAST) { | ||
1987 | struct dev_mc_list *mclist = dev->mc_list; | ||
1988 | while (mclist) { | ||
1989 | ETHER_EQ_ADDR(mclist->da_addr, | ||
1990 | EtherHdr->ether_dhost, | ||
1991 | EqualAddr); | ||
1992 | if (EqualAddr) { | ||
1993 | adapter->Stats. | ||
1994 | DumbRcvMcastPkts++; | ||
1995 | adapter->Stats. | ||
1996 | DumbRcvMcastBytes += length; | ||
1997 | return (TRUE); | ||
1998 | } | ||
1999 | mclist = mclist->next; | ||
2000 | } | ||
2001 | } | ||
2002 | } | ||
2003 | } else if (adapter->MacFilter & MAC_DIRECTED) { | ||
2004 | /* | ||
2005 | * Not broadcast or multicast. Must be directed at us or | ||
2006 | * the card is in promiscuous mode. Either way, consider it | ||
2007 | * ours if MAC_DIRECTED is set | ||
2008 | */ | ||
2009 | adapter->Stats.DumbRcvUcastPkts++; | ||
2010 | adapter->Stats.DumbRcvUcastBytes += length; | ||
2011 | return (TRUE); | ||
2012 | } | ||
2013 | if (adapter->MacFilter & MAC_PROMISC) { | ||
2014 | /* Whatever it is, keep it. */ | ||
2015 | return (TRUE); | ||
2016 | } | ||
2017 | return (FALSE); | ||
2018 | } | ||
2019 | |||
2020 | static int sxg_register_interrupt(struct adapter_t *adapter) | ||
2021 | { | ||
2022 | if (!adapter->intrregistered) { | ||
2023 | int retval; | ||
2024 | |||
2025 | DBG_ERROR | ||
2026 | ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n", | ||
2027 | __func__, adapter, adapter->netdev->irq, NR_IRQS); | ||
2028 | |||
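| /* Release the global driver lock across request_irq(), which can sleep. */ | ||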
2029 | spin_unlock_irqrestore(&sxg_global.driver_lock, | ||
2030 | sxg_global.flags); | ||
2031 | |||
2032 | retval = request_irq(adapter->netdev->irq, | ||
2033 | &sxg_isr, | ||
2034 | IRQF_SHARED, | ||
2035 | adapter->netdev->name, adapter->netdev); | ||
2036 | |||
2037 | spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags); | ||
2038 | |||
2039 | if (retval) { | ||
2040 | DBG_ERROR("sxg: request_irq (%s) FAILED [%x]\n", | ||
2041 | adapter->netdev->name, retval); | ||
2042 | return (retval); | ||
2043 | } | ||
2044 | adapter->intrregistered = 1; | ||
2045 | adapter->IntRegistered = TRUE; | ||
2046 | /* Disable RSS with line-based interrupts */ | ||
2047 | adapter->RssEnabled = FALSE; | ||
2048 | DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n", | ||
2049 | __func__, adapter, adapter->netdev->irq); | ||
2050 | } | ||
2051 | return (STATUS_SUCCESS); | ||
2052 | } | ||
2053 | |||
2054 | static void sxg_deregister_interrupt(struct adapter_t *adapter) | ||
2055 | { | ||
2056 | DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter); | ||
2057 | #if XXXTODO | ||
2058 | slic_init_cleanup(adapter); | ||
2059 | #endif | ||
2060 | memset(&adapter->stats, 0, sizeof(struct net_device_stats)); | ||
2061 | adapter->error_interrupts = 0; | ||
2062 | adapter->rcv_interrupts = 0; | ||
2063 | adapter->xmit_interrupts = 0; | ||
2064 | adapter->linkevent_interrupts = 0; | ||
2065 | adapter->upr_interrupts = 0; | ||
2066 | adapter->num_isrs = 0; | ||
2067 | adapter->xmit_completes = 0; | ||
2068 | adapter->rcv_broadcasts = 0; | ||
2069 | adapter->rcv_multicasts = 0; | ||
2070 | adapter->rcv_unicasts = 0; | ||
2071 | DBG_ERROR("sxg: %s EXIT\n", __func__); | ||
2072 | } | ||
2073 | |||
2074 | /* | ||
2075 | * sxg_if_init | ||
2076 | * | ||
2077 | * Perform initialization of our slic interface. | ||
2078 | * | ||
2079 | */ | ||
2080 | static int sxg_if_init(struct adapter_t *adapter) | ||
2081 | { | ||
2082 | struct net_device *dev = adapter->netdev; | ||
2083 | int status = 0; | ||
2084 | |||
2085 | DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d] flags[%x]\n", | ||
2086 | __func__, adapter->netdev->name, | ||
2087 | adapter->state, | ||
2088 | adapter->linkstate, dev->flags); | ||
2089 | |||
2090 | /* adapter should be down at this point */ | ||
2091 | if (adapter->state != ADAPT_DOWN) { | ||
2092 | DBG_ERROR("sxg_if_init adapter->state != ADAPT_DOWN\n"); | ||
2093 | return (-EIO); | ||
2094 | } | ||
2095 | ASSERT(adapter->linkstate == LINK_DOWN); | ||
2096 | |||
2097 | adapter->devflags_prev = dev->flags; | ||
2098 | adapter->MacFilter = MAC_DIRECTED; | ||
2099 | if (dev->flags) { | ||
2100 | DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__, | ||
2101 | adapter->netdev->name); | ||
2102 | if (dev->flags & IFF_BROADCAST) { | ||
2103 | adapter->MacFilter |= MAC_BCAST; | ||
2104 | DBG_ERROR("BCAST "); | ||
2105 | } | ||
2106 | if (dev->flags & IFF_PROMISC) { | ||
2107 | adapter->MacFilter |= MAC_PROMISC; | ||
2108 | DBG_ERROR("PROMISC "); | ||
2109 | } | ||
2110 | if (dev->flags & IFF_ALLMULTI) { | ||
2111 | adapter->MacFilter |= MAC_ALLMCAST; | ||
2112 | DBG_ERROR("ALL_MCAST "); | ||
2113 | } | ||
2114 | if (dev->flags & IFF_MULTICAST) { | ||
2115 | adapter->MacFilter |= MAC_MCAST; | ||
2116 | DBG_ERROR("MCAST "); | ||
2117 | } | ||
2118 | DBG_ERROR("\n"); | ||
2119 | } | ||
2120 | status = sxg_register_intr(adapter); | ||
2121 | if (status != STATUS_SUCCESS) { | ||
2122 | DBG_ERROR("sxg_if_init: sxg_register_intr FAILED %x\n", | ||
2123 | status); | ||
2124 | sxg_deregister_interrupt(adapter); | ||
2125 | return (status); | ||
2126 | } | ||
2127 | |||
2128 | adapter->state = ADAPT_UP; | ||
2129 | |||
2130 | /* clear any pending events, then enable interrupts */ | ||
2131 | DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__); | ||
2132 | |||
2133 | return (STATUS_SUCCESS); | ||
2134 | } | ||
2135 | |||
2136 | void sxg_set_interrupt_aggregation(struct adapter_t *adapter) | ||
2137 | { | ||
2138 | /* | ||
2139 | * Top bit disables aggregation on xmt (SXG_AGG_XMT_DISABLE). | ||
2140 | * Make sure Max is less than 0x8000. | ||
2141 | */ | ||
2142 | adapter->max_aggregation = SXG_MAX_AGG_DEFAULT; | ||
2143 | adapter->min_aggregation = SXG_MIN_AGG_DEFAULT; | ||
2144 | WRITE_REG(adapter->UcodeRegs[0].Aggregation, | ||
2145 | ((adapter->max_aggregation << SXG_MAX_AGG_SHIFT) | | ||
2146 | adapter->min_aggregation), | ||
2147 | TRUE); | ||
2148 | } | ||
2149 | |||
2150 | static int sxg_entry_open(struct net_device *dev) | ||
2151 | { | ||
2152 | struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); | ||
2153 | int status; | ||
2154 | static int turn; | ||
2155 | int sxg_initial_rcv_data_buffers = SXG_INITIAL_RCV_DATA_BUFFERS; | ||
2156 | int i; | ||
2157 | |||
2158 | if (adapter->JumboEnabled == TRUE) { | ||
2159 | sxg_initial_rcv_data_buffers = | ||
2160 | SXG_INITIAL_JUMBO_RCV_DATA_BUFFERS; | ||
2161 | SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, | ||
2162 | SXG_JUMBO_RCV_RING_SIZE); | ||
2163 | } | ||
2164 | |||
2165 | /* | ||
2166 | * Allocate receive data buffers. We allocate a block of buffers and | ||
2167 | * a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK | ||
2168 | */ | ||
2169 | |||
2170 | for (i = 0; i < sxg_initial_rcv_data_buffers; | ||
2171 | i += SXG_RCV_DESCRIPTORS_PER_BLOCK) | ||
2172 | { | ||
2173 | status = sxg_allocate_buffer_memory(adapter, | ||
2174 | SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE), | ||
2175 | SXG_BUFFER_TYPE_RCV); | ||
2176 | if (status != STATUS_SUCCESS) | ||
2177 | return status; | ||
2178 | } | ||
2179 | /* | ||
2180 | * NBL resource allocation can fail in the 'AllocateComplete' routine, | ||
2181 | * which doesn't return status. Make sure we got the number of buffers | ||
2182 | * we requested | ||
2183 | */ | ||
2184 | |||
2185 | if (adapter->FreeRcvBufferCount < sxg_initial_rcv_data_buffers) { | ||
2186 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6", | ||
2187 | adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES, | ||
2188 | 0); | ||
2189 | return (STATUS_RESOURCES); | ||
2190 | } | ||
2191 | /* | ||
2192 | * The microcode needs to be downloaded on every open. | ||
2193 | */ | ||
2194 | DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__); | ||
2195 | if (sxg_download_microcode(adapter, SXG_UCODE_SYSTEM)) { | ||
2196 | DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n", | ||
2197 | __func__); | ||
2198 | sxg_read_config(adapter); | ||
2199 | } else { | ||
2200 | adapter->state = ADAPT_FAIL; | ||
2201 | adapter->linkstate = LINK_DOWN; | ||
2202 | DBG_ERROR("sxg_download_microcode FAILED status[%x]\n", | ||
2203 | status); | ||
2204 | } | ||
2205 | msleep(5); | ||
2206 | |||
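| /* Only the first open performs the full bring-up below; later opens take the shorter sxg_second_open() path. */ | ||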
2207 | if (turn) { | ||
2208 | sxg_second_open(adapter->netdev); | ||
2209 | |||
2210 | return STATUS_SUCCESS; | ||
2211 | } | ||
2212 | |||
2213 | turn++; | ||
2214 | |||
2215 | ASSERT(adapter); | ||
2216 | DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__, | ||
2217 | adapter->activated); | ||
2218 | DBG_ERROR | ||
2219 | ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n", | ||
2220 | __func__, adapter->netdev->name, jiffies, smp_processor_id(), | ||
2221 | adapter->netdev, adapter, adapter->port); | ||
2222 | |||
2223 | netif_stop_queue(adapter->netdev); | ||
2224 | |||
2225 | spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags); | ||
2226 | if (!adapter->activated) { | ||
2227 | sxg_global.num_sxg_ports_active++; | ||
2228 | adapter->activated = 1; | ||
2229 | } | ||
2230 | /* Initialize the adapter */ | ||
2231 | DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__); | ||
2232 | status = sxg_initialize_adapter(adapter); | ||
2233 | DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n", | ||
2234 | __func__, status); | ||
2235 | |||
2236 | if (status == STATUS_SUCCESS) { | ||
2237 | DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__); | ||
2238 | status = sxg_if_init(adapter); | ||
2239 | DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__, | ||
2240 | status); | ||
2241 | } | ||
2242 | |||
2243 | if (status != STATUS_SUCCESS) { | ||
2244 | if (adapter->activated) { | ||
2245 | sxg_global.num_sxg_ports_active--; | ||
2246 | adapter->activated = 0; | ||
2247 | } | ||
2248 | spin_unlock_irqrestore(&sxg_global.driver_lock, | ||
2249 | sxg_global.flags); | ||
2250 | return (status); | ||
2251 | } | ||
2252 | DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__); | ||
2253 | sxg_set_interrupt_aggregation(adapter); | ||
2254 | napi_enable(&adapter->napi); | ||
2255 | |||
2256 | /* Enable interrupts */ | ||
2257 | SXG_ENABLE_ALL_INTERRUPTS(adapter); | ||
2258 | |||
2259 | DBG_ERROR("sxg: %s EXIT\n", __func__); | ||
2260 | |||
2261 | spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags); | ||
2262 | mod_timer(&adapter->watchdog_timer, jiffies); | ||
2263 | |||
2264 | return STATUS_SUCCESS; | ||
2265 | } | ||
2266 | |||
2267 | int sxg_second_open(struct net_device * dev) | ||
2268 | { | ||
2269 | struct adapter_t *adapter = (struct adapter_t*) netdev_priv(dev); | ||
2270 | int status = 0; | ||
2271 | |||
2272 | spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags); | ||
2273 | netif_start_queue(adapter->netdev); | ||
2274 | adapter->state = ADAPT_UP; | ||
2275 | adapter->linkstate = LINK_UP; | ||
2276 | |||
2277 | status = sxg_initialize_adapter(adapter); | ||
2278 | sxg_set_interrupt_aggregation(adapter); | ||
2279 | napi_enable(&adapter->napi); | ||
2280 | /* Re-enable interrupts */ | ||
2281 | SXG_ENABLE_ALL_INTERRUPTS(adapter); | ||
2282 | |||
2283 | sxg_register_intr(adapter); | ||
2284 | spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags); | ||
2285 | mod_timer(&adapter->watchdog_timer, jiffies); | ||
2286 | return (STATUS_SUCCESS); | ||
2287 | |||
2288 | } | ||
2289 | |||
2290 | static void __devexit sxg_entry_remove(struct pci_dev *pcidev) | ||
2291 | { | ||
2292 | u32 mmio_start = 0; | ||
2293 | u32 mmio_len = 0; | ||
2294 | |||
2295 | struct net_device *dev = pci_get_drvdata(pcidev); | ||
2296 | struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); | ||
2297 | |||
2298 | flush_scheduled_work(); | ||
2299 | |||
2300 | /* Deallocate Resources */ | ||
2301 | unregister_netdev(dev); | ||
2302 | sxg_reset_interrupt_capability(adapter); | ||
2303 | sxg_free_resources(adapter); | ||
2304 | |||
2305 | ASSERT(adapter); | ||
2306 | |||
2307 | mmio_start = pci_resource_start(pcidev, 0); | ||
2308 | mmio_len = pci_resource_len(pcidev, 0); | ||
2309 | |||
2310 | DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __func__, | ||
2311 | mmio_start, mmio_len); | ||
2312 | release_mem_region(mmio_start, mmio_len); | ||
2313 | |||
2314 | mmio_start = pci_resource_start(pcidev, 2); | ||
2315 | mmio_len = pci_resource_len(pcidev, 2); | ||
2316 | |||
2317 | DBG_ERROR("sxg: %s rel_region(2) start[%x] len[%x]\n", __func__, | ||
2318 | mmio_start, mmio_len); | ||
2319 | release_mem_region(mmio_start, mmio_len); | ||
2320 | |||
2321 | pci_disable_device(pcidev); | ||
2322 | |||
2323 | DBG_ERROR("sxg: %s deallocate device\n", __func__); | ||
2324 | kfree(dev); | ||
2325 | DBG_ERROR("sxg: %s EXIT\n", __func__); | ||
2326 | } | ||
2327 | |||
2328 | static int sxg_entry_halt(struct net_device *dev) | ||
2329 | { | ||
2330 | struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); | ||
2331 | struct sxg_hw_regs *HwRegs = adapter->HwRegs; | ||
2332 | int i; | ||
2333 | u32 RssIds, IsrCount; | ||
2334 | unsigned long flags; | ||
2335 | |||
2336 | RssIds = SXG_RSS_CPU_COUNT(adapter); | ||
2337 | IsrCount = adapter->msi_enabled ? RssIds : 1; | ||
2338 | /* Disable interrupts */ | ||
2339 | spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags); | ||
2340 | SXG_DISABLE_ALL_INTERRUPTS(adapter); | ||
2341 | adapter->state = ADAPT_DOWN; | ||
2342 | adapter->linkstate = LINK_DOWN; | ||
2343 | |||
2344 | spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags); | ||
2345 | sxg_deregister_interrupt(adapter); | ||
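| /* Reset the card (the 0xDEAD value appears to be a reset magic) and give it time to settle before freeing resources. */ | ||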
2346 | WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH); | ||
2347 | mdelay(5000); | ||
2348 | |||
2349 | del_timer_sync(&adapter->watchdog_timer); | ||
2350 | netif_stop_queue(dev); | ||
2351 | netif_carrier_off(dev); | ||
2352 | |||
2353 | napi_disable(&adapter->napi); | ||
2354 | |||
2355 | WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 0, true); | ||
2356 | adapter->devflags_prev = 0; | ||
2357 | DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n", | ||
2358 | __func__, dev->name, adapter, adapter->state); | ||
2359 | |||
2360 | spin_lock(&adapter->RcvQLock); | ||
2361 | /* Free all the blocks and the buffers, moved from remove() routine */ | ||
2362 | if (!(IsListEmpty(&adapter->AllRcvBlocks))) { | ||
2363 | sxg_free_rcvblocks(adapter); | ||
2364 | } | ||
2365 | |||
2366 | |||
2367 | InitializeListHead(&adapter->FreeRcvBuffers); | ||
2368 | InitializeListHead(&adapter->FreeRcvBlocks); | ||
2369 | InitializeListHead(&adapter->AllRcvBlocks); | ||
2370 | InitializeListHead(&adapter->FreeSglBuffers); | ||
2371 | InitializeListHead(&adapter->AllSglBuffers); | ||
2372 | |||
2373 | adapter->FreeRcvBufferCount = 0; | ||
2374 | adapter->FreeRcvBlockCount = 0; | ||
2375 | adapter->AllRcvBlockCount = 0; | ||
2376 | adapter->RcvBuffersOnCard = 0; | ||
2377 | adapter->PendingRcvCount = 0; | ||
2378 | |||
2379 | memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1); | ||
2380 | memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds); | ||
2381 | memset(adapter->Isr, 0, sizeof(u32) * IsrCount); | ||
2382 | for (i = 0; i < SXG_MAX_RING_SIZE; i++) | ||
2383 | adapter->RcvRingZeroInfo.Context[i] = NULL; | ||
2384 | SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE); | ||
2385 | SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE); | ||
2386 | |||
2387 | spin_unlock(&adapter->RcvQLock); | ||
2388 | |||
2389 | spin_lock_irqsave(&adapter->XmtZeroLock, flags); | ||
2390 | adapter->AllSglBufferCount = 0; | ||
2391 | adapter->FreeSglBufferCount = 0; | ||
2392 | adapter->PendingXmtCount = 0; | ||
2393 | memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1); | ||
2394 | memset(adapter->XmtRingZeroIndex, 0, sizeof(u32)); | ||
2395 | spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); | ||
2396 | |||
2397 | for (i = 0; i < SXG_MAX_RSS; i++) { | ||
2398 | adapter->NextEvent[i] = 0; | ||
2399 | } | ||
2400 | atomic_set(&adapter->pending_allocations, 0); | ||
2401 | adapter->intrregistered = 0; | ||
2402 | sxg_remove_isr(adapter); | ||
2403 | DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name); | ||
2404 | return (STATUS_SUCCESS); | ||
2405 | } | ||
2406 | |||
2407 | static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
2408 | { | ||
2409 | ASSERT(rq); | ||
2410 | /* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev);*/ | ||
2411 | switch (cmd) { | ||
2412 | case SIOCSLICSETINTAGG: | ||
2413 | { | ||
2414 | /* struct adapter_t *adapter = (struct adapter_t *) | ||
2415 | * netdev_priv(dev); | ||
2416 | */ | ||
2417 | u32 data[7]; | ||
2418 | u32 intagg; | ||
2419 | |||
2420 | if (copy_from_user(data, rq->ifr_data, 28)) { | ||
2421 | DBG_ERROR("copy_from_user FAILED getting " | ||
2422 | "initial params\n"); | ||
2423 | return -EFAULT; | ||
2424 | } | ||
2425 | intagg = data[0]; | ||
2426 | printk(KERN_EMERG | ||
2427 | "%s: set interrupt aggregation to %d\n", | ||
2428 | __func__, intagg); | ||
2429 | return 0; | ||
2430 | } | ||
2431 | |||
2432 | default: | ||
2433 | /* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */ | ||
2434 | return -EOPNOTSUPP; | ||
2435 | } | ||
2436 | return 0; | ||
2437 | } | ||
2438 | |||
2439 | #define NORMAL_ETHFRAME 0 | ||
2440 | |||
2441 | /* | ||
2442 | * sxg_send_packets - Send a skb packet | ||
2443 | * | ||
2444 | * Arguments: | ||
2445 | * skb - The packet to send | ||
2446 | * dev - Our linux net device that refs our adapter | ||
2447 | * | ||
2448 | * Return: | ||
2449 | * 0 regardless of outcome. XXXTODO: refer to the e1000 driver | ||
2450 | */ | ||
2451 | static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev) | ||
2452 | { | ||
2453 | struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); | ||
2454 | u32 status = STATUS_SUCCESS; | ||
2455 | |||
2456 | /* | ||
2457 | * DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __func__, | ||
2458 | * skb); | ||
2459 | */ | ||
2460 | |||
2461 | /* Check the adapter state */ | ||
2462 | switch (adapter->State) { | ||
2463 | case SXG_STATE_INITIALIZING: | ||
2464 | case SXG_STATE_HALTED: | ||
2465 | case SXG_STATE_SHUTDOWN: | ||
2466 | ASSERT(0); /* unexpected */ | ||
2467 | /* fall through */ | ||
2468 | case SXG_STATE_RESETTING: | ||
2469 | case SXG_STATE_SLEEP: | ||
2470 | case SXG_STATE_BOOTDIAG: | ||
2471 | case SXG_STATE_DIAG: | ||
2472 | case SXG_STATE_HALTING: | ||
2473 | status = STATUS_FAILURE; | ||
2474 | break; | ||
2475 | case SXG_STATE_RUNNING: | ||
2476 | if (adapter->LinkState != SXG_LINK_UP) { | ||
2477 | status = STATUS_FAILURE; | ||
2478 | } | ||
2479 | break; | ||
2480 | default: | ||
2481 | ASSERT(0); | ||
2482 | status = STATUS_FAILURE; | ||
2483 | } | ||
2484 | if (status != STATUS_SUCCESS) { | ||
2485 | goto xmit_fail; | ||
2486 | } | ||
2487 | /* send a packet */ | ||
2488 | status = sxg_transmit_packet(adapter, skb); | ||
2489 | if (status == STATUS_SUCCESS) { | ||
2490 | goto xmit_done; | ||
2491 | } | ||
2492 | |||
2493 | xmit_fail: | ||
2494 | /* reject & complete all the packets if they can't be sent */ | ||
2495 | if (status != STATUS_SUCCESS) { | ||
2496 | #if XXXTODO | ||
2497 | /* sxg_send_packets_fail(adapter, skb, status); */ | ||
2498 | #else | ||
2499 | SXG_DROP_DUMB_SEND(adapter, skb); | ||
2500 | adapter->stats.tx_dropped++; | ||
2501 | return NETDEV_TX_BUSY; | ||
2502 | #endif | ||
2503 | } | ||
2504 | DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__, | ||
2505 | status); | ||
2506 | |||
2507 | xmit_done: | ||
2508 | return NETDEV_TX_OK; | ||
2509 | } | ||
2510 | |||
2511 | /* | ||
2512 | * sxg_transmit_packet | ||
2513 | * | ||
2514 | * This function transmits a single packet. | ||
2515 | * | ||
2516 | * Arguments - | ||
2517 | * adapter - Pointer to our adapter structure | ||
2518 | * skb - The packet to be sent | ||
2519 | * | ||
2520 | * Return - STATUS of send | ||
2521 | */ | ||
2522 | static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb) | ||
2523 | { | ||
2524 | struct sxg_x64_sgl *pSgl; | ||
2525 | struct sxg_scatter_gather *SxgSgl; | ||
2526 | unsigned long sgl_flags; | ||
2527 | /* void *SglBuffer; */ | ||
2528 | /* u32 SglBufferLength; */ | ||
2529 | |||
2530 | /* | ||
2531 | * The vast majority of work is done in the shared | ||
2532 | * sxg_dumb_sgl routine. | ||
2533 | */ | ||
2534 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend", | ||
2535 | adapter, skb, 0, 0); | ||
2536 | |||
2537 | /* Allocate a SGL buffer */ | ||
2538 | SXG_GET_SGL_BUFFER(adapter, SxgSgl, 0); | ||
2539 | if (!SxgSgl) { | ||
2540 | adapter->Stats.NoSglBuf++; | ||
2541 | adapter->stats.tx_errors++; | ||
2542 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "SndPktF1", | ||
2543 | adapter, skb, 0, 0); | ||
2544 | return (STATUS_RESOURCES); | ||
2545 | } | ||
2546 | ASSERT(SxgSgl->adapter == adapter); | ||
2547 | /*SglBuffer = SXG_SGL_BUFFER(SxgSgl); | ||
2548 | SglBufferLength = SXG_SGL_BUF_SIZE; */ | ||
2549 | SxgSgl->VlanTag.VlanTci = 0; | ||
2550 | SxgSgl->VlanTag.VlanTpid = 0; | ||
2551 | SxgSgl->Type = SXG_SGL_DUMB; | ||
2552 | SxgSgl->DumbPacket = skb; | ||
2553 | pSgl = NULL; | ||
2554 | |||
2555 | /* Call the common sxg_dumb_sgl routine to complete the send. */ | ||
2556 | return (sxg_dumb_sgl(pSgl, SxgSgl)); | ||
2557 | } | ||
2558 | |||
2559 | /* | ||
2560 | * sxg_dumb_sgl | ||
2561 | * | ||
2562 | * Arguments: | ||
2563 | * pSgl - | ||
2564 | * SxgSgl - struct sxg_scatter_gather | ||
2565 | * | ||
2566 | * Return Value: | ||
2567 | * Status of send operation. | ||
2568 | */ | ||
2569 | static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, | ||
2570 | struct sxg_scatter_gather *SxgSgl) | ||
2571 | { | ||
2572 | struct adapter_t *adapter = SxgSgl->adapter; | ||
2573 | struct sk_buff *skb = SxgSgl->DumbPacket; | ||
2574 | /* For now, all dumb-nic sends go on RSS queue zero */ | ||
2575 | struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0]; | ||
2576 | struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo; | ||
2577 | struct sxg_cmd *XmtCmd = NULL; | ||
2578 | /* u32 Index = 0; */ | ||
2579 | u32 DataLength = skb->len; | ||
2580 | /* unsigned int BufLen; */ | ||
2581 | /* u32 SglOffset; */ | ||
2582 | u64 phys_addr; | ||
2583 | unsigned long flags; | ||
2584 | unsigned long queue_id=0; | ||
2585 | int offload_cksum = 0; | ||
2586 | |||
2587 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl", | ||
2588 | pSgl, SxgSgl, 0, 0); | ||
2589 | |||
2590 | /* Set aside a pointer to the sgl */ | ||
2591 | SxgSgl->pSgl = pSgl; | ||
2592 | |||
2593 | /* Sanity check that our SGL format is as we expect. */ | ||
2594 | ASSERT(sizeof(struct sxg_x64_sge) == sizeof(struct sxg_x64_sge)); | ||
2595 | /* Shouldn't be a vlan tag on this frame */ | ||
2596 | ASSERT(SxgSgl->VlanTag.VlanTci == 0); | ||
2597 | ASSERT(SxgSgl->VlanTag.VlanTpid == 0); | ||
2598 | |||
2599 | /* | ||
2600 | * From here below we work with the SGL placed in our | ||
2601 | * buffer. | ||
2602 | */ | ||
2603 | |||
2604 | SxgSgl->Sgl.NumberOfElements = 1; | ||
2605 | /* | ||
2606 | * Set ucode Queue ID based on bottom bits of destination TCP port. | ||
2607 | * This Queue ID splits slowpath/dumb-nic packet processing across | ||
2608 | * multiple threads on the card to improve performance. It is split | ||
2609 | * using the TCP port to avoid out-of-order packets that can result | ||
2610 | * from multithreaded processing. We use the destination port because | ||
2611 | * we expect to be run on a server, so in nearly all cases the local | ||
2612 | * port is likely to be constant (well-known server port) and the | ||
2613 | * remote port is likely to be random. The exception to this is iSCSI, | ||
2614 | * in which case we use the sport instead. Note | ||
2615 | * that original attempt at XOR'ing source and dest port resulted in | ||
2616 | * poor balance on NTTTCP/iometer applications since they tend to | ||
2617 | * line up (even-even, odd-odd..). | ||
2618 | */ | ||
2619 | |||
2620 | if (skb->protocol == htons(ETH_P_IP)) { | ||
2621 | struct iphdr *ip; | ||
2622 | |||
2623 | ip = ip_hdr(skb); | ||
2624 | if (ip->protocol == IPPROTO_TCP) | ||
2625 | offload_cksum = 1; | ||
2626 | if (!offload_cksum || !tcp_hdr(skb)) | ||
2627 | queue_id = 0; | ||
2628 | else if (offload_cksum && (DataLength >= sizeof( | ||
2629 | struct tcphdr))){ | ||
2630 | queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ? | ||
2631 | (ntohs (tcp_hdr(skb)->source) & | ||
2632 | SXG_LARGE_SEND_QUEUE_MASK): | ||
2633 | (ntohs(tcp_hdr(skb)->dest) & | ||
2634 | SXG_LARGE_SEND_QUEUE_MASK)); | ||
2635 | } | ||
2636 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | ||
2637 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) | ||
2638 | offload_cksum = 1; | ||
2639 | if (!offload_cksum || !tcp_hdr(skb)) | ||
2640 | queue_id = 0; | ||
2641 | else if (offload_cksum && (DataLength>=sizeof(struct tcphdr))){ | ||
2642 | queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ? | ||
2643 | (ntohs (tcp_hdr(skb)->source) & | ||
2644 | SXG_LARGE_SEND_QUEUE_MASK): | ||
2645 | (ntohs(tcp_hdr(skb)->dest) & | ||
2646 | SXG_LARGE_SEND_QUEUE_MASK)); | ||
2647 | } | ||
2648 | } | ||
2649 | |||
2650 | /* Grab the spinlock and acquire a command */ | ||
2651 | spin_lock_irqsave(&adapter->XmtZeroLock, flags); | ||
2652 | SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl); | ||
2653 | if (XmtCmd == NULL) { | ||
2654 | /* | ||
2655 | * Call sxg_complete_slow_send to see if we can | ||
2656 | * free up any XmtRingZero entries and then try again | ||
2657 | */ | ||
2658 | |||
2659 | spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); | ||
2660 | sxg_complete_slow_send(adapter); | ||
2661 | spin_lock_irqsave(&adapter->XmtZeroLock, flags); | ||
2662 | SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl); | ||
2663 | if (XmtCmd == NULL) { | ||
2664 | adapter->Stats.XmtZeroFull++; | ||
2665 | goto abortcmd; | ||
2666 | } | ||
2667 | } | ||
2668 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd", | ||
2669 | XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0); | ||
2670 | memset(XmtCmd, '\0', sizeof(*XmtCmd)); | ||
2671 | XmtCmd->SgEntries = 1; | ||
2672 | XmtCmd->Flags = 0; | ||
2673 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
2674 | /* | ||
2675 | * We need to set the checksum in the IP header to 0. This is | ||
2676 | * required by the hardware. | ||
2677 | */ | ||
2678 | if (offload_cksum) { | ||
2679 | ip_hdr(skb)->check = 0x0; | ||
2680 | XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_IP; | ||
2681 | XmtCmd->CsumFlags.Flags |= SXG_SLOWCMD_CSUM_TCP; | ||
2682 | /* | ||
2683 | * Don't know if the length will require a change in | ||
2684 | * the case of VLAN | ||
2685 | */ | ||
2686 | XmtCmd->CsumFlags.MacLen = ETH_HLEN; | ||
2687 | XmtCmd->CsumFlags.IpHl = skb_network_header_len(skb) >> | ||
2688 | SXG_NW_HDR_LEN_SHIFT; | ||
2689 | } else { | ||
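| /* Checksum offload was requested but this is not a TCP frame; fall back to software checksumming and drop on failure. */ | ||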
2690 | if (skb_checksum_help(skb)){ | ||
2691 | printk(KERN_EMERG "Dropped UDP packet for" | ||
2692 | " incorrect checksum calculation\n"); | ||
2693 | if (XmtCmd) | ||
2694 | SXG_ABORT_CMD(XmtRingInfo); | ||
2695 | spin_unlock_irqrestore(&adapter->XmtZeroLock, | ||
2696 | flags); | ||
2697 | return STATUS_SUCCESS; | ||
2698 | } | ||
2699 | } | ||
2700 | } | ||
2701 | |||
2702 | /* | ||
2703 | * Fill in the command | ||
2704 | * Copy out the first SGE to the command and adjust for offset | ||
2705 | */ | ||
2706 | phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len, | ||
2707 | PCI_DMA_TODEVICE); | ||
2708 | |||
2709 | /* | ||
2710 | * SAHARA SGL WORKAROUND | ||
2711 | * See if the SGL straddles a 64k boundary. If so, skip to | ||
2712 | * the start of the next 64k boundary and continue | ||
2713 | */ | ||
2714 | |||
2715 | if ((adapter->asictype == SAHARA_REV_A) && | ||
2716 | (SXG_INVALID_SGL(phys_addr,skb->data_len))) | ||
2717 | { | ||
2718 | spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); | ||
2719 | if (XmtCmd) | ||
2720 | SXG_ABORT_CMD(XmtRingInfo); | ||
2721 | /* Drop this packet and log the 64k boundary problem */ | ||
2722 | printk(KERN_EMERG "Dropped a packet for 64k boundary problem\n"); | ||
2723 | return STATUS_SUCCESS; | ||
2724 | } | ||
2725 | XmtCmd->Buffer.FirstSgeAddress = phys_addr; | ||
2726 | XmtCmd->Buffer.FirstSgeLength = DataLength; | ||
2727 | XmtCmd->Buffer.SgeOffset = 0; | ||
2728 | XmtCmd->Buffer.TotalLength = DataLength; | ||
2729 | |||
2730 | /* | ||
2731 | * Advance transmit cmd descriptor by 1. | ||
2732 | * NOTE - See comments in SxgTcpOutput where we write | ||
2733 | * to the XmtCmd register regarding CPU ID values and/or | ||
2734 | * multiple commands. | ||
2735 | * Top 16 bits specify queue_id. See comments about queue_id above | ||
2736 | */ | ||
2737 | /* Four queues at the moment */ | ||
2738 | ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0); | ||
2739 | WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE); | ||
2740 | adapter->Stats.XmtQLen++; /* Stats within lock */ | ||
2741 | /* Update stats */ | ||
2742 | adapter->stats.tx_packets++; | ||
2743 | adapter->stats.tx_bytes += DataLength; | ||
2744 | #if XXXTODO /* Stats stuff */ | ||
2745 | if (SXG_MULTICAST_PACKET(EtherHdr)) { | ||
2746 | if (SXG_BROADCAST_PACKET(EtherHdr)) { | ||
2747 | adapter->Stats.DumbXmtBcastPkts++; | ||
2748 | adapter->Stats.DumbXmtBcastBytes += DataLength; | ||
2749 | } else { | ||
2750 | adapter->Stats.DumbXmtMcastPkts++; | ||
2751 | adapter->Stats.DumbXmtMcastBytes += DataLength; | ||
2752 | } | ||
2753 | } else { | ||
2754 | adapter->Stats.DumbXmtUcastPkts++; | ||
2755 | adapter->Stats.DumbXmtUcastBytes += DataLength; | ||
2756 | } | ||
2757 | #endif | ||
2758 | |||
2759 | spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); | ||
2760 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2", | ||
2761 | XmtCmd, pSgl, SxgSgl, 0); | ||
2762 | return STATUS_SUCCESS; | ||
2763 | |||
2764 | abortcmd: | ||
2765 | /* | ||
2766 | * NOTE - Only jump to this label AFTER grabbing the | ||
2767 | * XmtZeroLock, and DO NOT DROP IT between the | ||
2768 | * command allocation and the following abort. | ||
2769 | */ | ||
2770 | if (XmtCmd) { | ||
2771 | SXG_ABORT_CMD(XmtRingInfo); | ||
2772 | } | ||
2773 | spin_unlock_irqrestore(&adapter->XmtZeroLock, flags); | ||
2774 | |||
2775 | /* | ||
2776 | * failsgl: | ||
2777 | * Jump to this label if failure occurs before the | ||
2778 | * XmtZeroLock is grabbed | ||
2779 | */ | ||
2780 | adapter->stats.tx_errors++; | ||
2781 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal", | ||
2782 | pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail); | ||
2783 | /* SxgSgl->DumbPacket is the skb */ | ||
2784 | // SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); | ||
2785 | |||
2786 | return STATUS_FAILURE; | ||
2787 | } | ||
2788 | |||
2789 | /* | ||
2790 | * Link management functions | ||
2791 | * | ||
2792 | * sxg_initialize_link - Initialize the link stuff | ||
2793 | * | ||
2794 | * Arguments - | ||
2795 | * adapter - A pointer to our adapter structure | ||
2796 | * | ||
2797 | * Return | ||
2798 | * status | ||
2799 | */ | ||
2800 | static int sxg_initialize_link(struct adapter_t *adapter) | ||
2801 | { | ||
2802 | struct sxg_hw_regs *HwRegs = adapter->HwRegs; | ||
2803 | u32 Value; | ||
2804 | u32 ConfigData; | ||
2805 | u32 MaxFrame; | ||
2806 | u32 AxgMacReg1; | ||
2807 | int status; | ||
2808 | |||
2809 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink", | ||
2810 | adapter, 0, 0, 0); | ||
2811 | |||
2812 | /* Reset PHY and XGXS module */ | ||
2813 | WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE); | ||
2814 | |||
2815 | /* Reset transmit configuration register */ | ||
2816 | WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE); | ||
2817 | |||
2818 | /* Reset receive configuration register */ | ||
2819 | WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE); | ||
2820 | |||
2821 | /* Reset all MAC modules */ | ||
2822 | WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE); | ||
2823 | |||
2824 | /* | ||
2825 | * Link address 0 | ||
2826 | * XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f) | ||
2827 | * is stored with the first octet (0a) in byte 0 | ||
2828 | * of the MAC address. Possibly reverse? | ||
2829 | */ | ||
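| /* | ||
|  * Worked example for the loads below, assuming a little-endian host and | ||
|  * the sample address 0a:0b:0c:0d:0e:0f above: the u32 load of bytes | ||
|  * 0a..0d gives 0x0d0c0b0a (written to LinkAddress0Low), and ntohl() of | ||
|  * that gives 0x0a0b0c0d for MacAddressLow.  Likewise the u16 load of | ||
|  * 0e:0f gives 0x00000f0e, and ntohl() turns it into 0x0e0f0000, i.e. the | ||
|  * high bytes land in bits [31:16] of MacAddressHigh, as noted below. | ||
|  */ | ||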
2830 | Value = *(u32 *) adapter->macaddr; | ||
2831 | WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE); | ||
2832 | /* also write the MAC address to the MAC. Endian is reversed. */ | ||
2833 | WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE); | ||
2834 | Value = (*(u16 *) & adapter->macaddr[4] & 0x0000FFFF); | ||
2835 | WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE); | ||
2836 | /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */ | ||
2837 | Value = ntohl(Value); | ||
2838 | WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE); | ||
2839 | /* Link address 1 */ | ||
2840 | WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE); | ||
2841 | WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE); | ||
2842 | /* Link address 2 */ | ||
2843 | WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE); | ||
2844 | WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE); | ||
2845 | /* Link address 3 */ | ||
2846 | WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE); | ||
2847 | WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE); | ||
2848 | |||
2849 | /* Enable MAC modules */ | ||
2850 | WRITE_REG(HwRegs->MacConfig0, 0, TRUE); | ||
2851 | |||
2852 | /* Configure MAC */ | ||
2853 | AxgMacReg1 = ( /* Enable XMT */ | ||
2854 | AXGMAC_CFG1_XMT_EN | | ||
2855 | /* Enable receive */ | ||
2856 | AXGMAC_CFG1_RCV_EN | | ||
2857 | /* short frame detection */ | ||
2858 | AXGMAC_CFG1_SHORT_ASSERT | | ||
2859 | /* Verify frame length */ | ||
2860 | AXGMAC_CFG1_CHECK_LEN | | ||
2861 | /* Generate FCS */ | ||
2862 | AXGMAC_CFG1_GEN_FCS | | ||
2863 | /* Pad frames to 64 bytes */ | ||
2864 | AXGMAC_CFG1_PAD_64); | ||
2865 | |||
2866 | if (adapter->XmtFcEnabled) { | ||
2867 | AxgMacReg1 |= AXGMAC_CFG1_XMT_PAUSE; /* Allow sending of pause */ | ||
2868 | } | ||
2869 | if (adapter->RcvFcEnabled) { | ||
2870 | AxgMacReg1 |= AXGMAC_CFG1_RCV_PAUSE; /* Enable detection of pause */ | ||
2871 | } | ||
2872 | |||
2873 | WRITE_REG(HwRegs->MacConfig1, AxgMacReg1, TRUE); | ||
2874 | |||
2875 | /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */ | ||
2876 | if (adapter->JumboEnabled) { | ||
2877 | WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE); | ||
2878 | } | ||
2879 | /* | ||
2880 | * AMIIM Configuration Register - | ||
2881 | * The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion | ||
2882 | * (bottom bits) of this register is used to determine the MDC frequency | ||
2883 | * as specified in the A-XGMAC Design Document. This value must not be | ||
2884 | * zero. The following value (62 or 0x3E) is based on our MAC transmit | ||
2885 | * clock frequency (MTCLK) of 312.5 MHz. Given a maximum MDIO clock | ||
2886 | * frequency of 2.5 MHz (see the PHY spec), we get: | ||
2887 | * 312.5/(2*(X+1)) < 2.5 ==> X = 62. | ||
2888 | * This value happens to be the default value for this register, so we | ||
2889 | * really don't have to do this. | ||
2890 | */ | ||
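| /* | ||
|  * Worked out: with X = 0x3E (62), MDC = 312.5 MHz / (2 * (62 + 1)) | ||
|  * = 312.5 / 126, roughly 2.48 MHz, just under the 2.5 MHz limit. | ||
|  */ | ||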
2891 | if (adapter->asictype == SAHARA_REV_B) { | ||
2892 | WRITE_REG(HwRegs->MacAmiimConfig, 0x0000001F, TRUE); | ||
2893 | } else { | ||
2894 | WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE); | ||
2895 | } | ||
2896 | |||
2897 | /* Power up and enable PHY and XAUI/XGXS/Serdes logic */ | ||
2898 | WRITE_REG(HwRegs->LinkStatus, | ||
2899 | (LS_PHY_CLR_RESET | | ||
2900 | LS_XGXS_ENABLE | | ||
2901 | LS_XGXS_CTL | | ||
2902 | LS_PHY_CLK_EN | | ||
2903 | LS_ATTN_ALARM), | ||
2904 | TRUE); | ||
2905 | DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n"); | ||
2906 | |||
2907 | /* | ||
2908 | * Per information given by Aeluros, wait 100 ms after removing reset. | ||
2909 | * It's not enough to wait for the self-clearing reset bit in reg 0 to | ||
2910 | * clear. | ||
2911 | */ | ||
2912 | mdelay(100); | ||
2913 | |||
2914 | /* Verify the PHY has come up by checking that the Reset bit has | ||
2915 | * cleared. | ||
2916 | */ | ||
2917 | status = sxg_read_mdio_reg(adapter, | ||
2918 | MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */ | ||
2919 | PHY_PMA_CONTROL1, /* PMA/PMD control register */ | ||
2920 | &Value); | ||
2921 | DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value, | ||
2922 | (Value & PMA_CONTROL1_RESET)); | ||
2923 | if (status != STATUS_SUCCESS) | ||
2924 | return (STATUS_FAILURE); | ||
2925 | if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */ | ||
2926 | return (STATUS_FAILURE); | ||
2927 | |||
2928 | /* The SERDES should be initialized by now - confirm */ | ||
2929 | READ_REG(HwRegs->LinkStatus, Value); | ||
2930 | if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */ | ||
2931 | return (STATUS_FAILURE); | ||
2932 | |||
2933 | /* The XAUI link should also be up - confirm */ | ||
2934 | if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */ | ||
2935 | return (STATUS_FAILURE); | ||
2936 | |||
2937 | /* Initialize the PHY */ | ||
2938 | status = sxg_phy_init(adapter); | ||
2939 | if (status != STATUS_SUCCESS) | ||
2940 | return (STATUS_FAILURE); | ||
2941 | |||
2942 | /* Enable the Link Alarm */ | ||
2943 | |||
2944 | /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module | ||
2945 | * LASI_CONTROL - LASI control register | ||
2946 | * LASI_CTL_LS_ALARM_ENABLE - enable link alarm bit | ||
2947 | */ | ||
2948 | status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, | ||
2949 | LASI_CONTROL, | ||
2950 | LASI_CTL_LS_ALARM_ENABLE); | ||
2951 | if (status != STATUS_SUCCESS) | ||
2952 | return (STATUS_FAILURE); | ||
2953 | |||
2954 | /* XXXTODO - temporary - verify bit is set */ | ||
2955 | |||
2956 | /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module | ||
2957 | * LASI_CONTROL - LASI control register | ||
2958 | */ | ||
2959 | status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, | ||
2960 | LASI_CONTROL, | ||
2961 | &Value); | ||
2962 | |||
2963 | if (status != STATUS_SUCCESS) | ||
2964 | return (STATUS_FAILURE); | ||
2965 | if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) { | ||
2966 | DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n"); | ||
2967 | } | ||
2968 | /* Enable receive */ | ||
2969 | MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME; | ||
2970 | ConfigData = (RCV_CONFIG_ENABLE | | ||
2971 | RCV_CONFIG_ENPARSE | | ||
2972 | RCV_CONFIG_RCVBAD | | ||
2973 | RCV_CONFIG_RCVPAUSE | | ||
2974 | RCV_CONFIG_TZIPV6 | | ||
2975 | RCV_CONFIG_TZIPV4 | | ||
2976 | RCV_CONFIG_HASH_16 | | ||
2977 | RCV_CONFIG_SOCKET | RCV_CONFIG_BUFSIZE(MaxFrame)); | ||
2978 | |||
2979 | if (adapter->asictype == SAHARA_REV_B) { | ||
2980 | ConfigData |= (RCV_CONFIG_HIPRICTL | | ||
2981 | RCV_CONFIG_NEWSTATUSFMT); | ||
2982 | } | ||
2983 | WRITE_REG(HwRegs->RcvConfig, ConfigData, TRUE); | ||
2984 | |||
2985 | WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE); | ||
2986 | |||
2987 | /* Mark the link as down. We'll get a link event when it comes up. */ | ||
2988 | sxg_link_state(adapter, SXG_LINK_DOWN); | ||
2989 | |||
2990 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk", | ||
2991 | adapter, 0, 0, 0); | ||
2992 | return (STATUS_SUCCESS); | ||
2993 | } | ||
2994 | |||
2995 | /* | ||
2996 | * sxg_phy_init - Initialize the PHY | ||
2997 | * | ||
2998 | * Arguments - | ||
2999 | * adapter - A pointer to our adapter structure | ||
3000 | * | ||
3001 | * Return | ||
3002 | * status | ||
3003 | */ | ||
3004 | static int sxg_phy_init(struct adapter_t *adapter) | ||
3005 | { | ||
3006 | u32 Value; | ||
3007 | struct phy_ucode *p; | ||
3008 | int status; | ||
3009 | |||
3010 | DBG_ERROR("ENTER %s\n", __func__); | ||
3011 | |||
3012 | /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module | ||
3013 | * 0xC205 - PHY ID register (?) | ||
3014 | * &Value - XXXTODO - add def | ||
3015 | */ | ||
3016 | status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, | ||
3017 | 0xC205, | ||
3018 | &Value); | ||
3019 | if (status != STATUS_SUCCESS) | ||
3020 | return (STATUS_FAILURE); | ||
3021 | |||
3022 | if (Value == 0x0012) { | ||
3023 | /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */ | ||
3024 | DBG_ERROR("AEL2005C PHY detected. " | ||
3025 | "Downloading PHY microcode.\n"); | ||
3026 | |||
3027 | /* Initialize AEL2005C PHY and download PHY microcode */ | ||
3028 | for (p = PhyUcode; p->Addr != 0xFFFF; p++) { | ||
3029 | if (p->Addr == 0) { | ||
3030 | /* if address == 0, data == sleep time in ms */ | ||
3031 | mdelay(p->Data); | ||
3032 | } else { | ||
3033 | /* write the given data to the specified address */ | ||
3034 | status = sxg_write_mdio_reg(adapter, | ||
3035 | MIIM_DEV_PHY_PMA, | ||
3036 | /* PHY address */ | ||
3037 | p->Addr, | ||
3038 | /* PHY data */ | ||
3039 | p->Data); | ||
3040 | if (status != STATUS_SUCCESS) | ||
3041 | return (STATUS_FAILURE); | ||
3042 | } | ||
3043 | } | ||
3044 | } | ||
3045 | DBG_ERROR("EXIT %s\n", __func__); | ||
3046 | |||
3047 | return (STATUS_SUCCESS); | ||
3048 | } | ||
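| /* | ||
|  * For reference, the PhyUcode[] table walked in sxg_phy_init() is an | ||
|  * array of { Addr, Data } pairs defined elsewhere in the driver.  The | ||
|  * entries below are invented purely to illustrate the three cases the | ||
|  * download loop handles (the register value 0xC017 and the numbers are | ||
|  * made up, not real microcode): | ||
|  * | ||
|  *	static struct phy_ucode PhyUcode[] = { | ||
|  *		{ 0xC017, 0x0002 },	ordinary write: Data -> PHY reg Addr | ||
|  *		{ 0x0000, 50     },	Addr 0: sleep for Data (50) ms | ||
|  *		{ 0xFFFF, 0      },	Addr 0xFFFF: end-of-table marker | ||
|  *	}; | ||
|  */ | ||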
3049 | |||
3050 | /* | ||
3051 | * sxg_link_event - Process a link event notification from the card | ||
3052 | * | ||
3053 | * Arguments - | ||
3054 | * adapter - A pointer to our adapter structure | ||
3055 | * | ||
3056 | * Return | ||
3057 | * None | ||
3058 | */ | ||
3059 | static void sxg_link_event(struct adapter_t *adapter) | ||
3060 | { | ||
3061 | struct sxg_hw_regs *HwRegs = adapter->HwRegs; | ||
3062 | struct net_device *netdev = adapter->netdev; | ||
3063 | enum SXG_LINK_STATE LinkState; | ||
3064 | int status; | ||
3065 | u32 Value; | ||
3066 | |||
3067 | if (adapter->state == ADAPT_DOWN) | ||
3068 | return; | ||
3069 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt", | ||
3070 | adapter, 0, 0, 0); | ||
3071 | DBG_ERROR("ENTER %s\n", __func__); | ||
3072 | |||
3073 | /* Check the Link Status register. We should have a Link Alarm. */ | ||
3074 | READ_REG(HwRegs->LinkStatus, Value); | ||
3075 | if (Value & LS_LINK_ALARM) { | ||
3076 | /* | ||
3077 | * We got a Link Status alarm. First, pause to let the | ||
3078 | * link state settle (it can bounce a number of times) | ||
3079 | */ | ||
3080 | mdelay(10); | ||
3081 | |||
3082 | /* Now clear the alarm by reading the LASI status register. */ | ||
3083 | /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */ | ||
3084 | status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, | ||
3085 | /* LASI status register */ | ||
3086 | LASI_STATUS, | ||
3087 | &Value); | ||
3088 | if (status != STATUS_SUCCESS) { | ||
3089 | DBG_ERROR("Error reading LASI Status MDIO register!\n"); | ||
3090 | sxg_link_state(adapter, SXG_LINK_DOWN); | ||
3091 | /* ASSERT(0); */ | ||
3092 | } | ||
3093 | /* | ||
3094 | * We used to assert that the LASI_LS_ALARM bit was set, as | ||
3095 | * it should be. But there appears to be cases during | ||
3096 | * initialization (when the PHY is reset and re-initialized) | ||
3097 | * when we get a link alarm, but the status bit is 0 when we | ||
3098 | * read it. Rather than trying to assure this never happens | ||
3099 | * (and never being certain), just ignore it. | ||
3100 | |||
3101 | * ASSERT(Value & LASI_STATUS_LS_ALARM); | ||
3102 | */ | ||
3103 | |||
3104 | /* Now get and set the link state */ | ||
3105 | LinkState = sxg_get_link_state(adapter); | ||
3106 | sxg_link_state(adapter, LinkState); | ||
3107 | DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n", | ||
3108 | ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN")); | ||
3109 | if (LinkState == SXG_LINK_UP) { | ||
3110 | netif_carrier_on(netdev); | ||
3111 | netif_tx_start_all_queues(netdev); | ||
3112 | } else { | ||
3113 | netif_tx_stop_all_queues(netdev); | ||
3114 | netif_carrier_off(netdev); | ||
3115 | } | ||
3116 | } else { | ||
3117 | /* | ||
3118 | * XXXTODO - Assuming Link Attention is only being generated | ||
3119 | * for the Link Alarm pin (and not for a XAUI Link Status change), | ||
3120 | * then it's impossible to get here. Yet we've gotten here | ||
3121 | * twice (under extreme conditions - bouncing the link up and | ||
3122 | * down many times a second). Needs further investigation. | ||
3123 | */ | ||
3124 | DBG_ERROR("SXG: sxg_link_event: Can't get here!\n"); | ||
3125 | DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value); | ||
3126 | /* ASSERT(0); */ | ||
3127 | } | ||
3128 | DBG_ERROR("EXIT %s\n", __func__); | ||
3129 | |||
3130 | } | ||
3131 | |||
3132 | /* | ||
3133 | * sxg_get_link_state - Determine if the link is up or down | ||
3134 | * | ||
3135 | * Arguments - | ||
3136 | * adapter - A pointer to our adapter structure | ||
3137 | * | ||
3138 | * Return | ||
3139 | * Link State | ||
3140 | */ | ||
3141 | static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter) | ||
3142 | { | ||
3143 | int status; | ||
3144 | u32 Value; | ||
3145 | |||
3146 | DBG_ERROR("ENTER %s\n", __func__); | ||
3147 | |||
3148 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink", | ||
3149 | adapter, 0, 0, 0); | ||
3150 | |||
3151 | /* | ||
3152 | * Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if | ||
3153 | * the following 3 bits (from 3 different MDIO registers) are all true. | ||
3154 | */ | ||
3155 | |||
3156 | /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */ | ||
3157 | status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, | ||
3158 | /* PMA/PMD Receive Signal Detect register */ | ||
3159 | PHY_PMA_RCV_DET, | ||
3160 | &Value); | ||
3161 | if (status != STATUS_SUCCESS) | ||
3162 | goto bad; | ||
3163 | |||
3164 | /* If PMA/PMD receive signal detect is 0, then the link is down */ | ||
3165 | if (!(Value & PMA_RCV_DETECT)) | ||
3166 | return (SXG_LINK_DOWN); | ||
3167 | |||
3168 | /* MIIM_DEV_PHY_PCS - PHY PCS module */ | ||
3169 | status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, | ||
3170 | /* PCS 10GBASE-R Status 1 register */ | ||
3171 | PHY_PCS_10G_STATUS1, | ||
3172 | &Value); | ||
3173 | if (status != STATUS_SUCCESS) | ||
3174 | goto bad; | ||
3175 | |||
3176 | /* If PCS is not locked to receive blocks, then the link is down */ | ||
3177 | if (!(Value & PCS_10B_BLOCK_LOCK)) | ||
3178 | return (SXG_LINK_DOWN); | ||
3179 | |||
3180 | status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,/* PHY XS module */ | ||
3181 | /* XS Lane Status register */ | ||
3182 | PHY_XS_LANE_STATUS, | ||
3183 | &Value); | ||
3184 | if (status != STATUS_SUCCESS) | ||
3185 | goto bad; | ||
3186 | |||
3187 | /* If XS transmit lanes are not aligned, then the link is down */ | ||
3188 | if (!(Value & XS_LANE_ALIGN)) | ||
3189 | return (SXG_LINK_DOWN); | ||
3190 | |||
3191 | /* All 3 bits are true, so the link is up */ | ||
3192 | DBG_ERROR("EXIT %s\n", __func__); | ||
3193 | |||
3194 | return (SXG_LINK_UP); | ||
3195 | |||
3196 | bad: | ||
3197 | /* An error occurred reading an MDIO register. This shouldn't happen. */ | ||
3198 | DBG_ERROR("Error reading an MDIO register!\n"); | ||
3199 | ASSERT(0); | ||
3200 | return (SXG_LINK_DOWN); | ||
3201 | } | ||
3202 | |||
3203 | static void sxg_indicate_link_state(struct adapter_t *adapter, | ||
3204 | enum SXG_LINK_STATE LinkState) | ||
3205 | { | ||
3206 | if (adapter->LinkState == SXG_LINK_UP) { | ||
3207 | DBG_ERROR("%s: LINK now UP, call netif_start_queue\n", | ||
3208 | __func__); | ||
3209 | netif_start_queue(adapter->netdev); | ||
3210 | } else { | ||
3211 | DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n", | ||
3212 | __func__); | ||
3213 | netif_stop_queue(adapter->netdev); | ||
3214 | } | ||
3215 | } | ||
3216 | |||
3217 | /* | ||
3218 | * sxg_change_mtu - Change the Maximum Transfer Unit | ||
3219 | * @returns 0 on success, negative on failure | ||
3220 | */ | ||
3221 | int sxg_change_mtu (struct net_device *netdev, int new_mtu) | ||
3222 | { | ||
3223 | struct adapter_t *adapter = (struct adapter_t *) netdev_priv(netdev); | ||
3224 | |||
3225 | if (!((new_mtu == SXG_DEFAULT_MTU) || (new_mtu == SXG_JUMBO_MTU))) | ||
3226 | return -EINVAL; | ||
3227 | |||
3228 | if(new_mtu == netdev->mtu) | ||
3229 | return 0; | ||
3230 | |||
3231 | netdev->mtu = new_mtu; | ||
3232 | |||
3233 | if (new_mtu == SXG_JUMBO_MTU) { | ||
3234 | adapter->JumboEnabled = TRUE; | ||
3235 | adapter->FrameSize = JUMBOMAXFRAME; | ||
3236 | adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE; | ||
3237 | } else { | ||
3238 | adapter->JumboEnabled = FALSE; | ||
3239 | adapter->FrameSize = ETHERMAXFRAME; | ||
3240 | adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE; | ||
3241 | } | ||
3242 | |||
3243 | sxg_entry_halt(netdev); | ||
3244 | sxg_entry_open(netdev); | ||
3245 | return 0; | ||
3246 | } | ||
3247 | |||
3248 | /* | ||
3249 | * sxg_link_state - Set the link state and if necessary, indicate. | ||
3250 | * This routine is the central point of processing for all link state changes. | ||
3251 | * Nothing else in the driver should alter the link state or perform | ||
3252 | * link state indications | ||
3253 | * | ||
3254 | * Arguments - | ||
3255 | * adapter - A pointer to our adapter structure | ||
3256 | * LinkState - The link state | ||
3257 | * | ||
3258 | * Return | ||
3259 | * None | ||
3260 | */ | ||
3261 | static void sxg_link_state(struct adapter_t *adapter, | ||
3262 | enum SXG_LINK_STATE LinkState) | ||
3263 | { | ||
3264 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT", | ||
3265 | adapter, LinkState, adapter->LinkState, adapter->State); | ||
3266 | |||
3267 | DBG_ERROR("ENTER %s\n", __func__); | ||
3268 | |||
3269 | /* | ||
3270 | * Hold the adapter lock during this routine. Maybe move | ||
3271 | * the lock to the caller. | ||
3272 | */ | ||
3273 | /* IMP TODO : Check if we can survive without taking this lock */ | ||
3274 | // spin_lock(&adapter->AdapterLock); | ||
3275 | if (LinkState == adapter->LinkState) { | ||
3276 | /* Nothing changed.. */ | ||
3277 | // spin_unlock(&adapter->AdapterLock); | ||
3278 | DBG_ERROR("EXIT #0 %s. Link status = %d\n", | ||
3279 | __func__, LinkState); | ||
3280 | return; | ||
3281 | } | ||
3282 | /* Save the adapter state */ | ||
3283 | adapter->LinkState = LinkState; | ||
3284 | |||
3285 | /* Drop the lock and indicate link state */ | ||
3286 | // spin_unlock(&adapter->AdapterLock); | ||
3287 | DBG_ERROR("EXIT #1 %s\n", __func__); | ||
3288 | |||
3289 | sxg_indicate_link_state(adapter, LinkState); | ||
3290 | } | ||
3291 | |||
3292 | /* | ||
3293 | * sxg_write_mdio_reg - Write to a register on the MDIO bus | ||
3294 | * | ||
3295 | * Arguments - | ||
3296 | * adapter - A pointer to our adapter structure | ||
3297 | * DevAddr - MDIO device number being addressed | ||
3298 | * RegAddr - register address for the specified MDIO device | ||
3299 | * Value - value to write to the MDIO register | ||
3300 | * | ||
3301 | * Return | ||
3302 | * status | ||
3303 | */ | ||
3304 | static int sxg_write_mdio_reg(struct adapter_t *adapter, | ||
3305 | u32 DevAddr, u32 RegAddr, u32 Value) | ||
3306 | { | ||
3307 | struct sxg_hw_regs *HwRegs = adapter->HwRegs; | ||
3308 | /* Address operation (written to MIIM field reg) */ | ||
3309 | u32 AddrOp; | ||
3310 | /* Write operation (written to MIIM field reg) */ | ||
3311 | u32 WriteOp; | ||
3312 | u32 Cmd;/* Command (written to MIIM command reg) */ | ||
3313 | u32 ValueRead; | ||
3314 | u32 Timeout; | ||
3315 | |||
3316 | /* DBG_ERROR("ENTER %s\n", __func__); */ | ||
3317 | |||
3318 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO", | ||
3319 | adapter, 0, 0, 0); | ||
3320 | |||
3321 | /* Ensure values don't exceed field width */ | ||
3322 | DevAddr &= 0x001F; /* 5-bit field */ | ||
3323 | RegAddr &= 0xFFFF; /* 16-bit field */ | ||
3324 | Value &= 0xFFFF; /* 16-bit field */ | ||
3325 | |||
3326 | /* Set MIIM field register bits for an MIIM address operation */ | ||
3327 | AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | | ||
3328 | (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | | ||
3329 | (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | | ||
3330 | (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr; | ||
3331 | |||
3332 | /* Set MIIM field register bits for an MIIM write operation */ | ||
3333 | WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | | ||
3334 | (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | | ||
3335 | (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | | ||
3336 | (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value; | ||
3337 | |||
3338 | /* Set MIIM command register bits to execute an MIIM command */ | ||
3339 | Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION; | ||
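| /* | ||
|  * What follows is a two-step MDIO access: first AddrOp is pushed through | ||
|  * the AMIIM engine to latch the 16-bit register address in the PHY (the | ||
|  * address cycle), then WriteOp carries the data (the write cycle).  After | ||
|  * each step the BUSY bit in MacAmiimIndicator is polled until the engine | ||
|  * goes idle. | ||
|  */ | ||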
3340 | |||
3341 | /* Reset the command register command bit (in case it's not 0) */ | ||
3342 | WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); | ||
3343 | |||
3344 | /* MIIM write to set the address of the specified MDIO register */ | ||
3345 | WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE); | ||
3346 | |||
3347 | /* Write to MIIM Command Register to execute the address operation */ | ||
3348 | WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); | ||
3349 | |||
3350 | /* Poll AMIIM Indicator register to wait for completion */ | ||
3351 | Timeout = SXG_LINK_TIMEOUT; | ||
3352 | do { | ||
3353 | udelay(100); /* Timeout in 100us units */ | ||
3354 | READ_REG(HwRegs->MacAmiimIndicator, ValueRead); | ||
3355 | if (--Timeout == 0) { | ||
3356 | return (STATUS_FAILURE); | ||
3357 | } | ||
3358 | } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); | ||
3359 | |||
3360 | /* Reset the command register command bit */ | ||
3361 | WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); | ||
3362 | |||
3363 | /* MIIM write to set up an MDIO write operation */ | ||
3364 | WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE); | ||
3365 | |||
3366 | /* Write to MIIM Command Register to execute the write operation */ | ||
3367 | WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); | ||
3368 | |||
3369 | /* Poll AMIIM Indicator register to wait for completion */ | ||
3370 | Timeout = SXG_LINK_TIMEOUT; | ||
3371 | do { | ||
3372 | udelay(100); /* Timeout in 100us units */ | ||
3373 | READ_REG(HwRegs->MacAmiimIndicator, ValueRead); | ||
3374 | if (--Timeout == 0) { | ||
3375 | return (STATUS_FAILURE); | ||
3376 | } | ||
3377 | } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); | ||
3378 | |||
3379 | /* DBG_ERROR("EXIT %s\n", __func__); */ | ||
3380 | |||
3381 | return (STATUS_SUCCESS); | ||
3382 | } | ||
3383 | |||
3384 | /* | ||
3385 | * sxg_read_mdio_reg - Read a register on the MDIO bus | ||
3386 | * | ||
3387 | * Arguments - | ||
3388 | * adapter - A pointer to our adapter structure | ||
3389 | * DevAddr - MDIO device number being addressed | ||
3390 | * RegAddr - register address for the specified MDIO device | ||
3391 | * pValue - pointer to where to put data read from the MDIO register | ||
3392 | * | ||
3393 | * Return | ||
3394 | * status | ||
3395 | */ | ||
3396 | static int sxg_read_mdio_reg(struct adapter_t *adapter, | ||
3397 | u32 DevAddr, u32 RegAddr, u32 *pValue) | ||
3398 | { | ||
3399 | struct sxg_hw_regs *HwRegs = adapter->HwRegs; | ||
3400 | u32 AddrOp; /* Address operation (written to MIIM field reg) */ | ||
3401 | u32 ReadOp; /* Read operation (written to MIIM field reg) */ | ||
3402 | u32 Cmd; /* Command (written to MIIM command reg) */ | ||
3403 | u32 ValueRead; | ||
3404 | u32 Timeout; | ||
3405 | |||
3406 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO", | ||
3407 | adapter, 0, 0, 0); | ||
3408 | DBG_ERROR("ENTER %s\n", __func__); | ||
3409 | |||
3410 | /* Ensure values don't exceed field width */ | ||
3411 | DevAddr &= 0x001F; /* 5-bit field */ | ||
3412 | RegAddr &= 0xFFFF; /* 16-bit field */ | ||
3413 | |||
3414 | /* Set MIIM field register bits for an MIIM address operation */ | ||
3415 | AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | | ||
3416 | (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | | ||
3417 | (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | | ||
3418 | (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr; | ||
3419 | |||
3420 | /* Set MIIM field register bits for an MIIM read operation */ | ||
3421 | ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | | ||
3422 | (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | | ||
3423 | (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | | ||
3424 | (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT); | ||
3425 | |||
3426 | /* Set MIIM command register bits to execute an MIIM command */ | ||
3427 | Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION; | ||
3428 | |||
3429 | /* Reset the command register command bit (in case it's not 0) */ | ||
3430 | WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); | ||
3431 | |||
3432 | /* MIIM write to set the address of the specified MDIO register */ | ||
3433 | WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE); | ||
3434 | |||
3435 | /* Write to MIIM Command Register to execute the address operation */ | ||
3436 | WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); | ||
3437 | |||
3438 | /* Poll AMIIM Indicator register to wait for completion */ | ||
3439 | Timeout = SXG_LINK_TIMEOUT; | ||
3440 | do { | ||
3441 | udelay(100); /* Timeout in 100us units */ | ||
3442 | READ_REG(HwRegs->MacAmiimIndicator, ValueRead); | ||
3443 | if (--Timeout == 0) { | ||
3444 | DBG_ERROR("EXIT %s with STATUS_FAILURE 1\n", __func__); | ||
3445 | |||
3446 | return (STATUS_FAILURE); | ||
3447 | } | ||
3448 | } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); | ||
3449 | |||
3450 | /* Reset the command register command bit */ | ||
3451 | WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); | ||
3452 | |||
3453 | /* MIIM write to set up an MDIO register read operation */ | ||
3454 | WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE); | ||
3455 | |||
3456 | /* Write to MIIM Command Register to execute the read operation */ | ||
3457 | WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); | ||
3458 | |||
3459 | /* Poll AMIIM Indicator register to wait for completion */ | ||
3460 | Timeout = SXG_LINK_TIMEOUT; | ||
3461 | do { | ||
3462 | udelay(100); /* Timeout in 100us units */ | ||
3463 | READ_REG(HwRegs->MacAmiimIndicator, ValueRead); | ||
3464 | if (--Timeout == 0) { | ||
3465 | DBG_ERROR("EXIT %s with STATUS_FAILURE 2\n", __func__); | ||
3466 | |||
3467 | return (STATUS_FAILURE); | ||
3468 | } | ||
3469 | } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); | ||
3470 | |||
3471 | /* Read the MDIO register data back from the field register */ | ||
3472 | READ_REG(HwRegs->MacAmiimField, *pValue); | ||
3473 | *pValue &= 0xFFFF; /* data is in the lower 16 bits */ | ||
3474 | |||
3475 | DBG_ERROR("EXIT %s\n", __func__); | ||
3476 | |||
3477 | return (STATUS_SUCCESS); | ||
3478 | } | ||
3479 | |||
3480 | /* | ||
3481 | * Functions to obtain the CRC corresponding to the destination mac address. | ||
3482 | * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using | ||
3483 | * the polynomial: | ||
3484 | * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5 | ||
3485 | * + x^4 + x^2 + x^1 + 1. | ||
3486 | * | ||
3487 | * After the CRC for the 6 bytes is generated (but before the value is | ||
3488 | * complemented), we must then transpose the value and return bits 30-23. | ||
3489 | */ | ||
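| /* | ||
|  * Equivalently: transposing (bit-reversing) the 32-bit CRC maps bit i to | ||
|  * bit 31-i, so bits 30-23 of the transposed value are simply bits 1-8 of | ||
|  * the raw CRC in reverse order, which is exactly what | ||
|  * sxg_mcast_get_mac_hash() below extracts. | ||
|  */ | ||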
3490 | static u32 sxg_crc_table[256];/* Table of CRC's for all possible byte values */ | ||
3491 | static u32 sxg_crc_init; /* Is table initialized */ | ||
3492 | |||
3493 | /* Construct the CRC32 table */ | ||
3494 | static void sxg_mcast_init_crc32(void) | ||
3495 | { | ||
3496 | u32 c; /* CRC shift reg */ | ||
3497 | u32 e = 0; /* Polynomial XOR pattern */ | ||
3498 | int i; /* counter */ | ||
3499 | int k; /* bit counter for the byte being shifted into the crc */ | ||
3500 | |||
3501 | static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 }; | ||
3502 | |||
3503 | for (i = 0; i < ARRAY_SIZE(p); i++) { | ||
3504 | e |= 1L << (31 - p[i]); | ||
3505 | } | ||
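| /* e now equals 0xEDB88320, the reflected form of the standard | ||
|  * CRC-32 polynomial 0x04C11DB7. */ | ||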
3506 | |||
3507 | for (i = 1; i < 256; i++) { | ||
3508 | c = i; | ||
3509 | for (k = 8; k; k--) { | ||
3510 | c = c & 1 ? (c >> 1) ^ e : c >> 1; | ||
3511 | } | ||
3512 | sxg_crc_table[i] = c; | ||
3513 | } | ||
3514 | } | ||
3515 | |||
3516 | /* | ||
3517 | * Return the MAC hash as described above. | ||
3518 | */ | ||
3519 | static unsigned char sxg_mcast_get_mac_hash(char *macaddr) | ||
3520 | { | ||
3521 | u32 crc; | ||
3522 | char *p; | ||
3523 | int i; | ||
3524 | unsigned char machash = 0; | ||
3525 | |||
3526 | if (!sxg_crc_init) { | ||
3527 | sxg_mcast_init_crc32(); | ||
3528 | sxg_crc_init = 1; | ||
3529 | } | ||
3530 | |||
3531 | crc = 0xFFFFFFFF; /* Preload shift register, per crc-32 spec */ | ||
3532 | for (i = 0, p = macaddr; i < 6; ++p, ++i) { | ||
3533 | crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF]; | ||
3534 | } | ||
3535 | |||
3536 | /* Return bits 1-8, transposed */ | ||
3537 | for (i = 1; i < 9; i++) { | ||
3538 | machash |= (((crc >> i) & 1) << (8 - i)); | ||
3539 | } | ||
3540 | |||
3541 | return (machash); | ||
3542 | } | ||
3543 | |||
3544 | static void sxg_mcast_set_mask(struct adapter_t *adapter) | ||
3545 | { | ||
3546 | struct sxg_ucode_regs *sxg_regs = adapter->UcodeRegs; | ||
3547 | |||
3548 | DBG_ERROR("%s ENTER (%s) MacFilter[%x] mask[%llx]\n", __func__, | ||
3549 | adapter->netdev->name, (unsigned int)adapter->MacFilter, | ||
3550 | adapter->MulticastMask); | ||
3551 | |||
3552 | if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) { | ||
3553 | /* | ||
3554 | * Turn on all multicast addresses. We have to do this for | ||
3555 | * promiscuous mode as well as ALLMCAST mode. It saves the | ||
3556 | * Microcode from having to keep state about the MAC configuration | ||
3557 | */ | ||
3558 | /* DBG_ERROR("sxg: %s MacFilter = MAC_ALLMCAST | MAC_PROMISC\n \ | ||
3559 | * SLUT MODE!!!\n",__func__); | ||
3560 | */ | ||
3561 | WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH); | ||
3562 | WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH); | ||
3563 | /* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high \ | ||
3564 | * 0xFFFFFFFF\n",__func__, adapter->netdev->name); | ||
3565 | */ | ||
3566 | |||
3567 | } else { | ||
3568 | /* | ||
3569 | * Commit our multicast mask to the SLIC by writing to the | ||
3570 | * multicast address mask registers | ||
3571 | */ | ||
3572 | DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n", | ||
3573 | __func__, adapter->netdev->name, | ||
3574 | ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)), | ||
3575 | ((ulong) | ||
3576 | ((adapter->MulticastMask >> 32) & 0xFFFFFFFF))); | ||
3577 | |||
3578 | WRITE_REG(sxg_regs->McastLow, | ||
3579 | (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH); | ||
3580 | WRITE_REG(sxg_regs->McastHigh, | ||
3581 | (u32) ((adapter-> | ||
3582 | MulticastMask >> 32) & 0xFFFFFFFF), FLUSH); | ||
3583 | } | ||
3584 | } | ||
3585 | |||
3586 | static void sxg_mcast_set_bit(struct adapter_t *adapter, char *address) | ||
3587 | { | ||
3588 | unsigned char crcpoly; | ||
3589 | |||
3590 | /* Get the CRC-based hash of the mac address */ | ||
3591 | crcpoly = sxg_mcast_get_mac_hash(address); | ||
3592 | |||
3593 | /* | ||
3594 | * We only have space on the SLIC for 64 entries. Lop | ||
3595 | * off the top two bits. (2^6 = 64) | ||
3596 | */ | ||
3597 | crcpoly &= 0x3F; | ||
3598 | |||
3599 | /* OR in the new bit into our 64 bit mask. */ | ||
3600 | adapter->MulticastMask |= (u64) 1 << crcpoly; | ||
3601 | } | ||
3602 | |||
3603 | /* | ||
3604 | * Function takes MAC addresses from dev_mc_list and generates the Mask | ||
3605 | */ | ||
3606 | |||
3607 | static void sxg_set_mcast_addr(struct adapter_t *adapter) | ||
3608 | { | ||
3609 | struct dev_mc_list *mclist; | ||
3610 | struct net_device *dev = adapter->netdev; | ||
3611 | int i; | ||
3612 | |||
3613 | if (adapter->MacFilter & (MAC_ALLMCAST | MAC_MCAST)) { | ||
3614 | for (i = 0, mclist = dev->mc_list; i < dev->mc_count; | ||
3615 | i++, mclist = mclist->next) { | ||
3616 | sxg_mcast_set_bit(adapter,mclist->da_addr); | ||
3617 | } | ||
3618 | } | ||
3619 | sxg_mcast_set_mask(adapter); | ||
3620 | } | ||
3621 | |||
3622 | static void sxg_mcast_set_list(struct net_device *dev) | ||
3623 | { | ||
3624 | struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); | ||
3625 | |||
3626 | ASSERT(adapter); | ||
3627 | if (dev->flags & IFF_PROMISC) | ||
3628 | adapter->MacFilter |= MAC_PROMISC; | ||
3629 | if (dev->flags & IFF_MULTICAST) | ||
3630 | adapter->MacFilter |= MAC_MCAST; | ||
3631 | if (dev->flags & IFF_ALLMULTI) | ||
3632 | adapter->MacFilter |= MAC_ALLMCAST; | ||
3633 | |||
3634 | //XXX handle other flags as well | ||
3635 | sxg_set_mcast_addr(adapter); | ||
3636 | } | ||
3637 | |||
3638 | void sxg_free_sgl_buffers(struct adapter_t *adapter) | ||
3639 | { | ||
3640 | struct list_entry *ple; | ||
3641 | struct sxg_scatter_gather *Sgl; | ||
3642 | |||
3643 | while(!(IsListEmpty(&adapter->AllSglBuffers))) { | ||
3644 | ple = RemoveHeadList(&adapter->AllSglBuffers); | ||
3645 | Sgl = container_of(ple, struct sxg_scatter_gather, AllList); | ||
3646 | kfree(Sgl); | ||
3647 | adapter->AllSglBufferCount--; | ||
3648 | } | ||
3649 | } | ||
3650 | |||
3651 | void sxg_free_rcvblocks(struct adapter_t *adapter) | ||
3652 | { | ||
3653 | u32 i; | ||
3654 | void *temp_RcvBlock; | ||
3655 | struct list_entry *ple; | ||
3656 | struct sxg_rcv_block_hdr *RcvBlockHdr; | ||
3657 | struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr; | ||
3658 | ASSERT((adapter->state == SXG_STATE_INITIALIZING) || | ||
3659 | (adapter->state == SXG_STATE_HALTING)); | ||
3660 | while(!(IsListEmpty(&adapter->AllRcvBlocks))) { | ||
3661 | |||
3662 | ple = RemoveHeadList(&adapter->AllRcvBlocks); | ||
3663 | RcvBlockHdr = container_of(ple, struct sxg_rcv_block_hdr, AllList); | ||
3664 | |||
3665 | if(RcvBlockHdr->VirtualAddress) { | ||
3666 | temp_RcvBlock = RcvBlockHdr->VirtualAddress; | ||
3667 | |||
3668 | for(i=0; i< SXG_RCV_DESCRIPTORS_PER_BLOCK; | ||
3669 | i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) { | ||
3670 | RcvDataBufferHdr = | ||
3671 | (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock; | ||
3672 | SXG_FREE_RCV_PACKET(RcvDataBufferHdr); | ||
3673 | } | ||
3674 | } | ||
3675 | |||
3676 | pci_free_consistent(adapter->pcidev, | ||
3677 | SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE), | ||
3678 | RcvBlockHdr->VirtualAddress, | ||
3679 | RcvBlockHdr->PhysicalAddress); | ||
3680 | adapter->AllRcvBlockCount--; | ||
3681 | } | ||
3682 | ASSERT(adapter->AllRcvBlockCount == 0); | ||
3683 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk", | ||
3684 | adapter, 0, 0, 0); | ||
3685 | } | ||
3686 | void sxg_free_mcast_addrs(struct adapter_t *adapter) | ||
3687 | { | ||
3688 | struct sxg_multicast_address *address; | ||
3689 | while(adapter->MulticastAddrs) { | ||
3690 | address = adapter->MulticastAddrs; | ||
3691 | adapter->MulticastAddrs = address->Next; | ||
3692 | kfree(address); | ||
3693 | } | ||
3694 | |||
3695 | adapter->MulticastMask= 0; | ||
3696 | } | ||
3697 | |||
3698 | void sxg_unmap_resources(struct adapter_t *adapter) | ||
3699 | { | ||
3700 | if(adapter->HwRegs) { | ||
3701 | iounmap((void *)adapter->HwRegs); | ||
3702 | } | ||
3703 | if(adapter->UcodeRegs) { | ||
3704 | iounmap((void *)adapter->UcodeRegs); | ||
3705 | } | ||
3706 | |||
3707 | ASSERT(adapter->AllRcvBlockCount == 0); | ||
3708 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk", | ||
3709 | adapter, 0, 0, 0); | ||
3710 | } | ||
3711 | |||
3712 | |||
3713 | |||
3714 | /* | ||
3715 | * sxg_free_resources - Free everything allocated in SxgAllocateResources | ||
3716 | * | ||
3717 | * Arguments - | ||
3718 | * adapter - A pointer to our adapter structure | ||
3719 | * | ||
3720 | * Return | ||
3721 | * none | ||
3722 | */ | ||
3723 | void sxg_free_resources(struct adapter_t *adapter) | ||
3724 | { | ||
3725 | u32 RssIds, IsrCount; | ||
3726 | RssIds = SXG_RSS_CPU_COUNT(adapter); | ||
3727 | IsrCount = adapter->msi_enabled ? RssIds : 1; | ||
3728 | |||
3729 | if (adapter->BasicAllocations == FALSE) { | ||
3730 | /* | ||
3731 | * No allocations have been made, including spinlocks, | ||
3732 | * or listhead initializations. Return. | ||
3733 | */ | ||
3734 | return; | ||
3735 | } | ||
3736 | |||
3737 | if (!(IsListEmpty(&adapter->AllRcvBlocks))) { | ||
3738 | sxg_free_rcvblocks(adapter); | ||
3739 | } | ||
3740 | if (!(IsListEmpty(&adapter->AllSglBuffers))) { | ||
3741 | sxg_free_sgl_buffers(adapter); | ||
3742 | } | ||
3743 | |||
3744 | if (adapter->XmtRingZeroIndex) { | ||
3745 | pci_free_consistent(adapter->pcidev, | ||
3746 | sizeof(u32), | ||
3747 | adapter->XmtRingZeroIndex, | ||
3748 | adapter->PXmtRingZeroIndex); | ||
3749 | } | ||
3750 | if (adapter->Isr) { | ||
3751 | pci_free_consistent(adapter->pcidev, | ||
3752 | sizeof(u32) * IsrCount, | ||
3753 | adapter->Isr, adapter->PIsr); | ||
3754 | } | ||
3755 | |||
3756 | if (adapter->EventRings) { | ||
3757 | pci_free_consistent(adapter->pcidev, | ||
3758 | sizeof(struct sxg_event_ring) * RssIds, | ||
3759 | adapter->EventRings, adapter->PEventRings); | ||
3760 | } | ||
3761 | if (adapter->RcvRings) { | ||
3762 | pci_free_consistent(adapter->pcidev, | ||
3763 | sizeof(struct sxg_rcv_ring) * 1, | ||
3764 | adapter->RcvRings, | ||
3765 | adapter->PRcvRings); | ||
3766 | adapter->RcvRings = NULL; | ||
3767 | } | ||
3768 | |||
3769 | if(adapter->XmtRings) { | ||
3770 | pci_free_consistent(adapter->pcidev, | ||
3771 | sizeof(struct sxg_xmt_ring) * 1, | ||
3772 | adapter->XmtRings, | ||
3773 | adapter->PXmtRings); | ||
3774 | adapter->XmtRings = NULL; | ||
3775 | } | ||
3776 | |||
3777 | if (adapter->ucode_stats) { | ||
3778 | pci_unmap_single(adapter->pcidev, | ||
3779 | adapter->pucode_stats, | ||
3780 | sizeof(struct sxg_ucode_stats), PCI_DMA_FROMDEVICE); | ||
3781 | adapter->ucode_stats = NULL; | ||
3782 | } | ||
3783 | |||
3784 | |||
3785 | /* Unmap register spaces */ | ||
3786 | sxg_unmap_resources(adapter); | ||
3787 | |||
3788 | sxg_free_mcast_addrs(adapter); | ||
3789 | |||
3790 | adapter->BasicAllocations = FALSE; | ||
3791 | |||
3792 | } | ||
3793 | |||
3794 | /* | ||
3795 | * sxg_allocate_complete - | ||
3796 | * | ||
3797 | * This routine is called when a memory allocation has completed. | ||
3798 | * | ||
3799 | * Arguments - | ||
3800 | * struct adapter_t * - Our adapter structure | ||
3801 | * VirtualAddress - Memory virtual address | ||
3802 | * PhysicalAddress - Memory physical address | ||
3803 | * Length - Length of memory allocated (or 0) | ||
3804 | * Context - The type of buffer allocated | ||
3805 | * | ||
3806 | * Return | ||
3807 | * status | ||
3808 | */ | ||
3809 | static int sxg_allocate_complete(struct adapter_t *adapter, | ||
3810 | void *VirtualAddress, | ||
3811 | dma_addr_t PhysicalAddress, | ||
3812 | u32 Length, enum sxg_buffer_type Context) | ||
3813 | { | ||
3814 | int status = 0; | ||
3815 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp", | ||
3816 | adapter, VirtualAddress, Length, Context); | ||
3817 | ASSERT(atomic_read(&adapter->pending_allocations)); | ||
3818 | atomic_dec(&adapter->pending_allocations); | ||
3819 | |||
3820 | switch (Context) { | ||
3821 | |||
3822 | case SXG_BUFFER_TYPE_RCV: | ||
3823 | status = sxg_allocate_rcvblock_complete(adapter, | ||
3824 | VirtualAddress, | ||
3825 | PhysicalAddress, Length); | ||
3826 | break; | ||
3827 | case SXG_BUFFER_TYPE_SGL: | ||
3828 | sxg_allocate_sgl_buffer_complete(adapter, (struct sxg_scatter_gather *) | ||
3829 | VirtualAddress, | ||
3830 | PhysicalAddress, Length); | ||
3831 | break; | ||
3832 | } | ||
3833 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp", | ||
3834 | adapter, VirtualAddress, Length, Context); | ||
3835 | |||
3836 | return status; | ||
3837 | } | ||
3838 | |||
3839 | /* | ||
3840 | * sxg_allocate_buffer_memory - Shared memory allocation routine used for | ||
3841 | * synchronous and asynchronous buffer allocations | ||
3842 | * | ||
3843 | * Arguments - | ||
3844 | * adapter - A pointer to our adapter structure | ||
3845 | * Size - block size to allocate | ||
3846 | * BufferType - Type of buffer to allocate | ||
3847 | * | ||
3848 | * Return | ||
3849 | * int | ||
3850 | */ | ||
3851 | static int sxg_allocate_buffer_memory(struct adapter_t *adapter, | ||
3852 | u32 Size, enum sxg_buffer_type BufferType) | ||
3853 | { | ||
3854 | int status; | ||
3855 | void *Buffer; | ||
3856 | dma_addr_t pBuffer; | ||
3857 | |||
3858 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem", | ||
3859 | adapter, Size, BufferType, 0); | ||
3860 | /* | ||
3861 | * Grab the adapter lock and check the state. If we're in anything other | ||
3862 | * than INITIALIZING or RUNNING state, fail. This is to prevent | ||
3863 | * allocations in an improper driver state | ||
3864 | */ | ||
3865 | |||
3866 | atomic_inc(&adapter->pending_allocations); | ||
3867 | |||
3868 | if(BufferType != SXG_BUFFER_TYPE_SGL) | ||
3869 | Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer); | ||
3870 | else { | ||
3871 | Buffer = kzalloc(Size, GFP_ATOMIC); | ||
3872 | pBuffer = (dma_addr_t)NULL; | ||
3873 | } | ||
3874 | if (Buffer == NULL) { | ||
3875 | /* | ||
3876 | * Decrement the AllocationsPending count while holding | ||
3877 | * the lock. Pause processing relies on this | ||
3878 | */ | ||
3879 | atomic_dec(&adapter->pending_allocations); | ||
3880 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1", | ||
3881 | adapter, Size, BufferType, 0); | ||
3882 | return (STATUS_RESOURCES); | ||
3883 | } | ||
3884 | status = sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType); | ||
3885 | |||
3886 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem", | ||
3887 | adapter, Size, BufferType, status); | ||
3888 | return status; | ||
3889 | } | ||
3890 | |||
3891 | /* | ||
3892 | * sxg_allocate_rcvblock_complete - Complete a receive descriptor | ||
3893 | * block allocation | ||
3894 | * | ||
3895 | * Arguments - | ||
3896 | * adapter - A pointer to our adapter structure | ||
3897 | * RcvBlock - receive block virtual address | ||
3898 | * PhysicalAddress - Physical address | ||
3899 | * Length - Memory length | ||
3900 | * | ||
3901 | * Return | ||
3902 | */ | ||
3903 | static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter, | ||
3904 | void *RcvBlock, | ||
3905 | dma_addr_t PhysicalAddress, | ||
3906 | u32 Length) | ||
3907 | { | ||
3908 | u32 i; | ||
3909 | u32 BufferSize = adapter->ReceiveBufferSize; | ||
3910 | u64 Paddr; | ||
3911 | void *temp_RcvBlock; | ||
3912 | struct sxg_rcv_block_hdr *RcvBlockHdr; | ||
3913 | struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr; | ||
3914 | struct sxg_rcv_descriptor_block *RcvDescriptorBlock; | ||
3915 | struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr; | ||
3916 | |||
3917 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk", | ||
3918 | adapter, RcvBlock, Length, 0); | ||
3919 | if (RcvBlock == NULL) { | ||
3920 | goto fail; | ||
3921 | } | ||
3922 | memset(RcvBlock, 0, Length); | ||
3923 | ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) || | ||
3924 | (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); | ||
3925 | ASSERT(Length == SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE)); | ||
3926 | /* | ||
3927 | * First, initialize the contained pool of receive data buffers. | ||
3928 | * This initialization requires NBL/NB/MDL allocations, if any of them | ||
3929 | * fail, free the block and return without queueing the shared memory | ||
3930 | */ | ||
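| /* | ||
|  * Layout note: this single contiguous DMA allocation holds the | ||
|  * SXG_RCV_DESCRIPTORS_PER_BLOCK receive data buffer headers (walked from | ||
|  * the start of the block below), plus the receive descriptor block, its | ||
|  * header, and the block header, each located through the corresponding | ||
|  * SXG_RCV_*_OFFSET macro; the exact packing is defined by those macros in | ||
|  * the driver headers. | ||
|  */ | ||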
3931 | //RcvDataBuffer = RcvBlock; | ||
3932 | temp_RcvBlock = RcvBlock; | ||
3933 | for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; | ||
3934 | i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) { | ||
3935 | RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) | ||
3936 | temp_RcvBlock; | ||
3937 | /* For FREE macro assertion */ | ||
3938 | RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; | ||
3939 | SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize); | ||
3940 | if (RcvDataBufferHdr->SxgDumbRcvPacket == NULL) | ||
3941 | goto fail; | ||
3942 | |||
3943 | } | ||
3944 | |||
3945 | /* | ||
3946 | * Place this entire block of memory on the AllRcvBlocks queue so it | ||
3947 | * can be freed later | ||
3948 | */ | ||
3949 | |||
3950 | RcvBlockHdr = (struct sxg_rcv_block_hdr *) ((unsigned char *)RcvBlock + | ||
3951 | SXG_RCV_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE)); | ||
3952 | RcvBlockHdr->VirtualAddress = RcvBlock; | ||
3953 | RcvBlockHdr->PhysicalAddress = PhysicalAddress; | ||
3954 | spin_lock(&adapter->RcvQLock); | ||
3955 | adapter->AllRcvBlockCount++; | ||
3956 | InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList); | ||
3957 | spin_unlock(&adapter->RcvQLock); | ||
3958 | |||
3959 | /* Now free the contained receive data buffers that we | ||
3960 | * initialized above */ | ||
3961 | temp_RcvBlock = RcvBlock; | ||
3962 | for (i = 0, Paddr = PhysicalAddress; | ||
3963 | i < SXG_RCV_DESCRIPTORS_PER_BLOCK; | ||
3964 | i++, Paddr += SXG_RCV_DATA_HDR_SIZE, | ||
3965 | temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) { | ||
3966 | RcvDataBufferHdr = | ||
3967 | (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock; | ||
3968 | spin_lock(&adapter->RcvQLock); | ||
3969 | SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr); | ||
3970 | spin_unlock(&adapter->RcvQLock); | ||
3971 | } | ||
3972 | |||
3973 | /* Locate the descriptor block and put it on a separate free queue */ | ||
3974 | RcvDescriptorBlock = | ||
3975 | (struct sxg_rcv_descriptor_block *) ((unsigned char *)RcvBlock + | ||
3976 | SXG_RCV_DESCRIPTOR_BLOCK_OFFSET | ||
3977 | (SXG_RCV_DATA_HDR_SIZE)); | ||
3978 | RcvDescriptorBlockHdr = | ||
3979 | (struct sxg_rcv_descriptor_block_hdr *) ((unsigned char *)RcvBlock + | ||
3980 | SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET | ||
3981 | (SXG_RCV_DATA_HDR_SIZE)); | ||
3982 | RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock; | ||
3983 | RcvDescriptorBlockHdr->PhysicalAddress = Paddr; | ||
3984 | spin_lock(&adapter->RcvQLock); | ||
3985 | SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr); | ||
3986 | spin_unlock(&adapter->RcvQLock); | ||
3987 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk", | ||
3988 | adapter, RcvBlock, Length, 0); | ||
3989 | return STATUS_SUCCESS; | ||
3990 | fail: | ||
3991 | /* Free any allocated resources */ | ||
3992 | if (RcvBlock) { | ||
3993 | temp_RcvBlock = RcvBlock; | ||
3994 | for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; | ||
3995 | i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) { | ||
3996 | RcvDataBufferHdr = | ||
3997 | (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock; | ||
3998 | SXG_FREE_RCV_PACKET(RcvDataBufferHdr); | ||
3999 | } | ||
4000 | pci_free_consistent(adapter->pcidev, | ||
4001 | Length, RcvBlock, PhysicalAddress); | ||
4002 | } | ||
4003 | DBG_ERROR("%s: OUT OF RESOURCES\n", __func__); | ||
4004 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail", | ||
4005 | adapter, adapter->FreeRcvBufferCount, | ||
4006 | adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount); | ||
4007 | adapter->Stats.NoMem++; | ||
4008 | /* As allocation failed, free all previously allocated blocks..*/ | ||
4009 | //sxg_free_rcvblocks(adapter); | ||
4010 | |||
4011 | return STATUS_RESOURCES; | ||
4012 | } | ||
4013 | |||
4014 | /* | ||
4015 | * sxg_allocate_sgl_buffer_complete - Complete a SGL buffer allocation | ||
4016 | * | ||
4017 | * Arguments - | ||
4018 | * adapter - A pointer to our adapter structure | ||
4019 | * SxgSgl - struct sxg_scatter_gather buffer | ||
4020 | * PhysicalAddress - Physical address | ||
4021 | * Length - Memory length | ||
4022 | * | ||
4023 | * Return | ||
4024 | */ | ||
4025 | static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter, | ||
4026 | struct sxg_scatter_gather *SxgSgl, | ||
4027 | dma_addr_t PhysicalAddress, | ||
4028 | u32 Length) | ||
4029 | { | ||
4030 | unsigned long sgl_flags; | ||
4031 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp", | ||
4032 | adapter, SxgSgl, Length, 0); | ||
4033 | spin_lock_irqsave(&adapter->SglQLock, sgl_flags); | ||
4034 | adapter->AllSglBufferCount++; | ||
4035 | /* PhysicalAddress; */ | ||
4036 | SxgSgl->PhysicalAddress = PhysicalAddress; | ||
4037 | /* Initialize backpointer once */ | ||
4038 | SxgSgl->adapter = adapter; | ||
4039 | InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList); | ||
4040 | spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags); | ||
4041 | SxgSgl->State = SXG_BUFFER_BUSY; | ||
4042 | SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL); | ||
4043 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl", | ||
4044 | adapter, SxgSgl, Length, 0); | ||
4045 | } | ||
4046 | |||
4047 | |||
4048 | static int sxg_adapter_set_hwaddr(struct adapter_t *adapter) | ||
4049 | { | ||
4050 | /* | ||
4051 | * DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] \ | ||
4052 | * funct#[%d]\n", __func__, card->config_set, | ||
4053 | * adapter->port, adapter->physport, adapter->functionnumber); | ||
4054 | * | ||
4055 | * sxg_dbg_macaddrs(adapter); | ||
4056 | */ | ||
4057 | /* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", | ||
4058 | * __func__); | ||
4059 | */ | ||
4060 | |||
4061 | /* sxg_dbg_macaddrs(adapter); */ | ||
4062 | |||
4063 | struct net_device * dev = adapter->netdev; | ||
4064 | if (!dev) { | ||
4065 | printk("sxg: Dev is Null\n"); | ||
4066 | return -EBUSY; /* avoid dereferencing a NULL netdev below */ | ||
4067 | } | ||
4068 | |||
4069 | DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name); | ||
4070 | |||
4071 | if (netif_running(dev)) { | ||
4072 | return -EBUSY; | ||
4073 | } | ||
4074 | if (!adapter) { | ||
4075 | return -EBUSY; | ||
4076 | } | ||
4077 | |||
4078 | if (!(adapter->currmacaddr[0] || | ||
4079 | adapter->currmacaddr[1] || | ||
4080 | adapter->currmacaddr[2] || | ||
4081 | adapter->currmacaddr[3] || | ||
4082 | adapter->currmacaddr[4] || adapter->currmacaddr[5])) { | ||
4083 | memcpy(adapter->currmacaddr, adapter->macaddr, 6); | ||
4084 | } | ||
4085 | if (adapter->netdev) { | ||
4086 | memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6); | ||
4087 | memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6); | ||
4088 | } | ||
4089 | /* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */ | ||
4090 | sxg_dbg_macaddrs(adapter); | ||
4091 | |||
4092 | return 0; | ||
4093 | } | ||
4094 | |||
4095 | #if XXXTODO | ||
4096 | static int sxg_mac_set_address(struct net_device *dev, void *ptr) | ||
4097 | { | ||
4098 | struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); | ||
4099 | struct sockaddr *addr = ptr; | ||
4100 | |||
4101 | DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name); | ||
4102 | |||
4103 | if (netif_running(dev)) { | ||
4104 | return -EBUSY; | ||
4105 | } | ||
4106 | if (!adapter) { | ||
4107 | return -EBUSY; | ||
4108 | } | ||
4109 | DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", | ||
4110 | __func__, adapter->netdev->name, adapter->currmacaddr[0], | ||
4111 | adapter->currmacaddr[1], adapter->currmacaddr[2], | ||
4112 | adapter->currmacaddr[3], adapter->currmacaddr[4], | ||
4113 | adapter->currmacaddr[5]); | ||
4114 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
4115 | memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len); | ||
4116 | DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", | ||
4117 | __func__, adapter->netdev->name, adapter->currmacaddr[0], | ||
4118 | adapter->currmacaddr[1], adapter->currmacaddr[2], | ||
4119 | adapter->currmacaddr[3], adapter->currmacaddr[4], | ||
4120 | adapter->currmacaddr[5]); | ||
4121 | |||
4122 | sxg_config_set(adapter, TRUE); | ||
4123 | return 0; | ||
4124 | } | ||
4125 | #endif | ||
4126 | |||
4127 | /* | ||
4128 | * SXG DRIVER FUNCTIONS (below) | ||
4129 | * | ||
4130 | * sxg_initialize_adapter - Initialize adapter | ||
4131 | * | ||
4132 | * Arguments - | ||
4133 | * adapter - A pointer to our adapter structure | ||
4134 | * | ||
4135 | * Return - int | ||
4136 | */ | ||
4137 | static int sxg_initialize_adapter(struct adapter_t *adapter) | ||
4138 | { | ||
4139 | u32 RssIds, IsrCount; | ||
4140 | u32 i; | ||
4141 | int status; | ||
4142 | int sxg_rcv_ring_size = SXG_RCV_RING_SIZE; | ||
4143 | |||
4144 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt", | ||
4145 | adapter, 0, 0, 0); | ||
4146 | |||
4147 | RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */ | ||
4148 | IsrCount = adapter->msi_enabled ? RssIds : 1; | ||
4149 | |||
4150 | /* | ||
4151 | * Sanity check SXG_UCODE_REGS structure definition to | ||
4152 | * make sure the length is correct | ||
4153 | */ | ||
4154 | ASSERT(sizeof(struct sxg_ucode_regs) == SXG_REGISTER_SIZE_PER_CPU); | ||
4155 | |||
4156 | /* Disable interrupts */ | ||
4157 | SXG_DISABLE_ALL_INTERRUPTS(adapter); | ||
4158 | |||
4159 | /* Set MTU */ | ||
4160 | ASSERT((adapter->FrameSize == ETHERMAXFRAME) || | ||
4161 | (adapter->FrameSize == JUMBOMAXFRAME)); | ||
4162 | WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE); | ||
4163 | |||
4164 | /* Set event ring base address and size */ | ||
4165 | WRITE_REG64(adapter, | ||
4166 | adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0); | ||
4167 | WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE); | ||
4168 | |||
4169 | /* Per-ISR initialization */ | ||
4170 | for (i = 0; i < IsrCount; i++) { | ||
4171 | u64 Addr; | ||
4172 | /* Set interrupt status pointer */ | ||
4173 | Addr = adapter->PIsr + (i * sizeof(u32)); | ||
4174 | WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i); | ||
4175 | } | ||
4176 | |||
4177 | /* XMT ring zero index */ | ||
4178 | WRITE_REG64(adapter, | ||
4179 | adapter->UcodeRegs[0].SPSendIndex, | ||
4180 | adapter->PXmtRingZeroIndex, 0); | ||
4181 | |||
4182 | /* Per-RSS initialization */ | ||
4183 | for (i = 0; i < RssIds; i++) { | ||
4184 | /* Release all event ring entries to the Microcode */ | ||
4185 | WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE, | ||
4186 | TRUE); | ||
4187 | } | ||
4188 | |||
4189 | /* Transmit ring base and size */ | ||
4190 | WRITE_REG64(adapter, | ||
4191 | adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0); | ||
4192 | WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE); | ||
4193 | |||
4194 | /* Receive ring base and size */ | ||
4195 | WRITE_REG64(adapter, | ||
4196 | adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0); | ||
4197 | if (adapter->JumboEnabled == TRUE) | ||
4198 | sxg_rcv_ring_size = SXG_JUMBO_RCV_RING_SIZE; | ||
4199 | WRITE_REG(adapter->UcodeRegs[0].RcvSize, sxg_rcv_ring_size, TRUE); | ||
4200 | |||
4201 | /* Populate the card with receive buffers */ | ||
4202 | sxg_stock_rcv_buffers(adapter); | ||
4203 | |||
4204 | /* | ||
4205 | * Initialize checksum offload capabilities. At the moment we always | ||
4206 | * enable IP and TCP receive checksums on the card. Depending on the | ||
4207 | * checksum configuration specified by the user, we can choose to | ||
4208 | * report or ignore the checksum information provided by the card. | ||
4209 | */ | ||
4210 | WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum, | ||
4211 | SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE); | ||
4212 | |||
4213 | adapter->flags |= (SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED ); | ||
4214 | |||
4215 | /* Initialize the MAC, XAUI */ | ||
4216 | DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__); | ||
4217 | status = sxg_initialize_link(adapter); | ||
4218 | DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__, | ||
4219 | status); | ||
4220 | if (status != STATUS_SUCCESS) { | ||
4221 | return (status); | ||
4222 | } | ||
4223 | /* | ||
4224 | * Initialize Dead to FALSE. | ||
4225 | * SlicCheckForHang or SlicDumpThread will take it from here. | ||
4226 | */ | ||
4227 | adapter->Dead = FALSE; | ||
4228 | adapter->PingOutstanding = FALSE; | ||
4229 | adapter->XmtFcEnabled = TRUE; | ||
4230 | adapter->RcvFcEnabled = TRUE; | ||
4231 | |||
4232 | adapter->State = SXG_STATE_RUNNING; | ||
4233 | |||
4234 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInit", | ||
4235 | adapter, 0, 0, 0); | ||
4236 | return (STATUS_SUCCESS); | ||
4237 | } | ||
4238 | |||
4239 | /* | ||
4240 | * sxg_fill_descriptor_block - Populate a descriptor block and give it to | ||
4241 | * the card. The caller should hold the RcvQLock | ||
4242 | * | ||
4243 | * Arguments - | ||
4244 | * adapter - A pointer to our adapter structure | ||
4245 | * RcvDescriptorBlockHdr - Descriptor block to fill | ||
4246 | * | ||
4247 | * Return | ||
4248 | * status | ||
4249 | */ | ||
4250 | static int sxg_fill_descriptor_block(struct adapter_t *adapter, | ||
4251 | struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr) | ||
4252 | { | ||
4253 | 	int i;		/* signed: the no_memory cleanup loop counts down through 0 */ | ||
4254 | struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo; | ||
4255 | struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr; | ||
4256 | struct sxg_rcv_descriptor_block *RcvDescriptorBlock; | ||
4257 | struct sxg_cmd *RingDescriptorCmd; | ||
4258 | struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0]; | ||
4259 | |||
4260 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk", | ||
4261 | adapter, adapter->RcvBuffersOnCard, | ||
4262 | adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount); | ||
4263 | |||
4264 | ASSERT(RcvDescriptorBlockHdr); | ||
4265 | |||
4266 | /* | ||
4267 | * If we don't have the resources to fill the descriptor block, | ||
4268 | * return failure | ||
4269 | */ | ||
4270 | if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) || | ||
4271 | SXG_RING_FULL(RcvRingInfo)) { | ||
4272 | adapter->Stats.NoMem++; | ||
4273 | return (STATUS_FAILURE); | ||
4274 | } | ||
4275 | /* Get a ring descriptor command */ | ||
4276 | SXG_GET_CMD(RingZero, | ||
4277 | RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr); | ||
4278 | ASSERT(RingDescriptorCmd); | ||
4279 | RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD; | ||
4280 | RcvDescriptorBlock = (struct sxg_rcv_descriptor_block *) | ||
4281 | RcvDescriptorBlockHdr->VirtualAddress; | ||
4282 | |||
4283 | /* Fill in the descriptor block */ | ||
4284 | for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) { | ||
4285 | SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr); | ||
4286 | ASSERT(RcvDataBufferHdr); | ||
4287 | // ASSERT(RcvDataBufferHdr->SxgDumbRcvPacket); | ||
4288 | if (!RcvDataBufferHdr->SxgDumbRcvPacket) { | ||
4289 | SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, | ||
4290 | adapter->ReceiveBufferSize); | ||
4291 | if(RcvDataBufferHdr->skb) | ||
4292 | RcvDataBufferHdr->SxgDumbRcvPacket = | ||
4293 | RcvDataBufferHdr->skb; | ||
4294 | else | ||
4295 | goto no_memory; | ||
4296 | } | ||
4297 | SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket); | ||
4298 | RcvDataBufferHdr->State = SXG_BUFFER_ONCARD; | ||
4299 | RcvDescriptorBlock->Descriptors[i].VirtualAddress = | ||
4300 | (void *)RcvDataBufferHdr; | ||
4301 | |||
4302 | RcvDescriptorBlock->Descriptors[i].PhysicalAddress = | ||
4303 | RcvDataBufferHdr->PhysicalAddress; | ||
4304 | } | ||
4305 | /* Add the descriptor block to receive descriptor ring 0 */ | ||
4306 | RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress; | ||
4307 | |||
4308 | /* | ||
4309 | * RcvBuffersOnCard is not protected via the receive lock (see | ||
4310 | 	 * sxg_process_event_queue). We don't want to grab a lock every time a | ||
4311 | * buffer is returned to us, so we use atomic interlocked functions | ||
4312 | * instead. | ||
4313 | */ | ||
4314 | adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK; | ||
4315 | |||
4316 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk", | ||
4317 | RcvDescriptorBlockHdr, | ||
4318 | RingDescriptorCmd, RcvRingInfo->Head, RcvRingInfo->Tail); | ||
4319 | |||
4320 | WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 1, true); | ||
4321 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlk", | ||
4322 | adapter, adapter->RcvBuffersOnCard, | ||
4323 | adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount); | ||
4324 | return (STATUS_SUCCESS); | ||
4325 | no_memory: | ||
4326 | for (; i >= 0 ; i--) { | ||
4327 | if (RcvDescriptorBlock->Descriptors[i].VirtualAddress) { | ||
4328 | RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) | ||
4329 | RcvDescriptorBlock->Descriptors[i]. | ||
4330 | VirtualAddress; | ||
4331 | RcvDescriptorBlock->Descriptors[i].PhysicalAddress = | ||
4332 | (dma_addr_t)NULL; | ||
4333 | RcvDescriptorBlock->Descriptors[i].VirtualAddress=NULL; | ||
4334 | } | ||
4335 | SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr); | ||
4336 | } | ||
4337 | RcvDescriptorBlockHdr->State = SXG_BUFFER_FREE; | ||
4338 | SXG_RETURN_CMD(RingZero, RcvRingInfo, RingDescriptorCmd, | ||
4339 | RcvDescriptorBlockHdr); | ||
4340 | |||
4341 | return (-ENOMEM); | ||
4342 | } | ||
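The comment just above says RcvBuffersOnCard is maintained with "atomic interlocked functions", though the update in this function is a plain += on a u32. A sketch of the interlocked variant the comment describes, assuming a hypothetical atomic_t field, would be:

	/* Sketch only: RcvBuffersOnCard kept as an atomic_t (hypothetical field). */
	atomic_add(SXG_RCV_DESCRIPTORS_PER_BLOCK, &adapter->rcv_buffers_on_card);
	/* ...and atomic_dec(&adapter->rcv_buffers_on_card) as each buffer
	 * comes back from the card in the event-processing path. */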
4343 | |||
4344 | /* | ||
4345 | * sxg_stock_rcv_buffers - Stock the card with receive buffers | ||
4346 | * | ||
4347 | * Arguments - | ||
4348 | * adapter - A pointer to our adapter structure | ||
4349 | * | ||
4350 | * Return | ||
4351 | * None | ||
4352 | */ | ||
4353 | static void sxg_stock_rcv_buffers(struct adapter_t *adapter) | ||
4354 | { | ||
4355 | struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr; | ||
4356 | int sxg_rcv_data_buffers = SXG_RCV_DATA_BUFFERS; | ||
4357 | int sxg_min_rcv_data_buffers = SXG_MIN_RCV_DATA_BUFFERS; | ||
4358 | |||
4359 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf", | ||
4360 | adapter, adapter->RcvBuffersOnCard, | ||
4361 | adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount); | ||
4362 | /* | ||
4363 | * First, see if we've got less than our minimum threshold of | ||
4364 | * receive buffers, there isn't an allocation in progress, and | ||
4365 | 	 * we haven't exceeded our maximum. If so, get another block of | ||
4366 | 	 * buffers. None of this needs to be SMP safe; these are round numbers. | ||
4367 | */ | ||
4368 | if (adapter->JumboEnabled == TRUE) | ||
4369 | sxg_min_rcv_data_buffers = SXG_MIN_JUMBO_RCV_DATA_BUFFERS; | ||
4370 | if ((adapter->FreeRcvBufferCount < sxg_min_rcv_data_buffers) && | ||
4371 | (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) && | ||
4372 | (atomic_read(&adapter->pending_allocations) == 0)) { | ||
4373 | sxg_allocate_buffer_memory(adapter, | ||
4374 | SXG_RCV_BLOCK_SIZE | ||
4375 | (SXG_RCV_DATA_HDR_SIZE), | ||
4376 | SXG_BUFFER_TYPE_RCV); | ||
4377 | } | ||
4378 | /* Now grab the RcvQLock lock and proceed */ | ||
4379 | spin_lock(&adapter->RcvQLock); | ||
4380 | if (adapter->JumboEnabled) | ||
4381 | sxg_rcv_data_buffers = SXG_JUMBO_RCV_DATA_BUFFERS; | ||
4382 | while (adapter->RcvBuffersOnCard < sxg_rcv_data_buffers) { | ||
4383 | struct list_entry *_ple; | ||
4384 | |||
4385 | /* Get a descriptor block */ | ||
4386 | RcvDescriptorBlockHdr = NULL; | ||
4387 | if (adapter->FreeRcvBlockCount) { | ||
4388 | _ple = RemoveHeadList(&adapter->FreeRcvBlocks); | ||
4389 | RcvDescriptorBlockHdr = | ||
4390 | container_of(_ple, struct sxg_rcv_descriptor_block_hdr, | ||
4391 | FreeList); | ||
4392 | adapter->FreeRcvBlockCount--; | ||
4393 | RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY; | ||
4394 | } | ||
4395 | |||
4396 | if (RcvDescriptorBlockHdr == NULL) { | ||
4397 | /* Bail out.. */ | ||
4398 | adapter->Stats.NoMem++; | ||
4399 | break; | ||
4400 | } | ||
4401 | /* Fill in the descriptor block and give it to the card */ | ||
4402 | if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) == | ||
4403 | STATUS_FAILURE) { | ||
4404 | /* Free the descriptor block */ | ||
4405 | SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, | ||
4406 | RcvDescriptorBlockHdr); | ||
4407 | break; | ||
4408 | } | ||
4409 | } | ||
4410 | spin_unlock(&adapter->RcvQLock); | ||
4411 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlks", | ||
4412 | adapter, adapter->RcvBuffersOnCard, | ||
4413 | adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount); | ||
4414 | } | ||
4415 | |||
4416 | /* | ||
4417 | * sxg_complete_descriptor_blocks - Return descriptor blocks that have been | ||
4418 | * completed by the microcode | ||
4419 | * | ||
4420 | * Arguments - | ||
4421 | * adapter - A pointer to our adapter structure | ||
4422 | * Index - Where the microcode is up to | ||
4423 | * | ||
4424 | * Return | ||
4425 | * None | ||
4426 | */ | ||
4427 | static void sxg_complete_descriptor_blocks(struct adapter_t *adapter, | ||
4428 | unsigned char Index) | ||
4429 | { | ||
4430 | struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0]; | ||
4431 | struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo; | ||
4432 | struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr; | ||
4433 | struct sxg_cmd *RingDescriptorCmd; | ||
4434 | |||
4435 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks", | ||
4436 | adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail); | ||
4437 | |||
4438 | /* Now grab the RcvQLock lock and proceed */ | ||
4439 | spin_lock(&adapter->RcvQLock); | ||
4440 | ASSERT(Index != RcvRingInfo->Tail); | ||
4441 | while (sxg_ring_get_forward_diff(RcvRingInfo, Index, | ||
4442 | RcvRingInfo->Tail) > 3) { | ||
4443 | /* | ||
4444 | * Locate the current Cmd (ring descriptor entry), and | ||
4445 | * associated receive descriptor block, and advance | ||
4446 | * the tail | ||
4447 | */ | ||
4448 | SXG_RETURN_CMD(RingZero, | ||
4449 | RcvRingInfo, | ||
4450 | RingDescriptorCmd, RcvDescriptorBlockHdr); | ||
4451 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlk", | ||
4452 | RcvRingInfo->Head, RcvRingInfo->Tail, | ||
4453 | RingDescriptorCmd, RcvDescriptorBlockHdr); | ||
4454 | |||
4455 | /* Clear the SGL field */ | ||
4456 | RingDescriptorCmd->Sgl = 0; | ||
4457 | /* | ||
4458 | * Attempt to refill it and hand it right back to the | ||
4459 | * card. If we fail to refill it, free the descriptor block | ||
4460 | * header. The card will be restocked later via the | ||
4461 | * RcvBuffersOnCard test | ||
4462 | */ | ||
4463 | if (sxg_fill_descriptor_block(adapter, | ||
4464 | RcvDescriptorBlockHdr) == STATUS_FAILURE) | ||
4465 | SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, | ||
4466 | RcvDescriptorBlockHdr); | ||
4467 | } | ||
4468 | spin_unlock(&adapter->RcvQLock); | ||
4469 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks", | ||
4470 | adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail); | ||
4471 | } | ||
4472 | |||
4473 | /* | ||
4474 | * Read the statistics which the card has been maintaining. | ||
4475 | */ | ||
4476 | void sxg_collect_statistics(struct adapter_t *adapter) | ||
4477 | { | ||
4478 | 	if (!adapter->ucode_stats) | ||
4479 | 		return; | ||
4480 | 	WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats, adapter->pucode_stats, 0); | ||
4481 | 	adapter->stats.rx_fifo_errors = adapter->ucode_stats->ERDrops; | ||
4482 | 	adapter->stats.rx_over_errors = adapter->ucode_stats->NBDrops; | ||
4483 | 	adapter->stats.tx_fifo_errors = adapter->ucode_stats->XDrops; | ||
4484 | } | ||
4485 | |||
4486 | static struct net_device_stats *sxg_get_stats(struct net_device * dev) | ||
4487 | { | ||
4488 | struct adapter_t *adapter = netdev_priv(dev); | ||
4489 | |||
4490 | sxg_collect_statistics(adapter); | ||
4491 | return (&adapter->stats); | ||
4492 | } | ||
4493 | |||
4494 | static void sxg_watchdog(unsigned long data) | ||
4495 | { | ||
4496 | struct adapter_t *adapter = (struct adapter_t *) data; | ||
4497 | |||
4498 | if (adapter->state != ADAPT_DOWN) { | ||
4499 | sxg_link_event(adapter); | ||
4500 | /* Reset the timer */ | ||
4501 | mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); | ||
4502 | } | ||
4503 | } | ||
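sxg_watchdog() re-arms itself every two seconds for as long as the adapter is up; the code that binds the handler and starts the first tick is not in this hunk. Based on the (unsigned long) cast above, the setup presumably follows the usual timer pattern, roughly:

	/* Presumed initialization elsewhere in sxg.c (not shown in this hunk). */
	setup_timer(&adapter->watchdog_timer, sxg_watchdog, (unsigned long)adapter);
	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));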
4504 | |||
4505 | static void sxg_update_link_status (struct work_struct *work) | ||
4506 | { | ||
4507 | struct adapter_t *adapter = (struct adapter_t *)container_of | ||
4508 | (work, struct adapter_t, update_link_status); | ||
4509 | if (likely(adapter->link_status_changed)) { | ||
4510 | sxg_link_event(adapter); | ||
4511 | adapter->link_status_changed = 0; | ||
4512 | } | ||
4513 | } | ||
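Link events are detected in interrupt context but handled here in process context via the update_link_status work item. The scheduling side is not part of this hunk; the conventional counterpart would look roughly like the sketch below (assumed, not taken from the patch):

	INIT_WORK(&adapter->update_link_status, sxg_update_link_status);
	/* ...then, from the interrupt/event path when the link changes: */
	adapter->link_status_changed = 1;
	schedule_work(&adapter->update_link_status);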
4514 | |||
4515 | static struct pci_driver sxg_driver = { | ||
4516 | .name = sxg_driver_name, | ||
4517 | .id_table = sxg_pci_tbl, | ||
4518 | .probe = sxg_entry_probe, | ||
4519 | .remove = __devexit_p(sxg_entry_remove), | ||
4520 | #if SXG_POWER_MANAGEMENT_ENABLED | ||
4521 | .suspend = sxgpm_suspend, | ||
4522 | .resume = sxgpm_resume, | ||
4523 | #endif | ||
4524 | /* .shutdown = slic_shutdown, MOOK_INVESTIGATE */ | ||
4525 | }; | ||
4526 | |||
4527 | static int __init sxg_module_init(void) | ||
4528 | { | ||
4529 | sxg_init_driver(); | ||
4530 | |||
4531 | if (debug >= 0) | ||
4532 | sxg_debug = debug; | ||
4533 | |||
4534 | return pci_register_driver(&sxg_driver); | ||
4535 | } | ||
4536 | |||
4537 | static void __exit sxg_module_cleanup(void) | ||
4538 | { | ||
4539 | pci_unregister_driver(&sxg_driver); | ||
4540 | } | ||
4541 | |||
4542 | module_init(sxg_module_init); | ||
4543 | module_exit(sxg_module_cleanup); | ||
diff --git a/drivers/staging/sxg/sxg.h b/drivers/staging/sxg/sxg.h deleted file mode 100644 index 110096a5c52f..000000000000 --- a/drivers/staging/sxg/sxg.h +++ /dev/null | |||
@@ -1,787 +0,0 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2000-2008 Alacritech, Inc. All rights reserved. | ||
4 | * | ||
5 | * $Id: sxg.h,v 1.3 2008/07/24 17:25:08 chris Exp $ | ||
6 | * | ||
7 | * Redistribution and use in source and binary forms, with or without | ||
8 | * modification, are permitted provided that the following conditions | ||
9 | * are met: | ||
10 | * | ||
11 | * 1. Redistributions of source code must retain the above copyright | ||
12 | * notice, this list of conditions and the following disclaimer. | ||
13 | * 2. Redistributions in binary form must reproduce the above | ||
14 | * copyright notice, this list of conditions and the following | ||
15 | * disclaimer in the documentation and/or other materials provided | ||
16 | * with the distribution. | ||
17 | * | ||
18 | * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY | ||
19 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
20 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | ||
21 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR | ||
22 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
25 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | ||
27 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | ||
28 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
29 | * SUCH DAMAGE. | ||
30 | * | ||
31 | * The views and conclusions contained in the software and documentation | ||
32 | * are those of the authors and should not be interpreted as representing | ||
33 | * official policies, either expressed or implied, of Alacritech, Inc. | ||
34 | * | ||
35 | **************************************************************************/ | ||
36 | |||
37 | /* | ||
38 | * FILENAME: sxg.h | ||
39 | * | ||
40 | * This is the base set of header definitions for the SXG driver. | ||
41 | */ | ||
42 | #ifndef __SXG_DRIVER_H__ | ||
43 | #define __SXG_DRIVER_H__ | ||
44 | |||
45 | #define SLIC_DUMP_ENABLED 0 | ||
46 | |||
47 | #define SXG_DRV_NAME "sxg" /* TBD: This might be removed eventually */ | ||
48 | #define SXG_DRV_VERSION "1.0.1" | ||
49 | |||
50 | extern char sxg_driver_name[]; | ||
51 | |||
52 | #define SXG_NETDEV_WEIGHT 64 | ||
53 | |||
54 | /* | ||
55 | * struct sxg_stats - Probably move these to someplace where | ||
56 | * the slicstat (sxgstat?) program can get them. | ||
57 | */ | ||
58 | struct sxg_stats { | ||
59 | /* Xmt */ | ||
60 | u64 DumbXmtUcastPkts; /* directed packets */ | ||
61 | u64 DumbXmtMcastPkts; /* Multicast packets */ | ||
62 | u64 DumbXmtBcastPkts; /* OID_GEN_BROADCAST_FRAMES_RCV */ | ||
63 | u64 DumbXmtUcastBytes; /* OID_GEN_DIRECTED_BYTES_XMIT */ | ||
64 | u64 DumbXmtMcastBytes; /* OID_GEN_MULTICAST_BYTES_XMIT */ | ||
65 | u64 DumbXmtBcastBytes; /* OID_GEN_BROADCAST_BYTES_XMIT */ | ||
66 | u64 XmtQLen; /* OID_GEN_TRANSMIT_QUEUE_LENGTH */ | ||
67 | u64 XmtZeroFull; /* Transmit ring zero full */ | ||
68 | /* Rcv */ | ||
69 | u64 DumbRcvUcastBytes; /* OID_GEN_DIRECTED_BYTES_RCV */ | ||
70 | u64 DumbRcvMcastBytes; /* OID_GEN_MULTICAST_BYTES_RCV */ | ||
71 | u64 DumbRcvBcastBytes; /* OID_GEN_BROADCAST_BYTES_RCV */ | ||
72 | u64 DumbRcvUcastPkts; /* directed packets */ | ||
73 | u64 DumbRcvMcastPkts; /* Multicast packets */ | ||
74 | u64 DumbRcvBcastPkts; /* OID_GEN_BROADCAST_FRAMES_RCV */ | ||
75 | u64 PdqFull; /* Processed Data Queue Full */ | ||
76 | u64 EventRingFull; /* Event ring full */ | ||
77 | /* Verbose stats */ | ||
78 | u64 NoSglBuf; /* SGL buffer allocation failure */ | ||
79 | u64 NoMem; /* Memory allocation failure */ | ||
80 | u64 NumInts; /* Interrupts */ | ||
81 | u64 FalseInts; /* Interrupt with ISR == 0 */ | ||
82 | /* Sahara receive status */ | ||
83 | u64 TransportCsum; /* SXG_RCV_STATUS_TRANSPORT_CSUM */ | ||
84 | u64 TransportUflow; /* SXG_RCV_STATUS_TRANSPORT_UFLOW */ | ||
85 | u64 TransportHdrLen; /* SXG_RCV_STATUS_TRANSPORT_HDRLEN */ | ||
86 | u64 NetworkCsum; /* SXG_RCV_STATUS_NETWORK_CSUM: */ | ||
87 | u64 NetworkUflow; /* SXG_RCV_STATUS_NETWORK_UFLOW: */ | ||
88 | u64 NetworkHdrLen; /* SXG_RCV_STATUS_NETWORK_HDRLEN: */ | ||
89 | u64 Parity; /* SXG_RCV_STATUS_PARITY */ | ||
90 | u64 LinkParity; /* SXG_RCV_STATUS_LINK_PARITY: */ | ||
91 | u64 LinkEarly; /* SXG_RCV_STATUS_LINK_EARLY: */ | ||
92 | u64 LinkBufOflow; /* SXG_RCV_STATUS_LINK_BUFOFLOW: */ | ||
93 | u64 LinkCode; /* SXG_RCV_STATUS_LINK_CODE: */ | ||
94 | u64 LinkDribble; /* SXG_RCV_STATUS_LINK_DRIBBLE: */ | ||
95 | u64 LinkCrc; /* SXG_RCV_STATUS_LINK_CRC: */ | ||
96 | u64 LinkOflow; /* SXG_RCV_STATUS_LINK_OFLOW: */ | ||
97 | u64 LinkUflow; /* SXG_RCV_STATUS_LINK_UFLOW: */ | ||
98 | }; | ||
99 | |||
100 | |||
101 | /* DUMB-NIC Send path definitions */ | ||
102 | |||
103 | #define SXG_COMPLETE_DUMB_SEND(_pAdapt, _skb, _phys_addr, _size) { \ | ||
104 | ASSERT(_skb); \ | ||
105 | 	pci_unmap_single(_pAdapt->pcidev, _phys_addr, _size, PCI_DMA_TODEVICE); \ | ||
106 | dev_kfree_skb_irq(_skb); \ | ||
107 | } | ||
108 | |||
109 | #define SXG_DROP_DUMB_SEND(_pAdapt, _skb) { \ | ||
110 | ASSERT(_skb); \ | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * Locate current receive header buffer location. Use this | ||
115 | * instead of RcvDataHdr->VirtualAddress since the data | ||
116 | * may have been offset by SXG_ADVANCE_MDL_OFFSET | ||
117 | */ | ||
118 | #define SXG_RECEIVE_DATA_LOCATION(_RcvDataHdr) (_RcvDataHdr)->skb->data | ||
119 | |||
120 | /* Dumb-NIC receive processing */ | ||
121 | /* Define an SXG_PACKET as an NDIS_PACKET */ | ||
122 | #define PSXG_PACKET struct sk_buff * | ||
123 | /* Indications array size */ | ||
124 | #define SXG_RCV_ARRAYSIZE 64 | ||
125 | |||
126 | #define SXG_ALLOCATE_RCV_PACKET(_pAdapt, _RcvDataBufferHdr, BufferSize) {\ | ||
127 | struct sk_buff * skb; \ | ||
128 | skb = netdev_alloc_skb(_pAdapt->netdev, BufferSize); \ | ||
129 | if (skb) { \ | ||
130 | (_RcvDataBufferHdr)->skb = skb; \ | ||
131 | skb->next = NULL; \ | ||
132 | _RcvDataBufferHdr->PhysicalAddress = pci_map_single(adapter->pcidev,\ | ||
133 | _RcvDataBufferHdr->skb->data, BufferSize, PCI_DMA_FROMDEVICE); \ | ||
134 | if (SXG_INVALID_SGL(_RcvDataBufferHdr->PhysicalAddress,BufferSize)) \ | ||
135 | printk(KERN_EMERG "SXG_ALLOCATE_RCV_PACKET: RCV packet" \ | ||
136 | 					" non-64k boundary aligned\n"); \ | ||
137 | } else { \ | ||
138 | (_RcvDataBufferHdr)->skb = NULL; \ | ||
139 | } \ | ||
140 | } | ||
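The SXG_INVALID_SGL() test above rejects receive mappings that the KERN_EMERG message calls "non-64k boundary aligned"; the macro itself is defined in sxghif.h, outside this hunk. A check of that kind usually just compares the upper address bits of the first and last byte of the mapping, along these lines (illustrative only, not the driver's definition):

	/* Hypothetical: true if [addr, addr + len) spans a 64 KB boundary. */
	#define CROSSES_64K_BOUNDARY(_addr, _len) \
		((((u64)(_addr)) >> 16) != (((u64)(_addr) + (_len) - 1) >> 16))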
141 | |||
142 | #define SXG_FREE_RCV_PACKET(_RcvDataBufferHdr) { \ | ||
143 | if((_RcvDataBufferHdr)->skb) { \ | ||
144 | dev_kfree_skb((_RcvDataBufferHdr)->skb); \ | ||
145 | } \ | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * Macro to add an NDIS_PACKET to an indication array | ||
150 | * If we fill up our array of packet pointers, then indicate this | ||
151 | * block up now and start on a new one. | ||
152 | */ | ||
153 | #define SXG_ADD_RCV_PACKET(_pAdapt, _Packet, _PrevPacket, _IndicationList, \ | ||
154 | _NumPackets) { \ | ||
155 | (_IndicationList)[_NumPackets] = (_Packet); \ | ||
156 | (_NumPackets)++; \ | ||
157 | if((_NumPackets) == SXG_RCV_ARRAYSIZE) { \ | ||
158 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "IndicRcv", \ | ||
159 | (_NumPackets), 0, 0, 0); \ | ||
160 | netif_rx((_IndicationList),(_NumPackets)); \ | ||
161 | (_NumPackets) = 0; \ | ||
162 | } \ | ||
163 | } | ||
164 | |||
165 | #define SXG_INDICATE_PACKETS(_pAdapt, _IndicationList, _NumPackets) { \ | ||
166 | if(_NumPackets) { \ | ||
167 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "IndicRcv", \ | ||
168 | (_NumPackets), 0, 0, 0); \ | ||
169 | netif_rx((_IndicationList),(_NumPackets)); \ | ||
170 | (_NumPackets) = 0; \ | ||
171 | } \ | ||
172 | } | ||
173 | |||
174 | #define SXG_REINIATIALIZE_PACKET(_Packet) \ | ||
175 | {} /*_NdisReinitializePacket(_Packet)*/ | ||
176 | /* this is not necessary with an skb */ | ||
177 | |||
178 | /* Definitions to initialize Dumb-nic Receive NBLs */ | ||
179 | #define SXG_RCV_PACKET_BUFFER_HDR(_Packet) (((struct sxg_rcv_nbl_reserved *)\ | ||
180 | ((_Packet)->MiniportReservedEx))->RcvDataBufferHdr) | ||
181 | |||
182 | #define SXG_RCV_SET_CHECKSUM_INFO(_Packet, _Cpi) \ | ||
183 | NDIS_PER_PACKET_INFO_FROM_PACKET((_Packet), \ | ||
184 | TcpIpChecksumPacketInfo) = (PVOID)(_Cpi) | ||
185 | |||
186 | #define SXG_RCV_SET_TOEPLITZ(_Packet, _Toeplitz, _Type, _Function) { \ | ||
187 | NDIS_PACKET_SET_HASH_VALUE((_Packet), (_Toeplitz)); \ | ||
188 | NDIS_PACKET_SET_HASH_TYPE((_Packet), (_Type)); \ | ||
189 | NDIS_PACKET_SET_HASH_FUNCTION((_Packet), (_Function)); \ | ||
190 | } | ||
191 | |||
192 | #define SXG_RCV_SET_VLAN_INFO(_Packet, _VlanId, _Priority) { \ | ||
193 | NDIS_PACKET_8021Q_INFO _Packet8021qInfo; \ | ||
194 | _Packet8021qInfo.TagHeader.VlanId = (_VlanId); \ | ||
195 | _Packet8021qInfo.TagHeader.UserPriority = (_Priority); \ | ||
196 | NDIS_PER_PACKET_INFO_FROM_PACKET((_Packet), Ieee8021QNetBufferListInfo) = \ | ||
197 | _Packet8021qInfo.Value; \ | ||
198 | } | ||
199 | |||
200 | #define SXG_ADJUST_RCV_PACKET(_Packet, _RcvDataBufferHdr, _Event) { \ | ||
201 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbRcv", \ | ||
202 | (_RcvDataBufferHdr), (_Packet), \ | ||
203 | (_Event)->Status, 0); \ | ||
204 | /* ASSERT((_Event)->Length <= (_RcvDataBufferHdr)->Size); */ \ | ||
205 | skb_put(Packet, (_Event)->Length); \ | ||
206 | } | ||
207 | |||
208 | /* | ||
209 | * Macros to free a receive data buffer and receive data descriptor block | ||
210 | * NOTE - Lock must be held with RCV macros | ||
211 | */ | ||
212 | #define SXG_GET_RCV_DATA_BUFFER(_pAdapt, _Hdr) { \ | ||
213 | struct list_entry *_ple; \ | ||
214 | _Hdr = NULL; \ | ||
215 | if((_pAdapt)->FreeRcvBufferCount) { \ | ||
216 | ASSERT(!(IsListEmpty(&(_pAdapt)->FreeRcvBuffers))); \ | ||
217 | _ple = RemoveHeadList(&(_pAdapt)->FreeRcvBuffers); \ | ||
218 | (_Hdr) = container_of(_ple, struct sxg_rcv_data_buffer_hdr, \ | ||
219 | FreeList); \ | ||
220 | (_pAdapt)->FreeRcvBufferCount--; \ | ||
221 | ASSERT((_Hdr)->State == SXG_BUFFER_FREE); \ | ||
222 | } \ | ||
223 | } | ||
224 | |||
225 | #define SXG_FREE_RCV_DATA_BUFFER(_pAdapt, _Hdr) { \ | ||
226 | SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RtnDHdr", \ | ||
227 | (_Hdr), (_pAdapt)->FreeRcvBufferCount, \ | ||
228 | (_Hdr)->State, 0/*(_Hdr)->VirtualAddress*/); \ | ||
229 | /* SXG_RESTORE_MDL_OFFSET(_Hdr); */ \ | ||
230 | (_pAdapt)->FreeRcvBufferCount++; \ | ||
231 | ASSERT(((_pAdapt)->AllRcvBlockCount * SXG_RCV_DESCRIPTORS_PER_BLOCK) \ | ||
232 | >= (_pAdapt)->FreeRcvBufferCount); \ | ||
233 | ASSERT((_Hdr)->State != SXG_BUFFER_FREE); \ | ||
234 | (_Hdr)->State = SXG_BUFFER_FREE; \ | ||
235 | InsertTailList(&(_pAdapt)->FreeRcvBuffers, &((_Hdr)->FreeList)); \ | ||
236 | } | ||
237 | |||
238 | #define SXG_FREE_RCV_DESCRIPTOR_BLOCK(_pAdapt, _Hdr) { \ | ||
239 | ASSERT((_Hdr)->State != SXG_BUFFER_FREE); \ | ||
240 | (_Hdr)->State = SXG_BUFFER_FREE; \ | ||
241 | (_pAdapt)->FreeRcvBlockCount++; \ | ||
242 | ASSERT((_pAdapt)->AllRcvBlockCount >= (_pAdapt)->FreeRcvBlockCount); \ | ||
243 | InsertTailList(&(_pAdapt)->FreeRcvBlocks, &(_Hdr)->FreeList); \ | ||
244 | } | ||
245 | |||
246 | /* SGL macros */ | ||
247 | #define SXG_FREE_SGL_BUFFER(_pAdapt, _Sgl, _NB) { \ | ||
248 | spin_lock_irqsave(&(_pAdapt)->SglQLock, sgl_flags); \ | ||
249 | (_pAdapt)->FreeSglBufferCount++; \ | ||
250 | ASSERT((_pAdapt)->AllSglBufferCount >= (_pAdapt)->FreeSglBufferCount); \ | ||
251 | ASSERT(!((_Sgl)->State & SXG_BUFFER_FREE)); \ | ||
252 | (_Sgl)->State = SXG_BUFFER_FREE; \ | ||
253 | InsertTailList(&(_pAdapt)->FreeSglBuffers, &(_Sgl)->FreeList); \ | ||
254 | spin_unlock_irqrestore(&(_pAdapt)->SglQLock, sgl_flags); \ | ||
255 | } | ||
256 | |||
257 | /* | ||
258 | * Get an SGL buffer from the free queue. The first part of this macro | ||
259 | * attempts to keep ahead of buffer depletion by allocating more when | ||
260 | * we hit a minimum threshold. Note that we don't grab the lock | ||
261 | * until after that. We're dealing with round numbers here, so we don't need to, | ||
262 | * and not grabbing it avoids a possible double-trip. | ||
263 | */ | ||
264 | #define SXG_GET_SGL_BUFFER(_pAdapt, _Sgl, _irq) { \ | ||
265 | struct list_entry *_ple; \ | ||
266 | if ((_pAdapt->FreeSglBufferCount < SXG_MIN_SGL_BUFFERS) && \ | ||
267 | (_pAdapt->AllSglBufferCount < SXG_MAX_SGL_BUFFERS) && \ | ||
268 | (atomic_read(&_pAdapt->pending_allocations) == 0)) { \ | ||
269 | sxg_allocate_buffer_memory(_pAdapt, \ | ||
270 | (sizeof(struct sxg_scatter_gather) + SXG_SGL_BUF_SIZE),\ | ||
271 | SXG_BUFFER_TYPE_SGL); \ | ||
272 | } \ | ||
273 | _Sgl = NULL; \ | ||
274 | if(!_irq) \ | ||
275 | spin_lock_irqsave(&(_pAdapt)->SglQLock, sgl_flags); \ | ||
276 | else \ | ||
277 | spin_lock_irqsave(&(_pAdapt)->SglQLock, sgl_flags); \ | ||
278 | if((_pAdapt)->FreeSglBufferCount) { \ | ||
279 | ASSERT(!(IsListEmpty(&(_pAdapt)->FreeSglBuffers))); \ | ||
280 | _ple = RemoveHeadList(&(_pAdapt)->FreeSglBuffers); \ | ||
281 | (_Sgl) = container_of(_ple, struct sxg_scatter_gather, \ | ||
282 | FreeList); \ | ||
283 | (_pAdapt)->FreeSglBufferCount--; \ | ||
284 | ASSERT((_Sgl)->State == SXG_BUFFER_FREE); \ | ||
285 | (_Sgl)->State = SXG_BUFFER_BUSY; \ | ||
286 | (_Sgl)->pSgl = NULL; \ | ||
287 | } \ | ||
288 | if(!_irq) \ | ||
289 | spin_unlock_irqrestore(&(_pAdapt)->SglQLock, sgl_flags);\ | ||
290 | else \ | ||
291 | spin_unlock_irqrestore(&(_pAdapt)->SglQLock, sgl_flags);\ | ||
292 | } | ||
293 | |||
294 | /* | ||
295 | * struct sxg_multicast_address | ||
296 | * Linked list of multicast addresses. | ||
297 | */ | ||
298 | struct sxg_multicast_address { | ||
299 | unsigned char Address[6]; | ||
300 | struct sxg_multicast_address *Next; | ||
301 | }; | ||
302 | |||
303 | /* | ||
304 | * Structure to maintain chimney send and receive buffer queues. | ||
305 | * This structure maintains NET_BUFFER_LIST queues that are | ||
306 | * given to us via the Chimney MiniportTcpOffloadSend and | ||
307 | * MiniportTcpOffloadReceive routines. This structure DOES NOT | ||
308 | * manage our data buffer queue | ||
309 | */ | ||
310 | struct sxg_buffer_queue { | ||
311 | u32 Type; /* Slow or fast - See below */ | ||
312 | u32 Direction; /* Xmt or Rcv */ | ||
313 | u32 Bytes; /* Byte count */ | ||
314 | u32 * Head; /* Send queue head */ | ||
315 | u32 * Tail; /* Send queue tail */ | ||
316 | /* PNET_BUFFER_LIST NextNBL;*/ /* Short cut - next NBL */ | ||
317 | /* PNET_BUFFER NextNB; */ /* Short cut - next NB */ | ||
318 | }; | ||
319 | |||
320 | #define SXG_SLOW_SEND_BUFFER 0 | ||
321 | #define SXG_FAST_SEND_BUFFER 1 | ||
322 | #define SXG_RECEIVE_BUFFER 2 | ||
323 | |||
324 | #define SXG_INIT_BUFFER(_Buffer, _Type) { \ | ||
325 | (_Buffer)->Type = (_Type); \ | ||
326 | if((_Type) == SXG_RECEIVE_BUFFER) { \ | ||
327 | (_Buffer)->Direction = 0; \ | ||
328 | } else { \ | ||
329 | (_Buffer)->Direction = NDIS_SG_LIST_WRITE_TO_DEVICE; \ | ||
330 | } \ | ||
331 | (_Buffer)->Bytes = 0; \ | ||
332 | (_Buffer)->Head = NULL; \ | ||
333 | (_Buffer)->Tail = NULL; \ | ||
334 | } | ||
335 | |||
336 | |||
337 | #define SXG_RSS_CPU_COUNT(_pAdapt) \ | ||
338 | ((_pAdapt)->RssEnabled ? NR_CPUS : 1) | ||
339 | |||
340 | /* DRIVER and ADAPTER structures */ | ||
341 | |||
342 | /* | ||
343 | * Adapter states - These states closely match the adapter states | ||
344 | * documented in the DDK (with a few exceptions). | ||
345 | */ | ||
346 | enum SXG_STATE { | ||
347 | SXG_STATE_INITIALIZING, /* Initializing */ | ||
348 | SXG_STATE_BOOTDIAG, /* Boot-Diagnostic mode */ | ||
349 | SXG_STATE_PAUSING, /* Pausing */ | ||
350 | SXG_STATE_PAUSED, /* Paused */ | ||
351 | SXG_STATE_RUNNING, /* Running */ | ||
352 | SXG_STATE_RESETTING, /* Reset in progress */ | ||
353 | SXG_STATE_SLEEP, /* Sleeping */ | ||
354 | SXG_STATE_DIAG, /* Diagnostic mode */ | ||
355 | SXG_STATE_HALTING, /* Halting */ | ||
356 | SXG_STATE_HALTED, /* Down or not-initialized */ | ||
357 | SXG_STATE_SHUTDOWN /* shutdown */ | ||
358 | }; | ||
359 | |||
360 | /* Link state */ | ||
361 | enum SXG_LINK_STATE { | ||
362 | SXG_LINK_DOWN, | ||
363 | SXG_LINK_UP | ||
364 | }; | ||
365 | |||
366 | /* Link initialization timeout in 100us units */ | ||
367 | #define SXG_LINK_TIMEOUT 100000 /* 10 Seconds - REDUCE! */ | ||
368 | |||
369 | |||
370 | /* Microcode file selection codes */ | ||
371 | enum SXG_UCODE_SEL { | ||
372 | 	SXG_UCODE_SYSTEM,	/* System (operational) ucode */ | ||
373 | SXG_UCODE_SDIAGCPU, /* System CPU diagnostic ucode */ | ||
374 | SXG_UCODE_SDIAGSYS /* System diagnostic ucode */ | ||
375 | }; | ||
376 | |||
377 | |||
378 | #define SXG_DISABLE_ALL_INTERRUPTS(_padapt) sxg_disable_interrupt(_padapt) | ||
379 | #define SXG_ENABLE_ALL_INTERRUPTS(_padapt) sxg_enable_interrupt(_padapt) | ||
380 | |||
381 | /* This probably lives in a proto.h file. Move later */ | ||
382 | #define SXG_MULTICAST_PACKET(_pether) ((_pether)->ether_dhost[0] & 0x01) | ||
383 | #define SXG_BROADCAST_PACKET(_pether) \ | ||
384 | ((*(u32 *)(_pether)->ether_dhost == 0xFFFFFFFF) && \ | ||
385 | (*(u16 *)&(_pether)->ether_dhost[4] == 0xFFFF)) | ||
386 | |||
387 | /* For DbgPrints */ | ||
388 | #define SXG_ID DPFLTR_IHVNETWORK_ID | ||
389 | #define SXG_ERROR DPFLTR_ERROR_LEVEL | ||
390 | |||
391 | /* | ||
392 | * struct sxg_driver structure - | ||
393 | * | ||
394 | * contains information about the sxg driver. There is only | ||
395 | * one of these, and it is defined as a global. | ||
396 | */ | ||
397 | |||
398 | struct sxg_driver { | ||
399 | struct adapter_t *Adapters; /* Linked list of adapters */ | ||
400 | ushort AdapterID; /* Maintain unique adapter ID */ | ||
401 | }; | ||
402 | |||
403 | #ifdef STATUS_SUCCESS | ||
404 | #undef STATUS_SUCCESS | ||
405 | #endif | ||
406 | |||
407 | /* TODO: We need to try and use NETDEV_TX_* before posting this out */ | ||
408 | #define STATUS_SUCCESS 0 | ||
409 | #define STATUS_PENDING 0 | ||
410 | #define STATUS_FAILURE -1 | ||
411 | #define STATUS_ERROR -2 | ||
412 | #define STATUS_NOT_SUPPORTED -3 | ||
413 | #define STATUS_BUFFER_TOO_SHORT -4 | ||
414 | #define STATUS_RESOURCES -5 | ||
415 | |||
416 | #define SLIC_MAX_CARDS 32 | ||
417 | #define SLIC_MAX_PORTS 4 /* Max # of ports per card */ | ||
418 | #if SLIC_DUMP_ENABLED | ||
419 | |||
420 | /* | ||
421 | * Dump buffer size | ||
422 | * This cannot be bigger than the max DMA size the card supports, | ||
423 | * given the current code structure in the host and ucode. | ||
424 | * Mojave supports 16K, Oasis supports 16K-1, so | ||
425 | * just set this at 15K; it shouldn't make much of a difference. | ||
426 | */ | ||
427 | #define DUMP_BUF_SIZE 0x3C00 | ||
428 | #endif | ||
429 | |||
430 | #define MIN(a, b) ((u32)(a) < (u32)(b) ? (a) : (b)) | ||
431 | #define MAX(a, b) ((u32)(a) > (u32)(b) ? (a) : (b)) | ||
432 | |||
433 | struct mcast_address { | ||
434 | unsigned char address[6]; | ||
435 | struct mcast_address *next; | ||
436 | }; | ||
437 | |||
438 | #define CARD_DOWN 0x00000000 | ||
439 | #define CARD_UP 0x00000001 | ||
440 | #define CARD_FAIL 0x00000002 | ||
441 | #define CARD_DIAG 0x00000003 | ||
442 | #define CARD_SLEEP 0x00000004 | ||
443 | |||
444 | #define ADAPT_DOWN 0x00 | ||
445 | #define ADAPT_UP 0x01 | ||
446 | #define ADAPT_FAIL 0x02 | ||
447 | #define ADAPT_RESET 0x03 | ||
448 | #define ADAPT_SLEEP 0x04 | ||
449 | |||
450 | #define ADAPT_FLAGS_BOOTTIME 0x0001 | ||
451 | #define ADAPT_FLAGS_IS64BIT 0x0002 | ||
452 | #define ADAPT_FLAGS_PENDINGLINKDOWN 0x0004 | ||
453 | #define ADAPT_FLAGS_FIBERMEDIA 0x0008 | ||
454 | #define ADAPT_FLAGS_LOCKS_ALLOCED 0x0010 | ||
455 | #define ADAPT_FLAGS_INT_REGISTERED 0x0020 | ||
456 | #define ADAPT_FLAGS_LOAD_TIMER_SET 0x0040 | ||
457 | #define ADAPT_FLAGS_STATS_TIMER_SET 0x0080 | ||
458 | #define ADAPT_FLAGS_RESET_TIMER_SET 0x0100 | ||
459 | |||
460 | #define LINK_DOWN 0x00 | ||
461 | #define LINK_CONFIG 0x01 | ||
462 | #define LINK_UP 0x02 | ||
463 | |||
464 | #define LINK_10MB 0x00 | ||
465 | #define LINK_100MB 0x01 | ||
466 | #define LINK_AUTOSPEED 0x02 | ||
467 | #define LINK_1000MB 0x03 | ||
468 | #define LINK_10000MB 0x04 | ||
469 | |||
470 | #define LINK_HALFD 0x00 | ||
471 | #define LINK_FULLD 0x01 | ||
472 | #define LINK_AUTOD 0x02 | ||
473 | |||
474 | #define MAC_DIRECTED 0x00000001 | ||
475 | #define MAC_BCAST 0x00000002 | ||
476 | #define MAC_MCAST 0x00000004 | ||
477 | #define MAC_PROMISC 0x00000008 | ||
478 | #define MAC_LOOPBACK 0x00000010 | ||
479 | #define MAC_ALLMCAST 0x00000020 | ||
480 | |||
481 | #define SLIC_DUPLEX(x) ((x==LINK_FULLD) ? "FDX" : "HDX") | ||
482 | #define SLIC_SPEED(x) ((x==LINK_100MB) ? "100Mb" : \ | ||
483 | ((x==LINK_1000MB) ? "1000Mb" : " 10Mb")) | ||
484 | #define SLIC_LINKSTATE(x) ((x==LINK_DOWN) ? "Down" : "Up ") | ||
485 | #define SLIC_ADAPTER_STATE(x) ((x==ADAPT_UP) ? "UP" : "Down") | ||
486 | #define SLIC_CARD_STATE(x) ((x==CARD_UP) ? "UP" : "Down") | ||
487 | |||
488 | |||
489 | struct ether_header { | ||
490 | unsigned char ether_dhost[6]; | ||
491 | unsigned char ether_shost[6]; | ||
492 | ushort ether_type; | ||
493 | }; | ||
494 | |||
495 | |||
496 | #define NUM_CFG_SPACES 2 | ||
497 | #define NUM_CFG_REGS 64 | ||
498 | |||
499 | /* | ||
500 | * We split LSS sends across four microcode queues derived from | ||
501 | * destination TCP port (if TCP/IP). | ||
502 | */ | ||
503 | #define SXG_LARGE_SEND_QUEUE_MASK 0x3 | ||
504 | #define ISCSI_PORT 0xbc0c /* 3260 */ | ||
505 | |||
506 | struct physcard { | ||
507 | struct adapter_t *adapter[SLIC_MAX_PORTS]; | ||
508 | struct physcard *next; | ||
509 | unsigned int adapters_allocd; | ||
510 | }; | ||
511 | |||
512 | struct sxgbase_driver { | ||
513 | spinlock_t driver_lock; | ||
514 | unsigned long flags; /* irqsave for spinlock */ | ||
515 | u32 num_sxg_cards; | ||
516 | u32 num_sxg_ports; | ||
517 | u32 num_sxg_ports_active; | ||
518 | u32 dynamic_intagg; | ||
519 | struct physcard *phys_card; | ||
520 | }; | ||
521 | |||
522 | |||
523 | struct adapter_t { | ||
524 | void * ifp; | ||
525 | unsigned int port; | ||
526 | struct napi_struct napi; | ||
527 | struct physcard *physcard; | ||
528 | unsigned int physport; | ||
529 | unsigned int slotnumber; | ||
530 | unsigned int functionnumber; | ||
531 | ushort vendid; | ||
532 | ushort devid; | ||
533 | ushort subsysid; | ||
534 | u32 irq; | ||
535 | |||
536 | void __iomem * base_addr; | ||
537 | u32 memorylength; | ||
538 | u32 drambase; | ||
539 | u32 dramlength; | ||
540 | enum asic_type asictype; /* type of ASIC (chip) */ | ||
541 | unsigned int activated; | ||
542 | u32 intrregistered; | ||
543 | unsigned int isp_initialized; | ||
544 | unsigned char state; | ||
545 | unsigned char linkstate; | ||
546 | unsigned int flags; | ||
547 | unsigned char macaddr[6]; | ||
548 | unsigned char currmacaddr[6]; | ||
549 | u32 macopts; | ||
550 | ushort devflags_prev; | ||
551 | u64 mcastmask; | ||
552 | struct mcast_address *mcastaddrs; | ||
553 | struct timer_list pingtimer; | ||
554 | u32 pingtimerset; | ||
555 | struct timer_list statstimer; | ||
556 | u32 statstimerset; | ||
557 | struct timer_list vpci_timer; | ||
558 | u32 vpci_timerset; | ||
559 | struct timer_list loadtimer; | ||
560 | u32 loadtimerset; | ||
561 | |||
562 | u32 xmitq_full; | ||
563 | u32 all_reg_writes; | ||
564 | u32 icr_reg_writes; | ||
565 | u32 isr_reg_writes; | ||
566 | u32 error_interrupts; | ||
567 | u32 error_rmiss_interrupts; | ||
568 | u32 rx_errors; | ||
569 | u32 rcv_drops; | ||
570 | u32 rcv_interrupts; | ||
571 | u32 xmit_interrupts; | ||
572 | u32 linkevent_interrupts; | ||
573 | u32 upr_interrupts; | ||
574 | u32 num_isrs; | ||
575 | u32 false_interrupts; | ||
576 | u32 tx_packets; | ||
577 | u32 xmit_completes; | ||
578 | u32 tx_drops; | ||
579 | u32 rcv_broadcasts; | ||
580 | u32 rcv_multicasts; | ||
581 | u32 rcv_unicasts; | ||
582 | u32 max_isr_rcvs; | ||
583 | u32 max_isr_xmits; | ||
584 | u32 rcv_interrupt_yields; | ||
585 | u32 intagg_period; | ||
586 | struct net_device_stats stats; | ||
587 | u32 * MiniportHandle; /* Our miniport handle */ | ||
588 | enum SXG_STATE State; /* Adapter state */ | ||
589 | enum SXG_LINK_STATE LinkState; /* Link state */ | ||
590 | u64 LinkSpeed; /* Link Speed */ | ||
591 | u32 PowerState; /* NDIS power state */ | ||
592 | struct adapter_t *Next; /* Linked list */ | ||
593 | ushort AdapterID; /* 1..n */ | ||
594 | struct net_device * netdev; | ||
595 | struct net_device * next_netdevice; | ||
596 | struct pci_dev *pcidev; | ||
597 | |||
598 | struct sxg_multicast_address *MulticastAddrs; /* Multicast list */ | ||
599 | u64 MulticastMask; /* Multicast mask */ | ||
600 | u32 *InterruptHandle; /* Register Interrupt handle */ | ||
601 | u32 InterruptLevel; /* From Resource list */ | ||
602 | u32 InterruptVector; /* From Resource list */ | ||
603 | spinlock_t AdapterLock; /* Serialize access adapter routines */ | ||
604 | spinlock_t Bit64RegLock; /* For writing 64-bit addresses */ | ||
605 | struct sxg_hw_regs *HwRegs; /* Sahara HW Register Memory (BAR0/1) */ | ||
606 | struct sxg_ucode_regs *UcodeRegs; /* Microcode Register Memory (BAR2/3) */ | ||
607 | struct sxg_tcb_regs *TcbRegs; /* Same as Ucode regs - See sxghw.h */ | ||
608 | ushort FrameSize; /* Maximum frame size */ | ||
609 | u32 * DmaHandle; /* NDIS DMA handle */ | ||
610 | u32 * PacketPoolHandle; /* Used with NDIS 5.2 only. Don't ifdef out */ | ||
611 | u32 * BufferPoolHandle; /* Used with NDIS 5.2 only. Don't ifdef out */ | ||
612 | u32 MacFilter; /* NDIS MAC Filter */ | ||
613 | struct sxg_event_ring *EventRings; /* Host event rings. 1/CPU to 16 max */ | ||
614 | dma_addr_t PEventRings; /* Physical address */ | ||
615 | u32 NextEvent[SXG_MAX_RSS]; /* Current location in ring */ | ||
616 | dma_addr_t PTcbBuffers; /* TCB Buffers - physical address */ | ||
617 | dma_addr_t PTcbCompBuffers; /* TCB Composite Buffers - phys addr */ | ||
618 | struct sxg_xmt_ring *XmtRings; /* Transmit rings */ | ||
619 | dma_addr_t PXmtRings; /* Transmit rings - physical address */ | ||
620 | struct sxg_ring_info XmtRingZeroInfo; /* Transmit ring 0 info */ | ||
621 | |||
622 | spinlock_t XmtZeroLock; /* Transmit ring 0 lock */ | ||
623 | u32 * XmtRingZeroIndex; /* Shared XMT ring 0 index */ | ||
624 | dma_addr_t PXmtRingZeroIndex; /* Shared XMT ring 0 index - physical */ | ||
625 | struct list_entry FreeProtocolHeaders;/* Free protocol headers */ | ||
626 | u32 FreeProtoHdrCount; /* Count */ | ||
627 | void * ProtocolHeaders; /* Block of protocol header */ | ||
628 | dma_addr_t PProtocolHeaders; /* Block of protocol headers - phys */ | ||
629 | |||
630 | struct sxg_rcv_ring *RcvRings; /* Receive rings */ | ||
631 | dma_addr_t PRcvRings; /* Receive rings - physical address */ | ||
632 | struct sxg_ucode_stats *ucode_stats; /* Ucode Stats */ | ||
633 | /* Ucode Stats - physical address */ | ||
634 | dma_addr_t pucode_stats; | ||
635 | |||
636 | struct sxg_ring_info RcvRingZeroInfo; /* Receive ring 0 info */ | ||
637 | |||
638 | u32 * Isr; /* Interrupt status register */ | ||
639 | dma_addr_t PIsr; /* ISR - physical address */ | ||
640 | u32 IsrCopy[SXG_MAX_RSS]; /* Copy of ISR */ | ||
641 | ushort InterruptsEnabled; /* Bitmask of enabled vectors */ | ||
642 | unsigned char *IndirectionTable; /* RSS indirection table */ | ||
643 | dma_addr_t PIndirectionTable; /* Physical address */ | ||
644 | ushort RssTableSize; /* From NDIS_RECEIVE_SCALE_PARAMETERS */ | ||
645 | ushort HashKeySize; /* From NDIS_RECEIVE_SCALE_PARAMETERS */ | ||
646 | unsigned char HashSecretKey[40]; /* rss key */ | ||
647 | u32 HashInformation; | ||
648 | /* Receive buffer queues */ | ||
649 | spinlock_t RcvQLock; /* Receive Queue Lock */ | ||
650 | struct list_entry FreeRcvBuffers; /* Free SXG_DATA_BUFFER queue */ | ||
651 | struct list_entry FreeRcvBlocks; /* Free SXG_RCV_DESCRIPTOR_BLOCK Q */ | ||
652 | struct list_entry AllRcvBlocks; /* All SXG_RCV_BLOCKs */ | ||
653 | ushort FreeRcvBufferCount; /* Number of free rcv data buffers */ | ||
654 | ushort FreeRcvBlockCount; /* # of free rcv descriptor blocks */ | ||
655 | ushort AllRcvBlockCount; /* Number of total receive blocks */ | ||
656 | ushort ReceiveBufferSize; /* SXG_RCV_DATA/JUMBO_BUFFER_SIZE only */ | ||
657 | 	/* Converted this to an atomic variable | ||
658 | u32 AllocationsPending; */ | ||
659 | atomic_t pending_allocations; | ||
660 | u32 AllocationsPending; /* Receive allocation pending */ | ||
661 | u32 RcvBuffersOnCard; /* SXG_DATA_BUFFERS owned by card */ | ||
662 | /* SGL buffers */ | ||
663 | spinlock_t SglQLock; /* SGL Queue Lock */ | ||
664 | struct list_entry FreeSglBuffers; /* Free struct sxg_scatter_gather */ | ||
665 | struct list_entry AllSglBuffers; /* All struct sxg_scatter_gather */ | ||
666 | ushort FreeSglBufferCount; /* Number of free SGL buffers */ | ||
667 | ushort AllSglBufferCount; /* Number of total SGL buffers */ | ||
668 | u32 CurrentTime; /* Tick count */ | ||
669 | u32 FastpathConnections;/* # of fastpath connections */ | ||
670 | /* Various single-bit flags: */ | ||
671 | u32 BasicAllocations:1; /* Locks and listheads */ | ||
672 | u32 IntRegistered:1; /* Interrupt registered */ | ||
673 | u32 PingOutstanding:1; /* Ping outstanding to card */ | ||
674 | u32 Dead:1; /* Card dead */ | ||
675 | u32 DumpDriver:1; /* OID_SLIC_DRIVER_DUMP request */ | ||
676 | u32 DumpCard:1; /* OID_SLIC_CARD_DUMP request */ | ||
677 | u32 DumpCmdRunning:1; /* Dump command in progress */ | ||
678 | u32 DebugRunning:1; /* AGDB debug in progress */ | ||
679 | u32 JumboEnabled:1; /* Jumbo frames enabled */ | ||
680 | u32 msi_enabled:1; /* MSI interrupt enabled */ | ||
681 | u32 RssEnabled:1; /* RSS Enabled */ | ||
682 | u32 FailOnBadEeprom:1; /* Fail on Bad Eeprom */ | ||
683 | u32 DiagStart:1; /* Init adapter for diagnostic start */ | ||
684 | u32 XmtFcEnabled:1; | ||
685 | u32 RcvFcEnabled:1; | ||
686 | /* Stats */ | ||
687 | u32 PendingRcvCount; /* Outstanding rcv indications */ | ||
688 | u32 PendingXmtCount; /* Outstanding send requests */ | ||
689 | struct sxg_stats Stats; /* Statistics */ | ||
690 | u32 ReassBufs; /* Number of reassembly buffers */ | ||
691 | /* Card Crash Info */ | ||
692 | ushort CrashLocation; /* Microcode crash location */ | ||
693 | unsigned char CrashCpu; /* Sahara CPU ID */ | ||
694 | /* Diagnostics */ | ||
695 | /* PDIAG_CMD DiagCmds; */ /* List of free diagnostic commands */ | ||
696 | /* PDIAG_BUFFER DiagBuffers; */ /* List of free diagnostic buffers */ | ||
697 | /* PDIAG_REQ DiagReqQ; */ /* List of outstanding (asynchronous) diag requests */ | ||
698 | /* u32 DiagCmdTimeout; */ /* Time out for diag cmds (seconds) XXXTODO - replace with SXG_PARAM var? */ | ||
699 | /* unsigned char DiagDmaDesc[DMA_CPU_CTXS]; */ /* Free DMA descriptors bit field (32 CPU ctx * 8 DMA ctx) */ | ||
700 | /* | ||
701 | * Put preprocessor-conditional fields at the end so we don't | ||
702 | 	 * have to recompile sxgdbg every time we reconfigure the driver | ||
703 | */ | ||
704 | #if defined(CONFIG_X86) | ||
705 | u32 AddrUpper; /* Upper 32 bits of 64-bit register */ | ||
706 | #endif | ||
707 | unsigned short max_aggregation; | ||
708 | unsigned short min_aggregation; | ||
709 | /*#if SXG_FAILURE_DUMP */ | ||
710 | /*	NDIS_EVENT	DumpThreadEvent; */	/* synchronize dump thread */ | ||
711 | /* BOOLEAN DumpThreadRunning; */ /* termination flag */ | ||
712 | /* PSXG_DUMP_CMD DumpBuffer; */ /* 68k - Cmd and Buffer */ | ||
713 | /* dma_addr_t PDumpBuffer; */ /* Physical address */ | ||
714 | /*#endif */ /* SXG_FAILURE_DUMP */ | ||
715 | /*MSI-X related data elements*/ | ||
716 | u32 nr_msix_entries; | ||
717 | struct msix_entry *msi_entries; | ||
718 | struct timer_list watchdog_timer; | ||
719 | struct work_struct update_link_status; | ||
720 | u32 link_status_changed; | ||
721 | }; | ||
722 | |||
723 | #if SLIC_DUMP_ENABLED | ||
724 | #define SLIC_DUMP_REQUESTED 1 | ||
725 | #define SLIC_DUMP_IN_PROGRESS 2 | ||
726 | #define SLIC_DUMP_DONE 3 | ||
727 | |||
728 | /* | ||
729 | * Microcode crash information structure. This | ||
730 | * structure is written out to the card's SRAM when the microcode panics. | ||
731 | */ | ||
732 | struct slic_crash_info { | ||
733 | ushort cpu_id; | ||
734 | ushort crash_pc; | ||
735 | }; | ||
736 | |||
737 | #define CRASH_INFO_OFFSET 0x155C | ||
738 | |||
739 | #endif | ||
740 | |||
741 | #define UPDATE_STATS(largestat, newstat, oldstat) \ | ||
742 | { \ | ||
743 | if ((newstat) < (oldstat)) \ | ||
744 | (largestat) += ((newstat) + (0xFFFFFFFF - oldstat + 1)); \ | ||
745 | else \ | ||
746 | (largestat) += ((newstat) - (oldstat)); \ | ||
747 | } | ||
748 | |||
749 | #define UPDATE_STATS_GB(largestat, newstat, oldstat) \ | ||
750 | { \ | ||
751 | (largestat) += ((newstat) - (oldstat)); \ | ||
752 | } | ||
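UPDATE_STATS folds a 32-bit hardware counter into a 64-bit accumulator and tolerates one wrap of the counter between reads; UPDATE_STATS_GB is the no-wrap variant. A quick worked case of the wrap branch:

	/* oldstat = 0xFFFFFFF0, newstat = 0x00000010: the counter wrapped.
	 * Increment = newstat + (0xFFFFFFFF - oldstat + 1)
	 *           = 0x10 + 0x10 = 0x20,
	 * i.e. the 32 events that occurred across the wrap, not a bogus
	 * near-2^32 delta. */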
753 | |||
754 | #define ETHER_EQ_ADDR(_AddrA, _AddrB, _Result) \ | ||
755 | { \ | ||
756 | _Result = TRUE; \ | ||
757 | if (*(u32 *)(_AddrA) != *(u32 *)(_AddrB)) \ | ||
758 | _Result = FALSE; \ | ||
759 | if (*(u16 *)(&((_AddrA)[4])) != *(u16 *)(&((_AddrB)[4]))) \ | ||
760 | _Result = FALSE; \ | ||
761 | } | ||
762 | |||
763 | #define ETHERMAXFRAME 1514 | ||
764 | #define JUMBOMAXFRAME 9014 | ||
765 | |||
766 | #define SXG_JUMBO_MTU 9000 | ||
767 | #define SXG_DEFAULT_MTU 1500 | ||
768 | |||
769 | #if defined(CONFIG_X86_64) || defined(CONFIG_IA64) | ||
770 | #define SXG_GET_ADDR_LOW(_addr) (u32)((u64)(_addr) & 0x00000000FFFFFFFF) | ||
771 | #define SXG_GET_ADDR_HIGH(_addr) \ | ||
772 | (u32)(((u64)(_addr) >> 32) & 0x00000000FFFFFFFF) | ||
773 | #else | ||
774 | #define SXG_GET_ADDR_LOW(_addr) (u32)_addr | ||
775 | #define SXG_GET_ADDR_HIGH(_addr) (u32)0 | ||
776 | #endif | ||
777 | |||
778 | #define FLUSH TRUE | ||
779 | #define DONT_FLUSH FALSE | ||
780 | |||
781 | #define SIOCSLICDUMPCARD (SIOCDEVPRIVATE+9) | ||
782 | #define SIOCSLICSETINTAGG (SIOCDEVPRIVATE+10) | ||
783 | #define SIOCSLICTRACEDUMP (SIOCDEVPRIVATE+11) | ||
784 | |||
785 | extern const struct ethtool_ops sxg_nic_ethtool_ops; | ||
786 | #define SXG_COMPLETE_SLOW_SEND_LIMIT 128 | ||
787 | #endif /* __SXG_DRIVER_H__ */ | ||
diff --git a/drivers/staging/sxg/sxg_ethtool.c b/drivers/staging/sxg/sxg_ethtool.c deleted file mode 100644 index f5a0706478da..000000000000 --- a/drivers/staging/sxg/sxg_ethtool.c +++ /dev/null | |||
@@ -1,328 +0,0 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright (C) 2000-2008 Alacritech, Inc. All rights reserved. | ||
4 | * | ||
5 | * Redistribution and use in source and binary forms, with or without | ||
6 | * modification, are permitted provided that the following conditions | ||
7 | * are met: | ||
8 | * | ||
9 | * 1. Redistributions of source code must retain the above copyright | ||
10 | * notice, this list of conditions and the following disclaimer. | ||
11 | * 2. Redistributions in binary form must reproduce the above | ||
12 | * copyright notice, this list of conditions and the following | ||
13 | * disclaimer in the documentation and/or other materials provided | ||
14 | * with the distribution. | ||
15 | * | ||
16 | * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY | ||
17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | ||
19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR | ||
20 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
23 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
24 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | ||
25 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | ||
26 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
27 | * SUCH DAMAGE. | ||
28 | * | ||
29 | * The views and conclusions contained in the software and documentation | ||
30 | * are those of the authors and should not be interpreted as representing | ||
31 | * official policies, either expressed or implied, of Alacritech, Inc. | ||
32 | * | ||
33 | **************************************************************************/ | ||
34 | |||
35 | /* | ||
36 | * FILENAME: sxg_ethtool.c | ||
37 | * | ||
38 | * The ethtool support for the SXG driver for Alacritech's 10Gbe products. | ||
39 | * | ||
40 | * NOTE: This is the standard, non-accelerated version of Alacritech's | ||
41 | * IS-NIC driver. | ||
42 | */ | ||
43 | #include <linux/kernel.h> | ||
44 | #include <linux/errno.h> | ||
45 | #include <linux/module.h> | ||
46 | #include <linux/netdevice.h> | ||
47 | #include <linux/etherdevice.h> | ||
48 | #include <linux/ethtool.h> | ||
49 | #include <linux/skbuff.h> | ||
50 | #include <linux/pci.h> | ||
51 | |||
52 | #include "sxg_os.h" | ||
53 | #include "sxghw.h" | ||
54 | #include "sxghif.h" | ||
55 | #include "sxg.h" | ||
56 | |||
57 | struct sxg_nic_stats { | ||
58 | char stat_string[ETH_GSTRING_LEN]; | ||
59 | int sizeof_stat; | ||
60 | int stat_offset; | ||
61 | }; | ||
62 | |||
63 | #define SXG_NIC_STATS(m) sizeof(((struct adapter_t *)0)->m), \ | ||
64 | offsetof(struct adapter_t, m) | ||
65 | |||
66 | #define USER_VIEWABLE_EEPROM_SIZE 28 | ||
67 | |||
68 | static struct sxg_nic_stats sxg_nic_gstrings_stats[] = { | ||
69 | {"xmit_ring_0_full", SXG_NIC_STATS(Stats.XmtZeroFull)}, | ||
70 | |||
71 | 	/* Maybe needed in the future */ | ||
72 | /* {"dumb_xmit_broadcast_packets", SXG_NIC_STATS(Stats.DumbXmtBcastPkts)}, | ||
73 | {"dumb_xmit_broadcast_bytes", SXG_NIC_STATS(Stats.DumbXmtBcastBytes)}, | ||
74 | {"dumb_xmit_unicast_packets", SXG_NIC_STATS(Stats.DumbXmtUcastPkts)}, | ||
75 | {"dumb_xmit_unicast_bytes", SXG_NIC_STATS(Stats.DumbXmtUcastBytes)}, | ||
76 | */ | ||
77 | {"xmit_queue_length", SXG_NIC_STATS(Stats.XmtQLen)}, | ||
78 | {"memory_allocation_failure", SXG_NIC_STATS(Stats.NoMem)}, | ||
79 | {"Interrupts", SXG_NIC_STATS(Stats.NumInts)}, | ||
80 | {"false_interrupts", SXG_NIC_STATS(Stats.FalseInts)}, | ||
81 | {"processed_data_queue_full", SXG_NIC_STATS(Stats.PdqFull)}, | ||
82 | {"event_ring_full", SXG_NIC_STATS(Stats.EventRingFull)}, | ||
83 | {"transport_checksum_error", SXG_NIC_STATS(Stats.TransportCsum)}, | ||
84 | {"transport_underflow_error", SXG_NIC_STATS(Stats.TransportUflow)}, | ||
85 | {"transport_header_length_error", SXG_NIC_STATS(Stats.TransportHdrLen)}, | ||
86 | {"network_checksum_error", SXG_NIC_STATS(Stats.NetworkCsum)}, | ||
87 | {"network_underflow_error", SXG_NIC_STATS(Stats.NetworkUflow)}, | ||
88 | {"network_header_length_error", SXG_NIC_STATS(Stats.NetworkHdrLen)}, | ||
89 | {"receive_parity_error", SXG_NIC_STATS(Stats.Parity)}, | ||
90 | {"link_parity_error", SXG_NIC_STATS(Stats.LinkParity)}, | ||
91 | {"link/data early_error", SXG_NIC_STATS(Stats.LinkEarly)}, | ||
92 | {"buffer_overflow_error", SXG_NIC_STATS(Stats.LinkBufOflow)}, | ||
93 | {"link_code_error", SXG_NIC_STATS(Stats.LinkCode)}, | ||
94 | {"dribble nibble", SXG_NIC_STATS(Stats.LinkDribble)}, | ||
95 | {"CRC_error", SXG_NIC_STATS(Stats.LinkCrc)}, | ||
96 | {"link_overflow_error", SXG_NIC_STATS(Stats.LinkOflow)}, | ||
97 | {"link_underflow_error", SXG_NIC_STATS(Stats.LinkUflow)}, | ||
98 | |||
99 | 	/* Maybe needed in the future */ | ||
100 | /* {"dumb_rcv_broadcast_packets", SXG_NIC_STATS(Stats.DumbRcvBcastPkts)}, | ||
101 | 	{"dumb_rcv_broadcast_bytes", SXG_NIC_STATS(Stats.DumbRcvBcastBytes)}, */ | ||
102 | 	{"dumb_rcv_multicast_packets", SXG_NIC_STATS(Stats.DumbRcvMcastPkts)}, | ||
103 | {"dumb_rcv_multicast_bytes", SXG_NIC_STATS(Stats.DumbRcvMcastBytes)}, | ||
104 | /* {"dumb_rcv_unicast_packets", SXG_NIC_STATS(Stats.DumbRcvUcastPkts)}, | ||
105 | {"dumb_rcv_unicast_bytes", SXG_NIC_STATS(Stats.DumbRcvUcastBytes)}, | ||
106 | */ | ||
107 | {"no_sgl_buffer", SXG_NIC_STATS(Stats.NoSglBuf)}, | ||
108 | }; | ||
109 | |||
110 | #define SXG_NIC_STATS_LEN ARRAY_SIZE(sxg_nic_gstrings_stats) | ||
111 | |||
112 | static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush) | ||
113 | { | ||
114 | writel(value, reg); | ||
115 | if (flush) | ||
116 | mb(); | ||
117 | } | ||
118 | |||
119 | static inline void sxg_reg64_write(struct adapter_t *adapter, void __iomem *reg, | ||
120 | u64 value, u32 cpu) | ||
121 | { | ||
122 | u32 value_high = (u32) (value >> 32); | ||
123 | u32 value_low = (u32) (value & 0x00000000FFFFFFFF); | ||
124 | unsigned long flags; | ||
125 | |||
126 | spin_lock_irqsave(&adapter->Bit64RegLock, flags); | ||
127 | writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper)); | ||
128 | writel(value_low, reg); | ||
129 | spin_unlock_irqrestore(&adapter->Bit64RegLock, flags); | ||
130 | } | ||
131 | |||
132 | static void | ||
133 | sxg_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) | ||
134 | { | ||
135 | struct adapter_t *adapter = netdev_priv(dev); | ||
136 | strncpy(drvinfo->driver, sxg_driver_name, 32); | ||
137 | strncpy(drvinfo->version, SXG_DRV_VERSION, 32); | ||
138 | // strncpy(drvinfo->fw_version, SAHARA_UCODE_VERS_STRING, 32); | ||
139 | strncpy(drvinfo->bus_info, pci_name(adapter->pcidev), 32); | ||
140 | /* TODO : Read the major and minor number of firmware. Is this | ||
141 | * from the FLASH/EEPROM or download file ? | ||
142 | */ | ||
143 | /* LINSYS : Check if this is correct or if not find the right value | ||
144 | * Also check what is the right EEPROM length : EEPROM_SIZE_XFMR or EEPROM_SIZE_NO_XFMR | ||
145 | */ | ||
146 | } | ||
147 | |||
148 | static int sxg_nic_set_settings(struct net_device *netdev, | ||
149 | struct ethtool_cmd *ecmd) | ||
150 | { | ||
151 | /* No settings are applicable as we support only 10Gb/FIBRE_media */ | ||
152 | return -EOPNOTSUPP; | ||
153 | } | ||
154 | |||
155 | static void | ||
156 | sxg_nic_get_strings(struct net_device *netdev, u32 stringset, u8 * data) | ||
157 | { | ||
158 | int index; | ||
159 | |||
160 | switch(stringset) { | ||
161 | case ETH_SS_TEST: | ||
162 | break; | ||
163 | case ETH_SS_STATS: | ||
164 | for (index = 0; index < SXG_NIC_STATS_LEN; index++) { | ||
165 | memcpy(data + index * ETH_GSTRING_LEN, | ||
166 | sxg_nic_gstrings_stats[index].stat_string, | ||
167 | ETH_GSTRING_LEN); | ||
168 | } | ||
169 | break; | ||
170 | } | ||
171 | } | ||
172 | |||
173 | static void | ||
174 | sxg_nic_get_ethtool_stats(struct net_device *netdev, | ||
175 | struct ethtool_stats *stats, u64 * data) | ||
176 | { | ||
177 | struct adapter_t *adapter = netdev_priv(netdev); | ||
178 | int index; | ||
179 | for (index = 0; index < SXG_NIC_STATS_LEN; index++) { | ||
180 | char *p = (char *)adapter + | ||
181 | sxg_nic_gstrings_stats[index].stat_offset; | ||
182 | data[index] = (sxg_nic_gstrings_stats[index].sizeof_stat == | ||
183 | sizeof(u64)) ? *(u64 *) p : *(u32 *) p; | ||
184 | } | ||
185 | } | ||
186 | |||
187 | static int sxg_nic_get_sset_count(struct net_device *netdev, int sset) | ||
188 | { | ||
189 | switch (sset) { | ||
190 | case ETH_SS_STATS: | ||
191 | return SXG_NIC_STATS_LEN; | ||
192 | default: | ||
193 | return -EOPNOTSUPP; | ||
194 | } | ||
195 | } | ||
196 | |||
197 | static int sxg_nic_get_settings(struct net_device *netdev, | ||
198 | struct ethtool_cmd *ecmd) | ||
199 | { | ||
200 | struct adapter_t *adapter = netdev_priv(netdev); | ||
201 | |||
202 | ecmd->supported = SUPPORTED_10000baseT_Full; | ||
203 | ecmd->autoneg = AUTONEG_ENABLE; //VSS check This | ||
204 | ecmd->transceiver = XCVR_EXTERNAL; //VSS check This | ||
205 | |||
206 | /* For Fibre Channel */ | ||
207 | ecmd->supported |= SUPPORTED_FIBRE; | ||
208 | ecmd->advertising = (ADVERTISED_10000baseT_Full | | ||
209 | ADVERTISED_FIBRE); | ||
210 | ecmd->port = PORT_FIBRE; | ||
211 | |||
212 | |||
213 | /* Link Speed */ | ||
214 | if(adapter->LinkState & SXG_LINK_UP) { | ||
215 | ecmd->speed = SPEED_10000; //adapter->LinkSpeed; | ||
216 | ecmd->duplex = DUPLEX_FULL; | ||
217 | } | ||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | static u32 sxg_nic_get_rx_csum(struct net_device *netdev) | ||
222 | { | ||
223 | struct adapter_t *adapter = netdev_priv(netdev); | ||
224 | return ((adapter->flags & SXG_RCV_IP_CSUM_ENABLED) && | ||
225 | (adapter->flags & SXG_RCV_TCP_CSUM_ENABLED)); | ||
226 | } | ||
227 | |||
228 | static int sxg_nic_set_rx_csum(struct net_device *netdev, u32 data) | ||
229 | { | ||
230 | struct adapter_t *adapter = netdev_priv(netdev); | ||
231 | if (data) | ||
232 | adapter->flags |= SXG_RCV_IP_CSUM_ENABLED; | ||
233 | else | ||
234 | adapter->flags &= ~SXG_RCV_IP_CSUM_ENABLED; | ||
235 | /* | ||
236 | * We don't need to write to the card to enable checksumming; | ||
237 | * it performs the checksums anyway. | ||
238 | */ | ||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | static int sxg_nic_get_regs_len(struct net_device *dev) | ||
243 | { | ||
244 | return (SXG_HWREG_MEMSIZE + SXG_UCODEREG_MEMSIZE); | ||
245 | } | ||
246 | |||
247 | static void sxg_nic_get_regs(struct net_device *netdev, | ||
248 | struct ethtool_regs *regs, void *p) | ||
249 | { | ||
250 | struct adapter_t *adapter = netdev_priv(netdev); | ||
251 | struct sxg_hw_regs *HwRegs = adapter->HwRegs; | ||
252 | struct sxg_ucode_regs *UcodeRegs = adapter->UcodeRegs; | ||
253 | u8 *buff = p; | ||
254 | |||
255 | memset(p, 0, sizeof(struct sxg_hw_regs) + sizeof(struct sxg_ucode_regs)); | ||
256 | memcpy(buff, HwRegs, sizeof(struct sxg_hw_regs)); | ||
257 | memcpy(buff + sizeof(struct sxg_hw_regs), UcodeRegs, sizeof(struct sxg_ucode_regs)); | ||
258 | } | ||
259 | |||
260 | static int sxg_nic_get_eeprom_len(struct net_device *netdev) | ||
261 | { | ||
262 | return (USER_VIEWABLE_EEPROM_SIZE); | ||
263 | } | ||
264 | |||
265 | static int sxg_nic_get_eeprom(struct net_device *netdev, | ||
266 | struct ethtool_eeprom *eeprom, u8 *bytes) | ||
267 | { | ||
268 | struct adapter_t *adapter = netdev_priv(netdev); | ||
269 | struct sw_cfg_data *data; | ||
270 | unsigned long i, status; | ||
271 | dma_addr_t p_addr; | ||
272 | |||
273 | data = pci_alloc_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), | ||
274 | &p_addr); | ||
275 | if (!data) { | ||
276 | /* | ||
277 | * We can't allocate even this small buffer; complain | ||
278 | * loudly and get out of here. | ||
279 | */ | ||
280 | printk(KERN_ERR "%s: Could not allocate memory for reading " | ||
281 | "EEPROM\n", __func__); | ||
282 | return -ENOMEM; | ||
283 | } | ||
284 | |||
285 | WRITE_REG(adapter->UcodeRegs[0].ConfigStat, SXG_CFG_TIMEOUT, TRUE); | ||
286 | WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0); | ||
287 | for (i = 0; i < 1000; i++) { | ||
288 | READ_REG(adapter->UcodeRegs[0].ConfigStat, status); | ||
289 | if (status != SXG_CFG_TIMEOUT) | ||
290 | break; | ||
291 | mdelay(1); /* Do we really need this? */ | ||
292 | } | ||
293 | |||
294 | memset(bytes, 0, eeprom->len); | ||
295 | memcpy(bytes, data->MacAddr[0].MacAddr, sizeof(struct sxg_config_mac)); | ||
296 | memcpy(bytes+6, data->AtkFru.PartNum, 6); | ||
297 | memcpy(bytes+12, data->AtkFru.Revision, 2); | ||
298 | memcpy(bytes+14, data->AtkFru.Serial, 14); | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | const struct ethtool_ops sxg_nic_ethtool_ops = { | ||
304 | .get_settings = sxg_nic_get_settings, | ||
305 | .set_settings = sxg_nic_set_settings, | ||
306 | .get_drvinfo = sxg_nic_get_drvinfo, | ||
307 | .get_regs_len = sxg_nic_get_regs_len, | ||
308 | .get_regs = sxg_nic_get_regs, | ||
309 | .get_link = ethtool_op_get_link, | ||
310 | // .get_wol = sxg_nic_get_wol, | ||
311 | .get_eeprom_len = sxg_nic_get_eeprom_len, | ||
312 | .get_eeprom = sxg_nic_get_eeprom, | ||
313 | // .get_pauseparam = sxg_nic_get_pauseparam, | ||
314 | // .set_pauseparam = sxg_nic_set_pauseparam, | ||
315 | .set_tx_csum = ethtool_op_set_tx_csum, | ||
316 | .get_sg = ethtool_op_get_sg, | ||
317 | .set_sg = ethtool_op_set_sg, | ||
318 | // .get_tso = sxg_nic_get_tso, | ||
319 | // .set_tso = sxg_nic_set_tso, | ||
320 | // .self_test = sxg_nic_diag_test, | ||
321 | .get_strings = sxg_nic_get_strings, | ||
322 | .get_ethtool_stats = sxg_nic_get_ethtool_stats, | ||
323 | .get_sset_count = sxg_nic_get_sset_count, | ||
324 | .get_rx_csum = sxg_nic_get_rx_csum, | ||
325 | .set_rx_csum = sxg_nic_set_rx_csum, | ||
326 | // .get_coalesce = sxg_nic_get_intr_coalesce, | ||
327 | // .set_coalesce = sxg_nic_set_intr_coalesce, | ||
328 | }; | ||
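/*
 * Editor's note (illustrative, not part of the original file): the ops
 * table above is hooked up from the probe path in sxg.c, typically via
 *
 *	SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops);
 *
 * where "netdev" is the net_device allocated by the probe routine.
 */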
diff --git a/drivers/staging/sxg/sxg_os.h b/drivers/staging/sxg/sxg_os.h deleted file mode 100644 index 68e1a04b61f3..000000000000 --- a/drivers/staging/sxg/sxg_os.h +++ /dev/null | |||
@@ -1,149 +0,0 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright (C) 2000-2008 Alacritech, Inc. All rights reserved. | ||
4 | * | ||
5 | * Redistribution and use in source and binary forms, with or without | ||
6 | * modification, are permitted provided that the following conditions | ||
7 | * are met: | ||
8 | * | ||
9 | * 1. Redistributions of source code must retain the above copyright | ||
10 | * notice, this list of conditions and the following disclaimer. | ||
11 | * 2. Redistributions in binary form must reproduce the above | ||
12 | * copyright notice, this list of conditions and the following | ||
13 | * disclaimer in the documentation and/or other materials provided | ||
14 | * with the distribution. | ||
15 | * | ||
16 | * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY | ||
17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | ||
19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR | ||
20 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
23 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
24 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | ||
25 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | ||
26 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
27 | * SUCH DAMAGE. | ||
28 | * | ||
29 | * The views and conclusions contained in the software and documentation | ||
30 | * are those of the authors and should not be interpreted as representing | ||
31 | * official policies, either expressed or implied, of Alacritech, Inc. | ||
32 | * | ||
33 | **************************************************************************/ | ||
34 | |||
35 | /* | ||
36 | * FILENAME: sxg_os.h | ||
37 | * | ||
38 | * These are the Linux-specific definitions required for the SXG | ||
39 | * driver, which should allow for greater portability to other OSes. | ||
40 | */ | ||
41 | #ifndef _SLIC_OS_SPECIFIC_H_ | ||
42 | #define _SLIC_OS_SPECIFIC_H_ | ||
43 | |||
44 | #define FALSE (0) | ||
45 | #define TRUE (1) | ||
46 | |||
47 | struct list_entry { | ||
48 | struct list_entry *nle_flink; | ||
49 | struct list_entry *nle_blink; | ||
50 | }; | ||
51 | |||
52 | #define InitializeListHead(l) \ | ||
53 | (l)->nle_flink = (l)->nle_blink = (l) | ||
54 | |||
55 | #define IsListEmpty(h) \ | ||
56 | ((h)->nle_flink == (h)) | ||
57 | |||
58 | #define RemoveEntryList(e) \ | ||
59 | do { \ | ||
60 | struct list_entry *b; \ | ||
61 | struct list_entry *f; \ | ||
62 | \ | ||
63 | f = (e)->nle_flink; \ | ||
64 | b = (e)->nle_blink; \ | ||
65 | b->nle_flink = f; \ | ||
66 | f->nle_blink = b; \ | ||
67 | } while (0) | ||
68 | |||
69 | /* These two have to be inlined since they return things. */ | ||
70 | |||
71 | static inline struct list_entry *RemoveHeadList(struct list_entry *l) | ||
72 | { | ||
73 | struct list_entry *f; | ||
74 | struct list_entry *e; | ||
75 | |||
76 | e = l->nle_flink; | ||
77 | f = e->nle_flink; | ||
78 | l->nle_flink = f; | ||
79 | f->nle_blink = l; | ||
80 | |||
81 | return (e); | ||
82 | } | ||
83 | |||
84 | static inline struct list_entry *RemoveTailList(struct list_entry *l) | ||
85 | { | ||
86 | struct list_entry *b; | ||
87 | struct list_entry *e; | ||
88 | |||
89 | e = l->nle_blink; | ||
90 | b = e->nle_blink; | ||
91 | l->nle_blink = b; | ||
92 | b->nle_flink = l; | ||
93 | |||
94 | return (e); | ||
95 | } | ||
96 | |||
97 | #define InsertTailList(l, e) \ | ||
98 | do { \ | ||
99 | struct list_entry *b; \ | ||
100 | \ | ||
101 | b = (l)->nle_blink; \ | ||
102 | (e)->nle_flink = (l); \ | ||
103 | (e)->nle_blink = b; \ | ||
104 | b->nle_flink = (e); \ | ||
105 | (l)->nle_blink = (e); \ | ||
106 | } while (0) | ||
107 | |||
108 | #define InsertHeadList(l, e) \ | ||
109 | do { \ | ||
110 | struct list_entry *f; \ | ||
111 | \ | ||
112 | f = (l)->nle_flink; \ | ||
113 | (e)->nle_flink = f; \ | ||
114 | (e)->nle_blink = l; \ | ||
115 | f->nle_blink = (e); \ | ||
116 | (l)->nle_flink = (e); \ | ||
117 | } while (0) | ||
118 | |||
119 | #define ATK_DEBUG 1 | ||
120 | |||
121 | #if ATK_DEBUG | ||
122 | #define SLIC_TIMESTAMP(value) { \ | ||
123 | struct timeval timev; \ | ||
124 | do_gettimeofday(&timev); \ | ||
125 | value = timev.tv_sec*1000000 + timev.tv_usec; \ | ||
126 | } | ||
127 | #else | ||
128 | #define SLIC_TIMESTAMP(value) | ||
129 | #endif | ||
130 | |||
131 | /* SXG DEFINES */ | ||
132 | |||
133 | #ifdef ATKDBG | ||
134 | #define SXG_TIMESTAMP(value) { \ | ||
135 | struct timeval timev; \ | ||
136 | do_gettimeofday(&timev); \ | ||
137 | value = timev.tv_sec*1000000 + timev.tv_usec; \ | ||
138 | } | ||
139 | #else | ||
140 | #define SXG_TIMESTAMP(value) | ||
141 | #endif | ||
142 | |||
143 | #define WRITE_REG(reg,value,flush) \ | ||
144 | sxg_reg32_write((®), (value), (flush)) | ||
145 | #define WRITE_REG64(a,reg,value,cpu) \ | ||
146 | sxg_reg64_write((a),(®),(value),(cpu)) | ||
147 | #define READ_REG(reg,value) (value) = readl((void __iomem *)(®)) | ||
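/*
 * Usage sketch (editor's illustration, not original driver code); the
 * adapter/UcodeRegs names are assumed from sxg.h, and SXG_ICR comes
 * from sxghif.h:
 *
 *	u32 isr;
 *
 *	READ_REG(adapter->UcodeRegs[0].Isr, isr);
 *	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);
 */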
148 | |||
149 | #endif /* _SLIC_OS_SPECIFIC_H_ */ | ||
diff --git a/drivers/staging/sxg/sxgdbg.h b/drivers/staging/sxg/sxgdbg.h deleted file mode 100644 index e613a972b3d0..000000000000 --- a/drivers/staging/sxg/sxgdbg.h +++ /dev/null | |||
@@ -1,184 +0,0 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2000-2008 Alacritech, Inc. All rights reserved. | ||
4 | * | ||
5 | * $Id: sxgdbg.h,v 1.1 2008/06/27 12:49:28 mook Exp $ | ||
6 | * | ||
7 | * Redistribution and use in source and binary forms, with or without | ||
8 | * modification, are permitted provided that the following conditions | ||
9 | * are met: | ||
10 | * | ||
11 | * 1. Redistributions of source code must retain the above copyright | ||
12 | * notice, this list of conditions and the following disclaimer. | ||
13 | * 2. Redistributions in binary form must reproduce the above | ||
14 | * copyright notice, this list of conditions and the following | ||
15 | * disclaimer in the documentation and/or other materials provided | ||
16 | * with the distribution. | ||
17 | * | ||
18 | * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY | ||
19 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
20 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | ||
21 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR | ||
22 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
23 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
24 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
25 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
26 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | ||
27 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | ||
28 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
29 | * SUCH DAMAGE. | ||
30 | * | ||
31 | * The views and conclusions contained in the software and documentation | ||
32 | * are those of the authors and should not be interpreted as representing | ||
33 | * official policies, either expressed or implied, of Alacritech, Inc. | ||
34 | * | ||
35 | **************************************************************************/ | ||
36 | |||
37 | /* | ||
38 | * FILENAME: sxgdbg.h | ||
39 | * | ||
40 | * All debug and assertion-based definitions and macros are included | ||
41 | * in this file for the SXGOSS driver. | ||
42 | */ | ||
43 | #ifndef _SXG_DEBUG_H_ | ||
44 | #define _SXG_DEBUG_H_ | ||
45 | |||
46 | #define ATKDBG 1 | ||
47 | #define ATK_TRACE_ENABLED 0 | ||
48 | |||
49 | #define DBG_ERROR(n, args...) printk(KERN_WARNING n, ##args) | ||
50 | |||
51 | #ifdef ASSERT | ||
52 | #undef ASSERT | ||
53 | #endif | ||
54 | |||
55 | #define SXG_ASSERT_ENABLED | ||
56 | #ifdef SXG_ASSERT_ENABLED | ||
57 | #ifndef ASSERT | ||
58 | #define ASSERT(a) \ | ||
59 | { \ | ||
60 | if (!(a)) { \ | ||
61 | DBG_ERROR("ASSERT() Failure: file %s, function %s line %d\n", \ | ||
62 | __FILE__, __func__, __LINE__); \ | ||
63 | } \ | ||
64 | } | ||
65 | #endif | ||
66 | #else | ||
67 | #ifndef ASSERT | ||
68 | #define ASSERT(a) | ||
69 | #endif | ||
70 | #endif /* SXG_ASSERT_ENABLED */ | ||
71 | |||
72 | |||
73 | #ifdef ATKDBG | ||
74 | /* | ||
75 | * Global for timer granularity; every driver must have an instance | ||
76 | * of this initialized to 0 | ||
77 | */ | ||
78 | |||
79 | extern ulong ATKTimerDiv; | ||
80 | |||
81 | /* | ||
82 | * trace_entry - | ||
83 | * | ||
84 | * This structure defines an entry in the trace buffer. The | ||
85 | * first few fields mean the same from entry to entry, while | ||
86 | * the meaning of last several fields change to suit the | ||
87 | * needs of the trace entry. Typically they are function call | ||
88 | * parameters. | ||
89 | */ | ||
90 | struct trace_entry { | ||
91 | char name[8];/* 8 character name - like 's'i'm'b'a'r'c'v' */ | ||
92 | u32 time; /* Current clock tic */ | ||
93 | unsigned char cpu; /* Current CPU */ | ||
94 | unsigned char irql; /* Current IRQL */ | ||
95 | unsigned char driver;/* The driver which added the trace call */ | ||
96 | /* pad to 4 byte boundary - will probably get used */ | ||
97 | unsigned char pad2; | ||
98 | u32 arg1; /* Caller arg1 */ | ||
99 | u32 arg2; /* Caller arg2 */ | ||
100 | u32 arg3; /* Caller arg3 */ | ||
101 | u32 arg4; /* Caller arg4 */ | ||
102 | }; | ||
103 | |||
104 | /* Driver types for driver field in struct trace_entry */ | ||
105 | #define TRACE_SXG 1 | ||
106 | #define TRACE_VPCI 2 | ||
107 | #define TRACE_SLIC 3 | ||
108 | |||
109 | #define TRACE_ENTRIES 1024 | ||
110 | |||
111 | struct sxg_trace_buffer { | ||
112 | /* aid for windbg extension */ | ||
113 | unsigned int size; | ||
114 | unsigned int in; /* Where to add */ | ||
115 | unsigned int level; /* Current Trace level */ | ||
116 | spinlock_t lock; /* For MP tracing */ | ||
117 | struct trace_entry entries[TRACE_ENTRIES];/* The circular buffer */ | ||
118 | }; | ||
119 | |||
120 | /* | ||
121 | * The trace levels | ||
122 | * | ||
123 | * XXX At the moment I am only defining critical, important, and noisy. | ||
124 | * I am leaving room for more if anyone wants them. | ||
125 | */ | ||
126 | #define TRACE_NONE 0 /* For trace level - if no tracing wanted */ | ||
127 | #define TRACE_CRITICAL 1 /* minimal tracing - only critical stuff */ | ||
128 | #define TRACE_IMPORTANT 5 /* more tracing - anything important */ | ||
129 | #define TRACE_NOISY 10 /* Everything in the world */ | ||
130 | |||
131 | |||
132 | /* The macros themselves */ | ||
133 | #if ATK_TRACE_ENABLED | ||
134 | #define SXG_TRACE_INIT(buffer, tlevel) \ | ||
135 | { \ | ||
136 | memset((buffer), 0, sizeof(struct sxg_trace_buffer)); \ | ||
137 | (buffer)->level = (tlevel); \ | ||
138 | (buffer)->size = TRACE_ENTRIES; \ | ||
139 | spin_lock_init(&(buffer)->lock); \ | ||
140 | } | ||
141 | #else | ||
142 | #define SXG_TRACE_INIT(buffer, tlevel) | ||
143 | #endif | ||
144 | |||
145 | /*The trace macro. This is active only if ATK_TRACE_ENABLED is set. */ | ||
146 | #if ATK_TRACE_ENABLED | ||
147 | #define SXG_TRACE(tdriver, buffer, tlevel, tname, a1, a2, a3, a4) { \ | ||
148 | if ((buffer) && ((buffer)->level >= (tlevel))) { \ | ||
149 | unsigned int trace_irql = 0;/* ?????? FIX THIS */\ | ||
150 | unsigned int trace_len; \ | ||
151 | struct trace_entry *trace_entry; \ | ||
152 | struct timeval timev; \ | ||
153 | if(spin_trylock(&(buffer)->lock)) { \ | ||
154 | trace_entry = &(buffer)->entries[(buffer)->in]; \ | ||
155 | do_gettimeofday(&timev); \ | ||
156 | \ | ||
157 | memset(trace_entry->name, 0, 8); \ | ||
158 | trace_len = strlen(tname); \ | ||
159 | trace_len = trace_len > 8 ? 8 : trace_len; \ | ||
160 | memcpy(trace_entry->name, (tname), trace_len); \ | ||
161 | trace_entry->time = timev.tv_usec; \ | ||
162 | trace_entry->cpu = (unsigned char)(smp_processor_id() & 0xFF);\ | ||
163 | trace_entry->driver = (tdriver); \ | ||
164 | trace_entry->irql = trace_irql; \ | ||
165 | trace_entry->arg1 = (ulong)(a1); \ | ||
166 | trace_entry->arg2 = (ulong)(a2); \ | ||
167 | trace_entry->arg3 = (ulong)(a3); \ | ||
168 | trace_entry->arg4 = (ulong)(a4); \ | ||
169 | \ | ||
170 | (buffer)->in++; \ | ||
171 | if ((buffer)->in == TRACE_ENTRIES) \ | ||
172 | (buffer)->in = 0; \ | ||
173 | \ | ||
174 | spin_unlock(&(buffer)->lock); \ | ||
175 | } \ | ||
176 | } \ | ||
177 | } | ||
178 | #else | ||
179 | #define SXG_TRACE(tdriver, buffer, tlevel, tname, a1, a2, a3, a4) | ||
180 | #endif | ||
181 | |||
182 | #endif | ||
183 | |||
184 | #endif /* _SXG_DEBUG_H_ */ | ||
diff --git a/drivers/staging/sxg/sxghif.h b/drivers/staging/sxg/sxghif.h deleted file mode 100644 index e190d6add29c..000000000000 --- a/drivers/staging/sxg/sxghif.h +++ /dev/null | |||
@@ -1,1014 +0,0 @@ | |||
1 | /******************************************************************* | ||
2 | * Copyright © 1997-2007 Alacritech, Inc. All rights reserved | ||
3 | * | ||
4 | * $Id: sxghif.h,v 1.5 2008/07/24 19:18:22 chris Exp $ | ||
5 | * | ||
6 | * sxghif.h: | ||
7 | * | ||
8 | * This file contains structures and definitions for the | ||
9 | * Alacritech Sahara host interface | ||
10 | ******************************************************************/ | ||
11 | |||
12 | #define DBG 1 | ||
13 | |||
14 | /* UCODE Registers */ | ||
15 | struct sxg_ucode_regs { | ||
16 | /* Address 0 - 0x3F = Command codes 0-15 for TCB 0. Excode 0 */ | ||
17 | u32 Icr; /* Code = 0 (extended), ExCode = 0 - Int control */ | ||
18 | u32 RsvdReg1; /* Code = 1 - TOE -NA */ | ||
19 | u32 RsvdReg2; /* Code = 2 - TOE -NA */ | ||
20 | u32 RsvdReg3; /* Code = 3 - TOE -NA */ | ||
21 | u32 RsvdReg4; /* Code = 4 - TOE -NA */ | ||
22 | u32 RsvdReg5; /* Code = 5 - TOE -NA */ | ||
23 | u32 CardUp; /* Code = 6 - Microcode initialized when 1 */ | ||
24 | u32 RsvdReg7; /* Code = 7 - TOE -NA */ | ||
25 | u32 ConfigStat; /* Code = 8 - Configuration data load status */ | ||
26 | u32 RsvdReg9; /* Code = 9 - TOE -NA */ | ||
27 | u32 CodeNotUsed[6]; /* Codes 10-15 not used. ExCode = 0 */ | ||
28 | /* This brings us to ExCode 1 at address 0x40 = Interrupt status pointer */ | ||
29 | u32 Isp; /* Code = 0 (extended), ExCode = 1 */ | ||
30 | u32 PadEx1[15]; /* Codes 1-15 not used with extended codes */ | ||
31 | /* ExCode 2 = Interrupt Status Register */ | ||
32 | u32 Isr; /* Code = 0 (extended), ExCode = 2 */ | ||
33 | u32 PadEx2[15]; | ||
34 | /* ExCode 3 = Event base register. Location of event rings */ | ||
35 | u32 EventBase; /* Code = 0 (extended), ExCode = 3 */ | ||
36 | u32 PadEx3[15]; | ||
37 | /* ExCode 4 = Event ring size */ | ||
38 | u32 EventSize; /* Code = 0 (extended), ExCode = 4 */ | ||
39 | u32 PadEx4[15]; | ||
40 | /* ExCode 5 = TCB Buffers base address */ | ||
41 | u32 TcbBase; /* Code = 0 (extended), ExCode = 5 */ | ||
42 | u32 PadEx5[15]; | ||
43 | /* ExCode 6 = TCB Composite Buffers base address */ | ||
44 | u32 TcbCompBase; /* Code = 0 (extended), ExCode = 6 */ | ||
45 | u32 PadEx6[15]; | ||
46 | /* ExCode 7 = Transmit ring base address */ | ||
47 | u32 XmtBase; /* Code = 0 (extended), ExCode = 7 */ | ||
48 | u32 PadEx7[15]; | ||
49 | /* ExCode 8 = Transmit ring size */ | ||
50 | u32 XmtSize; /* Code = 0 (extended), ExCode = 8 */ | ||
51 | u32 PadEx8[15]; | ||
52 | /* ExCode 9 = Receive ring base address */ | ||
53 | u32 RcvBase; /* Code = 0 (extended), ExCode = 9 */ | ||
54 | u32 PadEx9[15]; | ||
55 | /* ExCode 10 = Receive ring size */ | ||
56 | u32 RcvSize; /* Code = 0 (extended), ExCode = 10 */ | ||
57 | u32 PadEx10[15]; | ||
58 | /* ExCode 11 = Read EEPROM/Flash Config */ | ||
59 | u32 Config; /* Code = 0 (extended), ExCode = 11 */ | ||
60 | u32 PadEx11[15]; | ||
61 | /* ExCode 12 = Multicast bits 31:0 */ | ||
62 | u32 McastLow; /* Code = 0 (extended), ExCode = 12 */ | ||
63 | u32 PadEx12[15]; | ||
64 | /* ExCode 13 = Multicast bits 63:32 */ | ||
65 | u32 McastHigh; /* Code = 0 (extended), ExCode = 13 */ | ||
66 | u32 PadEx13[15]; | ||
67 | /* ExCode 14 = Ping */ | ||
68 | u32 Ping; /* Code = 0 (extended), ExCode = 14 */ | ||
69 | u32 PadEx14[15]; | ||
70 | /* ExCode 15 = Link MTU */ | ||
71 | u32 LinkMtu; /* Code = 0 (extended), ExCode = 15 */ | ||
72 | u32 PadEx15[15]; | ||
73 | /* ExCode 16 = Download synchronization */ | ||
74 | u32 LoadSync; /* Code = 0 (extended), ExCode = 16 */ | ||
75 | u32 PadEx16[15]; | ||
76 | /* ExCode 17 = Upper DRAM address bits on 32-bit systems */ | ||
77 | u32 Upper; /* Code = 0 (extended), ExCode = 17 */ | ||
78 | u32 PadEx17[15]; | ||
79 | /* ExCode 18 = Slowpath Send Index Address */ | ||
80 | u32 SPSendIndex; /* Code = 0 (extended), ExCode = 18 */ | ||
81 | u32 PadEx18[15]; | ||
82 | /* ExCode 19 = Get ucode statistics */ | ||
83 | u32 GetUcodeStats; /* Code = 0 (extended), ExCode = 19 */ | ||
84 | u32 PadEx19[15]; | ||
85 | /* ExCode 20 = Aggregation - See sxgmisc.c:SxgSetInterruptAggregation */ | ||
86 | u32 Aggregation; /* Code = 0 (extended), ExCode = 20 */ | ||
87 | u32 PadEx20[15]; | ||
88 | /* ExCode 21 = Receive MDL push timer */ | ||
89 | u32 PushTicks; /* Code = 0 (extended), ExCode = 21 */ | ||
90 | u32 PadEx21[15]; | ||
91 | /* ExCode 22 = ACK Frequency */ | ||
92 | u32 AckFrequency; /* Code = 0 (extended), ExCode = 22 */ | ||
93 | u32 PadEx22[15]; | ||
94 | /* ExCode 23 = TOE NA */ | ||
95 | u32 RsvdReg23; | ||
96 | u32 PadEx23[15]; | ||
97 | /* ExCode 24 = TOE NA */ | ||
98 | u32 RsvdReg24; | ||
99 | u32 PadEx24[15]; | ||
100 | /* ExCode 25 = TOE NA */ | ||
101 | u32 RsvdReg25; /* Code = 0 (extended), ExCode = 25 */ | ||
102 | u32 PadEx25[15]; | ||
103 | /* ExCode 26 = Receive checksum requirements */ | ||
104 | u32 ReceiveChecksum; /* Code = 0 (extended), ExCode = 26 */ | ||
105 | u32 PadEx26[15]; | ||
106 | /* ExCode 27 = RSS Requirements */ | ||
107 | u32 Rss; /* Code = 0 (extended), ExCode = 27 */ | ||
108 | u32 PadEx27[15]; | ||
109 | /* ExCode 28 = RSS Table */ | ||
110 | u32 RssTable; /* Code = 0 (extended), ExCode = 28 */ | ||
111 | u32 PadEx28[15]; | ||
112 | /* ExCode 29 = Event ring release entries */ | ||
113 | u32 EventRelease; /* Code = 0 (extended), ExCode = 29 */ | ||
114 | u32 PadEx29[15]; | ||
115 | /* ExCode 30 = Number of receive bufferlist commands on ring 0 */ | ||
116 | u32 RcvCmd; /* Code = 0 (extended), ExCode = 30 */ | ||
117 | u32 PadEx30[15]; | ||
118 | /* ExCode 31 = slowpath transmit command - Data[31:0] = 1 */ | ||
119 | u32 XmtCmd; /* Code = 0 (extended), ExCode = 31 */ | ||
120 | u32 PadEx31[15]; | ||
121 | /* ExCode 32 = Dump command */ | ||
122 | u32 DumpCmd; /* Code = 0 (extended), ExCode = 32 */ | ||
123 | u32 PadEx32[15]; | ||
124 | /* ExCode 33 = Debug command */ | ||
125 | u32 DebugCmd; /* Code = 0 (extended), ExCode = 33 */ | ||
126 | u32 PadEx33[15]; | ||
127 | /* | ||
128 | * There are 128 possible extended commands - each accounting for 16 | ||
129 | * words (including the non-relevant base command codes 1-15). | ||
130 | * Pad for the remainder of these here to bring us to the next CPU | ||
131 | * base. As extended codes are added, reduce the first array value in | ||
132 | * the following field | ||
133 | */ | ||
134 | u32 PadToNextCpu[94][16]; /* 94 = 128 - 34 (34 = Excodes 0 - 33)*/ | ||
135 | }; | ||
136 | |||
137 | /* Interrupt control register (0) values */ | ||
138 | #define SXG_ICR_DISABLE 0x00000000 | ||
139 | #define SXG_ICR_ENABLE 0x00000001 | ||
140 | #define SXG_ICR_MASK 0x00000002 | ||
141 | #define SXG_ICR_MSGID_MASK 0xFFFF0000 | ||
142 | #define SXG_ICR_MSGID_SHIFT 16 | ||
143 | #define SXG_ICR(_MessageId, _Data) \ | ||
144 | ((((_MessageId) << SXG_ICR_MSGID_SHIFT) & \ | ||
145 | SXG_ICR_MSGID_MASK) | (_Data)) | ||
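/*
 * Worked example (editor's note): enabling interrupts for MSI message 3
 * gives SXG_ICR(3, SXG_ICR_ENABLE) ==
 * ((3 << 16) & 0xFFFF0000) | 0x00000001 == 0x00030001.
 */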
146 | |||
147 | #define SXG_MIN_AGG_DEFAULT 0x0010 /* Minimum aggregation default */ | ||
148 | #define SXG_MAX_AGG_DEFAULT 0x0040 /* Maximum aggregation default */ | ||
149 | #define SXG_MAX_AGG_SHIFT 16 /* Maximum in top 16 bits of register */ | ||
150 | /* Disable interrupt aggregation on xmt */ | ||
151 | #define SXG_AGG_XMT_DISABLE 0x80000000 | ||
152 | |||
153 | /* The Microcode supports up to 16 RSS queues (RevB) */ | ||
154 | #define SXG_MAX_RSS 16 | ||
155 | #define SXG_MAX_RSS_REVA 8 | ||
156 | |||
157 | #define SXG_MAX_RSS_TABLE_SIZE 256 /* 256-byte max */ | ||
158 | |||
159 | #define SXG_RSS_REVA_TCP6 0x00000001 /* RSS TCP over IPv6 */ | ||
160 | #define SXG_RSS_REVA_TCP4 0x00000002 /* RSS TCP over IPv4 */ | ||
161 | #define SXG_RSS_IP 0x00000001 /* RSS over IP */ | ||
162 | #define SXG_RSS_TCP 0x00000002 /* RSS over TCP */ | ||
163 | #define SXG_RSS_LEGACY 0x00000004 /* Line-based interrupts */ | ||
164 | #define SXG_RSS_TABLE_SIZE 0x0000FF00 /* Table size mask */ | ||
165 | |||
166 | #define SXG_RSS_TABLE_SHIFT 8 | ||
167 | #define SXG_RSS_BASE_CPU 0x00FF0000 /* Base CPU (not used) */ | ||
168 | #define SXG_RSS_BASE_SHIFT 16 | ||
169 | |||
170 | #define SXG_RCV_IP_CSUM_ENABLED 0x00000001 /* ExCode 26 (ReceiveChecksum) */ | ||
171 | #define SXG_RCV_TCP_CSUM_ENABLED 0x00000002 /* ExCode 26 (ReceiveChecksum) */ | ||
172 | |||
173 | #define SXG_XMT_CPUID_SHIFT 16 | ||
174 | |||
175 | /* | ||
176 | * Status returned by ucode in the ConfigStat reg (see above) when attempted | ||
177 | * to load configuration data from the EEPROM/Flash. | ||
178 | */ | ||
179 | #define SXG_CFG_TIMEOUT 1 /* init value - timeout if unchanged */ | ||
180 | #define SXG_CFG_LOAD_EEPROM 2 /* config data loaded from EEPROM */ | ||
181 | #define SXG_CFG_LOAD_FLASH 3 /* config data loaded from flash */ | ||
182 | #define SXG_CFG_LOAD_INVALID 4 /* no valid config data found */ | ||
183 | #define SXG_CFG_LOAD_ERROR 5 /* hardware error */ | ||
184 | |||
185 | #define SXG_CHECK_FOR_HANG_TIME 5 | ||
186 | |||
187 | /* | ||
188 | * TCB registers - This is really the same register memory area as UCODE_REGS | ||
189 | * above, but defined differently. Bits 17:06 of the address define the TCB, | ||
190 | * which means each TCB area occupies 0x40 (64) bytes, or 16 u32s. What really | ||
191 | * is happening is that these registers occupy the "PadEx[15]" areas in the | ||
192 | * struct sxg_ucode_regs definition above | ||
193 | */ | ||
194 | struct sxg_tcb_regs { | ||
195 | u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */ | ||
196 | u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */ | ||
197 | u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */ | ||
198 | u32 Rsvd1; /* Code = 3 - TOE NA */ | ||
199 | u32 Rsvd2; /* Code = 4 - TOE NA */ | ||
200 | u32 Rsvd3; /* Code = 5 - TOE NA */ | ||
201 | u32 Invalid1; /* Code = 6 - Reserved for "CardUp" see above */ | ||
202 | u32 Rsvd4; /* Code = 7 - TOE NA */ | ||
203 | u32 Invalid2; /* Code = 8 - Reserved for "ConfigStat" see above */ | ||
204 | u32 Rsvd5; /* Code = 9 - TOE NA */ | ||
205 | u32 Pad[6]; /* Codes 10-15 - Not used. */ | ||
206 | }; | ||
207 | |||
208 | /*************************************************************************** | ||
209 | * ISR Format | ||
210 | * 31 0 | ||
211 | * _______________________________________ | ||
212 | * | | | | | | | | | | ||
213 | * |____|____|____|____|____|____|____|____| | ||
214 | * ^^^^ ^^^^ ^^^^ ^^^^ \ / | ||
215 | * ERR --|||| |||| |||| |||| ----------------- | ||
216 | * EVENT ---||| |||| |||| |||| | | ||
217 | * ----|| |||| |||| |||| |-- Crash Address | ||
218 | * UPC -----| |||| |||| |||| | ||
219 | * LEVENT -------|||| |||| |||| | ||
220 | * PDQF --------||| |||| |||| | ||
221 | * RMISS ---------|| |||| |||| | ||
222 | * BREAK ----------| |||| |||| | ||
223 | * HBEATOK ------------|||| |||| | ||
224 | * NOHBEAT -------------||| |||| | ||
225 | * ERFULL --------------|| |||| | ||
226 | * XDROP ---------------| |||| | ||
227 | * -----------------|||| | ||
228 | * -----------------||||--\ | ||
229 | * ||---|-CpuId of crash | ||
230 | * |----/ | ||
231 | ***************************************************************************/ | ||
232 | #define SXG_ISR_ERR 0x80000000 /* Error */ | ||
233 | #define SXG_ISR_EVENT 0x40000000 /* Event ring event */ | ||
234 | #define SXG_ISR_NONE1 0x20000000 /* Not used */ | ||
235 | #define SXG_ISR_UPC 0x10000000 /* Dump/debug command complete*/ | ||
236 | #define SXG_ISR_LINK 0x08000000 /* Link event */ | ||
237 | #define SXG_ISR_PDQF 0x04000000 /* Processed data queue full */ | ||
238 | #define SXG_ISR_RMISS 0x02000000 /* Drop - no host buf */ | ||
239 | #define SXG_ISR_BREAK 0x01000000 /* Breakpoint hit */ | ||
240 | #define SXG_ISR_PING 0x00800000 /* Heartbeat response */ | ||
241 | #define SXG_ISR_DEAD 0x00400000 /* Card crash */ | ||
242 | #define SXG_ISR_ERFULL 0x00200000 /* Event ring full */ | ||
243 | #define SXG_ISR_XDROP 0x00100000 /* XMT Drop - no DRAM bufs or XMT err */ | ||
244 | #define SXG_ISR_SPSEND 0x00080000 /* Slow send complete */ | ||
245 | #define SXG_ISR_CPU 0x00070000 /* Dead CPU mask */ | ||
246 | #define SXG_ISR_CPU_SHIFT 16 /* Dead CPU shift */ | ||
247 | #define SXG_ISR_CRASH 0x0000FFFF /* Crash address mask */ | ||
248 | |||
249 | /*************************************************************************** | ||
250 | * Event Ring entry | ||
251 | * | ||
252 | * 31 15 0 | ||
253 | * .___________________.___________________. | ||
254 | * |<------------ Pad 0 ------------>| | ||
255 | * |_________|_________|_________|_________|0 0x00 | ||
256 | * |<------------ Pad 1 ------------>| | ||
257 | * |_________|_________|_________|_________|4 0x04 | ||
258 | * |<------------ Pad 2 ------------>| | ||
259 | * |_________|_________|_________|_________|8 0x08 | ||
260 | * |<----------- Event Word 0 ------------>| | ||
261 | * |_________|_________|_________|_________|12 0x0c | ||
262 | * |<----------- Event Word 1 ------------>| | ||
263 | * |_________|_________|_________|_________|16 0x10 | ||
264 | * |<------------- Toeplitz ------------>| | ||
265 | * |_________|_________|_________|_________|20 0x14 | ||
266 | * |<----- Length ---->|<------ TCB Id --->| | ||
267 | * |_________|_________|_________|_________|24 0x18 | ||
268 | * |<----- Status ---->|Evnt Code|Flsh Code| | ||
269 | * |_________|_________|_________|_________|28 0x1c | ||
270 | * ^ ^^^^ ^^^^ | ||
271 | * |- VALID |||| ||||- RBUFC | ||
272 | * |||| |||-- SLOWR | ||
273 | * |||| ||--- UNUSED | ||
274 | * |||| |---- FASTC | ||
275 | * ||||------ FASTR | ||
276 | * |||------- | ||
277 | * ||-------- | ||
278 | * |--------- | ||
279 | * | ||
280 | * Slowpath status: | ||
281 | * _______________________________________ | ||
282 | * |<----- Status ---->|Evnt Code|Flsh Code| | ||
283 | * |_________|Cmd Index|_________|_________|28 0x1c | ||
284 | * ^^^ ^^^^ | ||
285 | * ||| ||||- ISTCPIP6 | ||
286 | * ||| |||-- IPONLY | ||
287 | * ||| ||--- RCVERR | ||
288 | * ||| |---- IPCBAD | ||
289 | * |||------ TCPCBAD | ||
290 | * ||------- ISTCPIP | ||
291 | * |-------- SCERR | ||
292 | * | ||
293 | ************************************************************************/ | ||
294 | #pragma pack(push, 1) | ||
295 | struct sxg_event { | ||
296 | u32 Pad[1]; /* not used */ | ||
297 | u32 SndUna; /* SndUna value */ | ||
298 | u32 Resid; /* receive MDL resid */ | ||
299 | union { | ||
300 | void * HostHandle; /* Receive host handle */ | ||
301 | u32 Rsvd1; /* TOE NA */ | ||
302 | struct { | ||
303 | u32 NotUsed; | ||
304 | u32 Rsvd2; /* TOE NA */ | ||
305 | } Flush; | ||
306 | }; | ||
307 | u32 Toeplitz; /* RSS Toeplitz hash */ | ||
308 | union { | ||
309 | ushort Rsvd3; /* TOE NA */ | ||
310 | ushort HdrOffset; /* Slowpath */ | ||
311 | }; | ||
312 | ushort Length; | ||
313 | unsigned char Rsvd4; /* TOE NA */ | ||
314 | unsigned char Code; /* Event code */ | ||
315 | unsigned char CommandIndex; /* New ring index */ | ||
316 | unsigned char Status; /* Event status */ | ||
317 | }; | ||
318 | #pragma pack(pop) | ||
319 | |||
320 | /* Event code definitions */ | ||
321 | #define EVENT_CODE_BUFFERS 0x01 /* Receive buffer list command (ring 0) */ | ||
322 | #define EVENT_CODE_SLOWRCV 0x02 /* Slowpath receive */ | ||
323 | #define EVENT_CODE_UNUSED 0x04 /* Was slowpath commands complete */ | ||
324 | |||
325 | /* Status values */ | ||
326 | #define EVENT_STATUS_VALID 0x80 /* Entry valid */ | ||
327 | |||
328 | /* Slowpath status */ | ||
329 | #define EVENT_STATUS_ERROR 0x40 /* Completed with error. Index in next byte */ | ||
330 | #define EVENT_STATUS_TCPIP4 0x20 /* TCPIPv4 frame */ | ||
331 | #define EVENT_STATUS_TCPBAD 0x10 /* Bad TCP checksum */ | ||
332 | #define EVENT_STATUS_IPBAD 0x08 /* Bad IP checksum */ | ||
333 | #define EVENT_STATUS_RCVERR 0x04 /* Slowpath receive error */ | ||
334 | #define EVENT_STATUS_IPONLY 0x02 /* IP frame */ | ||
335 | #define EVENT_STATUS_TCPIP6 0x01 /* TCPIPv6 frame */ | ||
336 | #define EVENT_STATUS_TCPIP 0x21 /* Combination of v4 and v6 */ | ||
337 | |||
338 | /* | ||
339 | * Event ring | ||
340 | * Size must be power of 2, between 128 and 16k | ||
341 | */ | ||
342 | #define EVENT_RING_SIZE 4096 | ||
343 | #define EVENT_RING_BATCH 16 /* Hand entries back 16 at a time. */ | ||
344 | /* Stop processing events after 4096 (256 * 16) */ | ||
345 | #define EVENT_BATCH_LIMIT 256 | ||
346 | |||
347 | struct sxg_event_ring { | ||
348 | struct sxg_event Ring[EVENT_RING_SIZE]; | ||
349 | }; | ||
350 | |||
351 | /* TCB Buffers */ | ||
352 | /* Maximum number of TCBS supported by hardware/microcode */ | ||
353 | #define SXG_MAX_TCB 4096 | ||
354 | /* Minimum TCBs before we fail initialization */ | ||
355 | #define SXG_MIN_TCB 512 | ||
356 | /* | ||
357 | * TCB Hash | ||
358 | * The bucket is determined by bits 11:4 of the toeplitz if we support 4k | ||
359 | * offloaded connections, 10:4 if we support 2k and so on. | ||
360 | */ | ||
361 | #define SXG_TCB_BUCKET_SHIFT 4 | ||
362 | #define SXG_TCB_PER_BUCKET 16 | ||
363 | #define SXG_TCB_BUCKET_MASK 0xFF0 /* Bucket portion of TCB ID */ | ||
364 | #define SXG_TCB_ELEMENT_MASK 0x00F /* Element within bucket */ | ||
365 | #define SXG_TCB_BUCKETS 256 /* 256 * 16 = 4k */ | ||
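/*
 * Worked example (editor's note, not in the original source): with 4k
 * offloaded connections the TCB id is split out of the Toeplitz hash as
 *
 *	bucket  = (Toeplitz & SXG_TCB_BUCKET_MASK) >> SXG_TCB_BUCKET_SHIFT;
 *	element =  Toeplitz & SXG_TCB_ELEMENT_MASK;
 *
 * i.e. bits 11:4 select one of 256 buckets and bits 3:0 one of the 16
 * TCBs within it.
 */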
366 | |||
367 | #define SXG_TCB_BUFFER_SIZE 512 /* ASSERT format is correct */ | ||
368 | |||
369 | #define SXG_TCB_RCVQ_SIZE 736 | ||
370 | |||
371 | #define SXG_TCB_COMPOSITE_BUFFER_SIZE 1024 | ||
372 | |||
373 | #define SXG_LOCATE_TCP_FRAME_HDR(_TcpObject, _IPv6) \ | ||
374 | (((_TcpObject)->VlanId) ? \ | ||
375 | ((_IPv6) ? /* Vlan frame header = yes */ \ | ||
376 | &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.SxgTcp: \ | ||
377 | &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp.SxgTcp): \ | ||
378 | ((_IPv6) ? /* Vlan frame header = No */ \ | ||
379 | &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.SxgTcp : \ | ||
380 | &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp.SxgTcp)) | ||
381 | |||
382 | #define SXG_LOCATE_IP_FRAME_HDR(_TcpObject) \ | ||
383 | (_TcpObject)->VlanId ? \ | ||
384 | &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp.Ip: \ | ||
385 | &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp.Ip | ||
386 | |||
387 | #define SXG_LOCATE_IP6_FRAME_HDR(_TcpObject) \ | ||
388 | (_TcpObject)->VlanId ? \ | ||
389 | &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.Ip: \ | ||
390 | &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.Ip | ||
391 | |||
392 | #if DBG | ||
393 | /* | ||
394 | * Horrible kludge to distinguish dumb-nic, slowpath, and | ||
395 | * fastpath traffic. Decrement the HopLimit by one | ||
396 | * for slowpath, two for fastpath. This assumes the limit is measurably | ||
397 | * greater than two, which I think is reasonable. | ||
398 | * Obviously this is DBG only. Maybe remove later, or #if 0 so we | ||
399 | * can set it when needed | ||
400 | */ | ||
401 | #define SXG_DBG_HOP_LIMIT(_TcpObject, _FastPath) { \ | ||
402 | PIPV6_HDR _Ip6FrameHdr; \ | ||
403 | if ((_TcpObject)->IPv6) { \ | ||
404 | _Ip6FrameHdr = SXG_LOCATE_IP6_FRAME_HDR((_TcpObject)); \ | ||
405 | if (_FastPath) { \ | ||
406 | _Ip6FrameHdr->HopLimit = \ | ||
407 | (_TcpObject)->Cached.TtlOrHopLimit - 2; \ | ||
408 | } else { \ | ||
409 | _Ip6FrameHdr->HopLimit = \ | ||
410 | (_TcpObject)->Cached.TtlOrHopLimit - 1; \ | ||
411 | } \ | ||
412 | } \ | ||
413 | } | ||
414 | #else | ||
415 | /* Do nothing with free build */ | ||
416 | #define SXG_DBG_HOP_LIMIT(_TcpObject, _FastPath) | ||
417 | #endif | ||
418 | |||
419 | /* Receive and transmit rings */ | ||
420 | #define SXG_MAX_RING_SIZE 256 | ||
421 | #define SXG_XMT_RING_SIZE 128 /* Start with 128 */ | ||
422 | #define SXG_RCV_RING_SIZE 128 /* Start with 128 */ | ||
423 | #define SXG_MAX_ENTRIES 4096 | ||
424 | #define SXG_JUMBO_RCV_RING_SIZE 32 | ||
425 | |||
426 | /* Structure and macros to manage a ring */ | ||
427 | struct sxg_ring_info { | ||
428 | /* Where we add entries - Note unsigned char:RING_SIZE */ | ||
429 | unsigned char Head; | ||
430 | unsigned char Tail; /* Where we pull off completed entries */ | ||
431 | ushort Size; /* Ring size - Must be a power of 2 */ | ||
432 | void * Context[SXG_MAX_RING_SIZE]; /* Shadow ring */ | ||
433 | }; | ||
434 | |||
435 | #define SXG_INITIALIZE_RING(_ring, _size) { \ | ||
436 | (_ring).Head = 0; \ | ||
437 | (_ring).Tail = 0; \ | ||
438 | (_ring).Size = (_size); \ | ||
439 | } | ||
440 | |||
441 | #define SXG_ADVANCE_INDEX(_index, _size) \ | ||
442 | ((_index) = ((_index) + 1) & ((_size) - 1)) | ||
443 | #define SXG_PREVIOUS_INDEX(_index, _size) \ | ||
444 | (((_index) - 1) &((_size) - 1)) | ||
445 | #define SXG_RING_EMPTY(_ring) ((_ring)->Head == (_ring)->Tail) | ||
446 | #define SXG_RING_FULL(_ring) \ | ||
447 | ((((_ring)->Head + 1) & ((_ring)->Size - 1)) == (_ring)->Tail) | ||
448 | #define SXG_RING_ADVANCE_HEAD(_ring) \ | ||
449 | SXG_ADVANCE_INDEX((_ring)->Head, ((_ring)->Size)) | ||
450 | #define SXG_RING_RETREAT_HEAD(_ring) ((_ring)->Head = \ | ||
451 | SXG_PREVIOUS_INDEX((_ring)->Head, (_ring)->Size)) | ||
452 | #define SXG_RING_ADVANCE_TAIL(_ring) { \ | ||
453 | ASSERT((_ring)->Tail != (_ring)->Head); \ | ||
454 | SXG_ADVANCE_INDEX((_ring)->Tail, ((_ring)->Size)); \ | ||
455 | } | ||
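/*
 * Worked example (editor's note): because Size is a power of 2 the wrap
 * is a simple mask - with Size == 128, SXG_ADVANCE_INDEX turns index 127
 * into (127 + 1) & 127 == 0.  SXG_RING_FULL deliberately leaves one slot
 * unused so that Head == Tail always means "empty".
 */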
456 | /* | ||
457 | * Set cmd to the next available ring entry, set the shadow context | ||
458 | * entry and advance the ring. | ||
459 | * The appropriate lock must be held when calling this macro | ||
460 | */ | ||
461 | #define SXG_GET_CMD(_ring, _ringinfo, _cmd, _context) { \ | ||
462 | if(SXG_RING_FULL(_ringinfo)) { \ | ||
463 | (_cmd) = NULL; \ | ||
464 | } else { \ | ||
465 | (_cmd) = &(_ring)->Descriptors[(_ringinfo)->Head]; \ | ||
466 | (_ringinfo)->Context[(_ringinfo)->Head] = (void *)(_context);\ | ||
467 | SXG_RING_ADVANCE_HEAD(_ringinfo); \ | ||
468 | } \ | ||
469 | } | ||
470 | |||
471 | /* | ||
472 | * Abort the previously allocated command by retreating the head. | ||
473 | * NOTE - The appropriate lock MUST NOT BE DROPPED between the SXG_GET_CMD | ||
474 | * and SXG_ABORT_CMD calls. | ||
475 | */ | ||
476 | #define SXG_ABORT_CMD(_ringinfo) { \ | ||
477 | ASSERT(!(SXG_RING_EMPTY(_ringinfo))); \ | ||
478 | SXG_RING_RETREAT_HEAD(_ringinfo); \ | ||
479 | (_ringinfo)->Context[(_ringinfo)->Head] = NULL; \ | ||
480 | } | ||
481 | |||
482 | /* | ||
483 | * For the given ring, return a pointer to the tail cmd and context, | ||
484 | * clear the context and advance the tail | ||
485 | */ | ||
486 | #define SXG_RETURN_CMD(_ring, _ringinfo, _cmd, _context) { \ | ||
487 | (_cmd) = &(_ring)->Descriptors[(_ringinfo)->Tail]; \ | ||
488 | (_context) = (_ringinfo)->Context[(_ringinfo)->Tail]; \ | ||
489 | (_ringinfo)->Context[(_ringinfo)->Tail] = NULL; \ | ||
490 | SXG_RING_ADVANCE_TAIL(_ringinfo); \ | ||
491 | } | ||
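/*
 * Usage sketch (editor's illustration only; the XmtRings/XmtRingZeroInfo
 * and XmtZeroLock names are assumptions based on sxg.c).  A transmit
 * path would typically do:
 *
 *	struct sxg_cmd *cmd;
 *
 *	spin_lock_irqsave(&adapter->XmtZeroLock, flags);
 *	SXG_GET_CMD(adapter->XmtRings, &adapter->XmtRingZeroInfo, cmd, skb);
 *	if (cmd) {
 *		... fill in *cmd ...
 *		WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE);
 *	}
 *	spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
 */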
492 | |||
493 | /* | ||
494 | * For a given ring find out how much the first pointer is ahead of | ||
495 | * the second pointer. "ahead" recognises the fact that the ring can wrap | ||
496 | */ | ||
497 | static inline int sxg_ring_get_forward_diff (struct sxg_ring_info *ringinfo, | ||
498 | int a, int b) { | ||
499 | if ((a < 0 || a > ringinfo->Size ) || (b < 0 || b > ringinfo->Size)) | ||
500 | return -1; | ||
501 | if (a > b) /* _a is lagging _b and _b has not wrapped around */ | ||
502 | return (a - b); | ||
503 | else | ||
504 | return ((ringinfo->Size - (b - a))); | ||
505 | } | ||
506 | |||
507 | /*************************************************************** | ||
508 | * Host Command Buffer - commands to INIC via the Cmd Rings | ||
509 | * | ||
510 | * 31 15 0 | ||
511 | * .___________________.___________________. | ||
512 | * |<-------------- Sgl Low -------------->| | ||
513 | * |_________|_________|_________|_________|0 0x00 | ||
514 | * |<-------------- Sgl High ------------->| | ||
515 | * |_________|_________|_________|_________|4 0x04 | ||
516 | * |<------------- Sge 0 Low ----------->| | ||
517 | * |_________|_________|_________|_________|8 0x08 | ||
518 | * |<------------- Sge 0 High ----------->| | ||
519 | * |_________|_________|_________|_________|12 0x0c | ||
520 | * |<------------ Sge 0 Length ---------->| | ||
521 | * |_________|_________|_________|_________|16 0x10 | ||
522 | * |<----------- Window Update ----------->| | ||
523 | * |<-------- SP 1st SGE offset ---------->| | ||
524 | * |_________|_________|_________|_________|20 0x14 | ||
525 | * |<----------- Total Length ------------>| | ||
526 | * |_________|_________|_________|_________|24 0x18 | ||
527 | * |<----- LCnt ------>|<----- Flags ----->| | ||
528 | * |_________|_________|_________|_________|28 0x1c | ||
529 | ****************************************************************/ | ||
530 | #pragma pack(push, 1) | ||
531 | struct sxg_cmd { | ||
532 | dma64_addr_t Sgl; /* Physical address of SGL */ | ||
533 | union { | ||
534 | struct { | ||
535 | dma64_addr_t FirstSgeAddress; /* Address of first SGE */ | ||
536 | u32 FirstSgeLength; /* Length of first SGE */ | ||
537 | union { | ||
538 | u32 Rsvd1; /* TOE NA */ | ||
539 | u32 SgeOffset; /* Slowpath - 2nd SGE offset */ | ||
540 | /* MDL completion - clobbers update */ | ||
541 | u32 Resid; | ||
542 | }; | ||
543 | union { | ||
544 | u32 TotalLength; /* Total transfer length */ | ||
545 | u32 Mss; /* LSO MSS */ | ||
546 | }; | ||
547 | } Buffer; | ||
548 | }; | ||
549 | union { | ||
550 | struct { | ||
551 | unsigned char Flags:4; /* slowpath flags */ | ||
552 | unsigned char IpHl:4; /* Ip header length (>>2) */ | ||
553 | unsigned char MacLen; /* Mac header len */ | ||
554 | } CsumFlags; | ||
555 | struct { | ||
556 | ushort Flags:4; /* slowpath flags */ | ||
557 | ushort TcpHdrOff:7; /* TCP */ | ||
558 | ushort MacLen:5; /* Mac header len */ | ||
559 | } LsoFlags; | ||
560 | ushort Flags; /* flags */ | ||
561 | }; | ||
562 | union { | ||
563 | ushort SgEntries; /* SG entry count including first sge */ | ||
564 | struct { | ||
565 | unsigned char Status; /* Copied from event status */ | ||
566 | unsigned char NotUsed; | ||
567 | } Status; | ||
568 | }; | ||
569 | }; | ||
570 | #pragma pack(pop) | ||
571 | |||
572 | #pragma pack(push, 1) | ||
573 | struct vlan_hdr { | ||
574 | ushort VlanTci; | ||
575 | ushort VlanTpid; | ||
576 | }; | ||
577 | #pragma pack(pop) | ||
578 | |||
579 | /******************************************************************** | ||
580 | * Slowpath Flags: | ||
581 | * | ||
582 | * | ||
583 | * LSS Flags: | ||
584 | * .--- | ||
585 | * /.--- TCP Large segment send | ||
586 | * //.--- | ||
587 | * ///.--- | ||
588 | * 3 1 1 //// | ||
589 | * 1 5 0 |||| | ||
590 | * .___________________.____________vvvv. | ||
591 | * | |MAC | TCP | | | ||
592 | * | LCnt |hlen|hdroff|Flgs| | ||
593 | * |___________________|||||||||||||____| | ||
594 | * | ||
595 | * | ||
596 | * Checksum Flags | ||
597 | * | ||
598 | * .--- | ||
599 | * /.--- | ||
600 | * //.--- Checksum TCP | ||
601 | * ///.--- Checksum IP | ||
602 | * 3 1 //// No bits - normal send | ||
603 | * 1 5 7 |||| | ||
604 | * .___________________._______________vvvv. | ||
605 | * | | Offload | IP | | | ||
606 | * | LCnt |MAC hlen |Hlen|Flgs| | ||
607 | * |___________________|____|____|____|____| | ||
608 | * | ||
609 | *****************************************************************/ | ||
610 | /* Slowpath CMD flags */ | ||
611 | #define SXG_SLOWCMD_CSUM_IP 0x01 /* Checksum IP */ | ||
612 | #define SXG_SLOWCMD_CSUM_TCP 0x02 /* Checksum TCP */ | ||
613 | #define SXG_SLOWCMD_LSO 0x04 /* Large segment send */ | ||
614 | |||
615 | struct sxg_xmt_ring { | ||
616 | struct sxg_cmd Descriptors[SXG_XMT_RING_SIZE]; | ||
617 | }; | ||
618 | |||
619 | struct sxg_rcv_ring { | ||
620 | struct sxg_cmd Descriptors[SXG_RCV_RING_SIZE]; | ||
621 | }; | ||
622 | |||
623 | /* | ||
624 | * Shared memory buffer types - Used to identify asynchronous | ||
625 | * shared memory allocation | ||
626 | */ | ||
627 | enum sxg_buffer_type { | ||
628 | SXG_BUFFER_TYPE_RCV, /* Receive buffer */ | ||
629 | SXG_BUFFER_TYPE_SGL /* SGL buffer */ | ||
630 | }; | ||
631 | |||
632 | /* State for SXG buffers */ | ||
633 | #define SXG_BUFFER_FREE 0x01 | ||
634 | #define SXG_BUFFER_BUSY 0x02 | ||
635 | #define SXG_BUFFER_ONCARD 0x04 | ||
636 | #define SXG_BUFFER_UPSTREAM 0x08 | ||
637 | |||
638 | /* | ||
639 | * Receive data buffers | ||
640 | * | ||
641 | * Receive data buffers are given to the Sahara card 128 at a time. | ||
642 | * This is accomplished by filling in a "receive descriptor block" | ||
643 | * with 128 "receive descriptors". Each descriptor consists of | ||
644 | * a physical address, which the card uses as the address to | ||
645 | * DMA data into, and a virtual address, which is given back | ||
646 | * to the host in the "HostHandle" portion of an event. | ||
647 | * The receive descriptor data structure is defined below | ||
648 | * as sxg_rcv_data_descriptor, and the corresponding block | ||
649 | * is defined as sxg_rcv_descriptor_block. | ||
650 | * | ||
651 | * This receive descriptor block is given to the card by filling | ||
652 | * in the Sgl field of a sxg_cmd entry from pAdapt->RcvRings[0] | ||
653 | * with the physical address of the receive descriptor block. | ||
654 | * | ||
655 | * Both the receive buffers and the receive descriptor blocks | ||
656 | * require additional data structures to maintain them | ||
657 | * on a free queue and contain other information associated with them. | ||
658 | * Those data structures are defined as the sxg_rcv_data_buffer_hdr | ||
659 | * and sxg_rcv_descriptor_block_hdr respectively. | ||
660 | * | ||
661 | * Since both the receive buffers and the receive descriptor block | ||
662 | * must be accessible by the card, both must be allocated out of | ||
663 | * shared memory. To ensure that we always have a descriptor | ||
664 | * block available for every 128 buffers, we allocate all of | ||
665 | * these resources together in a single block. This entire | ||
666 | * block is managed by a struct sxg_rcv_block_hdr, whose sole purpose | ||
667 | * is to maintain address information so that the entire block | ||
668 | * can be freed later. | ||
669 | * | ||
670 | * Further complicating matters is the fact that the receive | ||
671 | * buffers must be variable in length in order to accommodate | ||
672 | * jumbo frame configurations. We configure the buffer | ||
673 | * length so that the buffer and its corresponding struct | ||
674 | * sxg_rcv_data_buffer_hdr structure add up to an even | ||
675 | * boundary. Then we place the remaining data structures after 128 | ||
676 | * of them as shown in the following diagram: | ||
677 | * | ||
678 | * _________________________________________ | ||
679 | * | | | ||
680 | * | Variable length receive buffer #1 | | ||
681 | * |_________________________________________| | ||
682 | * | | | ||
683 | * | sxg_rcv_data_buffer_hdr #1 | | ||
684 | * |_________________________________________| <== Even 2k or 10k boundary | ||
685 | * | | | ||
686 | * | ... repeat 2-128 .. | | ||
687 | * |_________________________________________| | ||
688 | * | | | ||
689 | * | struct sxg_rcv_descriptor_block | | ||
690 | * | Contains sxg_rcv_data_descriptor * 128 | | ||
691 | * |_________________________________________| | ||
692 | * | | | ||
693 | * | struct sxg_rcv_descriptor_block_hdr | | ||
694 | * |_________________________________________| | ||
695 | * | | | ||
696 | * | struct sxg_rcv_block_hdr | | ||
697 | * |_________________________________________| | ||
698 | * | ||
699 | * Memory consumption: | ||
700 | * Non-jumbo: | ||
701 | * Buffers and sxg_rcv_data_buffer_hdr = 2k * 128 = 256k | ||
702 | * + struct sxg_rcv_descriptor_block = 2k | ||
703 | * + struct sxg_rcv_descriptor_block_hdr = ~32 | ||
704 | * + struct sxg_rcv_block_hdr = ~32 | ||
705 | * => Total = ~258k/block | ||
706 | * | ||
707 | * Jumbo: | ||
708 | * Buffers and sxg_rcv_data_buffer_hdr = 10k * 128 = 1280k | ||
709 | * + struct sxg_rcv_descriptor_block = 2k | ||
710 | * + struct sxg_rcv_descriptor_block_hdr = ~32 | ||
711 | * + struct sxg_rcv_block_hdr = ~32 | ||
712 | * => Total = ~1282k/block | ||
713 | * | ||
714 | */ | ||
715 | #define SXG_RCV_DATA_BUFFERS 8192 /* Amount to give to the card */ | ||
716 | #define SXG_INITIAL_RCV_DATA_BUFFERS 16384 /* Initial pool of buffers */ | ||
717 | /* Minimum amount and when to get more */ | ||
718 | #define SXG_MIN_RCV_DATA_BUFFERS 4096 | ||
719 | #define SXG_MAX_RCV_BLOCKS 256 /* = 32k receive buffers */ | ||
720 | /* Amount to give to the card in case of jumbo frames */ | ||
721 | #define SXG_JUMBO_RCV_DATA_BUFFERS 2048 | ||
722 | /* Initial pool of buffers in case of jumbo buffers */ | ||
723 | #define SXG_INITIAL_JUMBO_RCV_DATA_BUFFERS 4096 | ||
724 | #define SXG_MIN_JUMBO_RCV_DATA_BUFFERS 1024 | ||
725 | |||
726 | /* Receive buffer header */ | ||
727 | struct sxg_rcv_data_buffer_hdr { | ||
728 | dma64_addr_t PhysicalAddress; /* Buffer physical address */ | ||
729 | /* | ||
730 | * Note - DO NOT USE the VirtualAddress field to locate data. | ||
731 | * Use the sxg.h:SXG_RECEIVE_DATA_LOCATION macro instead. | ||
732 | */ | ||
733 | struct list_entry FreeList; /* Free queue of buffers */ | ||
734 | unsigned char State; /* See SXG_BUFFER state above */ | ||
735 | struct sk_buff * skb; /* Double mapped (nbl and pkt)*/ | ||
736 | }; | ||
737 | |||
738 | /* | ||
739 | * SxgSlowReceive uses the PACKET (skb) contained | ||
740 | * in the struct sxg_rcv_data_buffer_hdr when indicating dumb-nic data | ||
741 | */ | ||
742 | #define SxgDumbRcvPacket skb | ||
743 | |||
744 | /* Space for struct sxg_rcv_data_buffer_hdr */ | ||
745 | #define SXG_RCV_DATA_HDR_SIZE sizeof(struct sxg_rcv_data_buffer_hdr) | ||
746 | /* Non jumbo = 2k including HDR */ | ||
747 | #define SXG_RCV_DATA_BUFFER_SIZE 2048 | ||
748 | /* jumbo = 10k including HDR */ | ||
749 | #define SXG_RCV_JUMBO_BUFFER_SIZE 10240 | ||
750 | |||
751 | /* Receive data descriptor */ | ||
752 | struct sxg_rcv_data_descriptor { | ||
753 | union { | ||
754 | struct sk_buff *VirtualAddress; /* Host handle */ | ||
755 | u64 ForceTo8Bytes; /*Force x86 to 8-byte boundary*/ | ||
756 | }; | ||
757 | dma64_addr_t PhysicalAddress; | ||
758 | }; | ||
759 | |||
760 | /* Receive descriptor block */ | ||
761 | #define SXG_RCV_DESCRIPTORS_PER_BLOCK 128 | ||
762 | #define SXG_RCV_DESCRIPTOR_BLOCK_SIZE 2048 /* For sanity check */ | ||
763 | |||
764 | struct sxg_rcv_descriptor_block { | ||
765 | struct sxg_rcv_data_descriptor Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK]; | ||
766 | }; | ||
767 | |||
768 | /* Receive descriptor block header */ | ||
769 | struct sxg_rcv_descriptor_block_hdr { | ||
770 | void *VirtualAddress; /* start of 2k buffer */ | ||
771 | dma64_addr_t PhysicalAddress;/* and its physical address */ | ||
772 | struct list_entry FreeList;/* free queue of descriptor blocks */ | ||
773 | unsigned char State; /* see sxg_buffer state above */ | ||
774 | }; | ||
775 | |||
776 | /* Receive block header */ | ||
777 | struct sxg_rcv_block_hdr { | ||
778 | void *VirtualAddress; /* Start of virtual memory */ | ||
779 | dma64_addr_t PhysicalAddress;/* ..and its physical address */ | ||
780 | struct list_entry AllList; /* Queue of all SXG_RCV_BLOCKS*/ | ||
781 | }; | ||
782 | |||
783 | /* Macros to determine data structure offsets into receive block */ | ||
784 | #define SXG_RCV_BLOCK_SIZE(_Buffersize) \ | ||
785 | (((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \ | ||
786 | (sizeof(struct sxg_rcv_descriptor_block)) + \ | ||
787 | (sizeof(struct sxg_rcv_descriptor_block_hdr)) + \ | ||
788 | (sizeof(struct sxg_rcv_block_hdr))) | ||
789 | #define SXG_RCV_BUFFER_DATA_SIZE(_Buffersize) \ | ||
790 | ((_Buffersize) - SXG_RCV_DATA_HDR_SIZE) | ||
791 | #define SXG_RCV_DATA_BUFFER_HDR_OFFSET(_Buffersize) \ | ||
792 | ((_Buffersize) - SXG_RCV_DATA_HDR_SIZE) | ||
793 | #define SXG_RCV_DESCRIPTOR_BLOCK_OFFSET(_Buffersize) \ | ||
794 | ((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) | ||
795 | #define SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET(_Buffersize) \ | ||
796 | (((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \ | ||
797 | (sizeof(struct sxg_rcv_descriptor_block))) | ||
798 | #define SXG_RCV_BLOCK_HDR_OFFSET(_Buffersize) \ | ||
799 | (((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \ | ||
800 | (sizeof(struct sxg_rcv_descriptor_block)) + \ | ||
801 | (sizeof(struct sxg_rcv_descriptor_block_hdr))) | ||
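/*
 * Worked example (editor's note): in the non-jumbo case, with
 * _Buffersize == SXG_RCV_DATA_BUFFER_SIZE (2048), the 128 buffers occupy
 * 128 * 2k = 256k, so the descriptor block starts at offset 256k, the
 * descriptor block header at 256k + 2k, and the block header right after
 * that - matching the ~258k/block figure quoted above.
 */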
802 | |||
803 | /* Scatter gather list buffer */ | ||
804 | #define SXG_INITIAL_SGL_BUFFERS 8192 /* Initial pool of SGL buffers */ | ||
805 | #define SXG_MIN_SGL_BUFFERS 2048 /* Minimum amount and when to get more*/ | ||
806 | /* Maximum to allocate (note ADAPT:ushort) */ | ||
807 | #define SXG_MAX_SGL_BUFFERS 16384 | ||
808 | |||
809 | /* | ||
810 | * SXG_SGL_POOL_PROPERTIES - This structure is used to define a pool of SGL | ||
811 | * buffers. These buffers are allocated out of shared memory and used to | ||
812 | * contain a physical scatter gather list structure that is shared | ||
813 | * with the card. | ||
814 | * | ||
815 | * We split our SGL buffers into multiple pools based on size. The motivation | ||
816 | * is that some applications perform very large I/Os (1MB for example), so | ||
817 | * we need to be able to allocate an SGL to accommodate such a request. | ||
818 | * But such an SGL would require 256 24-byte SG entries - ~6k. | ||
819 | * Given that the vast majority of I/Os are much smaller than 1M, allocating | ||
820 | * a single pool of SGL buffers would be a horribly inefficient use of | ||
821 | * memory. | ||
822 | * | ||
823 | * The following structure includes two fields relating to its size. | ||
824 | * The NBSize field specifies the largest NET_BUFFER that can be handled | ||
825 | * by the particular pool. The SGEntries field defines the size, in | ||
826 | * entries, of the SGL for that pool. The SGEntries is determined by | ||
827 | * dividing the NBSize by the expected page size (4k), and then padding | ||
828 | * it by some appropriate amount as insurance (20% or so..??). | ||
829 | */ | ||
830 | struct sxg_sgl_pool_properties { | ||
831 | u32 NBSize; /* Largest NET_BUFFER size for this pool */ | ||
832 | ushort SGEntries; /* Number of entries in SGL */ | ||
833 | ushort InitialBuffers; /* Number to allocate at initialization time */ | ||
834 | ushort MinBuffers; /* When to get more */ | ||
835 | ushort MaxBuffers; /* When to stop */ | ||
836 | ushort PerCpuThreshold;/* See sxgh.h:SXG_RESOURCES */ | ||
837 | }; | ||
838 | |||
839 | /* | ||
840 | * At the moment I'm going to statically initialize 4 pools: | ||
841 | * 100k buffer pool: The vast majority of the expected buffers are expected | ||
842 | * to be less than or equal to 100k. At 30 entries per and | ||
843 | * 8k initial buffers amounts to ~4MB of memory | ||
844 | * NOTE - This used to be 64K with 20 entries, but during | ||
845 | * WHQL NDIS 6.0 Testing (2c_mini6stress) MS does their | ||
846 | * best to send absurd NBL's with ridiculous SGLs, we | ||
847 | * have received 400byte sends contained in SGL's that | ||
848 | * have 28 entries | ||
849 | * 1M buffer pool: Buffers between 64k and 1M. Allocate 256 initial | ||
850 | * buffers with 300 entries each => ~2MB of memory | ||
851 | * 5M buffer pool: Not expected often, if at all. 32 initial buffers | ||
852 | * at 1500 entries each => ~1MB of memory | ||
853 | * 10M buffer pool: Not expected at all, except under pathological conditions. | ||
854 | * Allocate one at initialization time. | ||
855 | * Note - 10M is the current limit of what we can realistically | ||
856 | * support due to the Sahara SGL bug described in the | ||
857 | * SAHARA SGL WORKAROUND below. We will likely adjust the | ||
858 | * number of pools and/or pool properties over time. | ||
859 | */ | ||
860 | #define SXG_NUM_SGL_POOLS 4 | ||
861 | #define INITIALIZE_SGL_POOL_PROPERTIES \ | ||
862 | struct sxg_sgl_pool_properties SxgSglPoolProperties[SXG_NUM_SGL_POOLS] =\ | ||
863 | { \ | ||
864 | { 102400, 30, 8192, 2048, 16384, 256}, \ | ||
865 | { 1048576, 300, 256, 128, 1024, 16}, \ | ||
866 | { 5252880, 1500, 32, 16, 512, 0}, \ | ||
867 | {10485760, 2700, 2, 4, 32, 0}, \ | ||
868 | }; | ||
869 | |||
870 | extern struct sxg_sgl_pool_properties SxgSglPoolProperties[]; | ||
871 | |||
872 | #define SXG_MAX_SGL_BUFFER_SIZE \ | ||
873 | SxgSglPoolProperties[SXG_NUM_SGL_POOLS - 1].NBSize | ||
874 | |||
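The pool table above is keyed by NBSize, so a sender can pick the smallest pool whose NBSize covers a given request. A minimal lookup sketch, assuming the SxgSglPoolProperties table and SXG_NUM_SGL_POOLS above are in scope (the helper name is illustrative, not part of the driver):

	/* Hypothetical helper: index of the smallest pool whose NBSize can
	 * hold 'bytes', or -1 if the request exceeds every pool. */
	static int sxg_pick_sgl_pool(u32 bytes)
	{
		int i;

		for (i = 0; i < SXG_NUM_SGL_POOLS; i++) {
			if (bytes <= SxgSglPoolProperties[i].NBSize)
				return i;	/* e.g. 64k -> pool 0, 600k -> pool 1 */
		}
		return -1;	/* larger than SXG_MAX_SGL_BUFFER_SIZE */
	}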
875 | /* | ||
876 | * SAHARA SGL WORKAROUND!! | ||
877 | * The current Sahara card uses a 16-bit counter when advancing | ||
878 | * SGL address locations. This means that if an SGL crosses | ||
879 | * a 64k boundary, the hardware will actually skip back to | ||
880 | * the start of the previous 64k boundary, with obviously | ||
881 | * undesirable results. | ||
882 | * | ||
883 | * We currently workaround this issue by allocating SGL buffers | ||
884 | * in 64k blocks and skipping over buffers that straddle the boundary. | ||
885 | */ | ||
886 | #define SXG_INVALID_SGL(phys_addr,len) \ | ||
887 | (((phys_addr >> 16) != ( (phys_addr + len) >> 16 ))) | ||
888 | |||
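For example, a 0x40-byte SGL buffer at physical address 0x1FFF0 straddles a 64k boundary (0x1FFF0 >> 16 == 1 but 0x20030 >> 16 == 2), so SXG_INVALID_SGL evaluates true and the buffer is skipped, while the same buffer placed at 0x20000 stays within one 64k window and is accepted.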
889 | /* | ||
890 | * Allocate SGLs in blocks so we can skip over invalid entries. | ||
891 | * We allocate 64k worth of SGL buffers, including the | ||
892 | * struct sxg_sgl_block_hdr, plus one for padding | ||
893 | */ | ||
894 | #define SXG_SGL_BLOCK_SIZE 65536 | ||
895 | #define SXG_SGL_ALLOCATION_SIZE(_Pool) \ | ||
896 | SXG_SGL_BLOCK_SIZE + SXG_SGL_SIZE(_Pool) | ||
897 | |||
898 | struct sxg_sgl_block_hdr { | ||
899 | ushort Pool; /* Associated SGL pool */ | ||
900 | /* struct sxg_scatter_gather blocks */ | ||
901 | struct list_entry List; | ||
902 | dma64_addr_t PhysicalAddress;/* physical address */ | ||
903 | }; | ||
904 | |||
905 | /* | ||
906 | * The following definition denotes the maximum block of memory that the | ||
907 | * card can DMA to. It is specified in the call to NdisMRegisterScatterGatherDma. | ||
908 | * For now, use the same value as used in the Slic/Oasis driver, which | ||
909 | * is 128M. That should cover any expected MDL that I can think of. | ||
910 | */ | ||
911 | #define SXG_MAX_PHYS_MAP (1024 * 1024 * 128) | ||
912 | |||
913 | /* Self identifying structure type */ | ||
914 | enum SXG_SGL_TYPE { | ||
915 | SXG_SGL_DUMB, /* Dumb NIC SGL */ | ||
916 | SXG_SGL_SLOW, /* Slowpath protocol header - see below */ | ||
917 | SXG_SGL_CHIMNEY /* Chimney offload SGL */ | ||
918 | }; | ||
919 | |||
920 | /* | ||
921 | * The ucode expects an NDIS SGL structure that | ||
922 | * is formatted for an x64 system. When running | ||
923 | * on an x64 system, we can simply hand the NDIS SGL | ||
924 | * to the card directly. For x86 systems we must reconstruct | ||
925 | * the SGL. The following structure defines an x64 | ||
926 | * formatted SGL entry | ||
927 | */ | ||
928 | struct sxg_x64_sge { | ||
929 | dma64_addr_t Address; /* same as wdm.h */ | ||
930 | u32 Length; /* same as wdm.h */ | ||
931 | u32 CompilerPad; /* The compiler pads to 8-bytes */ | ||
932 | u64 Reserved; /* u32 * in wdm.h. Force to 8 bytes */ | ||
933 | }; | ||
934 | |||
935 | /* | ||
936 | * Our SGL structure - Essentially the same as | ||
937 | * wdm.h:SCATTER_GATHER_LIST. Note the variable number of | ||
938 | * elements based on the pool specified above | ||
939 | */ | ||
940 | struct sxg_x64_sgl { | ||
941 | u32 NumberOfElements; | ||
942 | u32 *Reserved; | ||
943 | struct sxg_x64_sge Elements[1]; /* Variable */ | ||
944 | }; | ||
945 | |||
946 | struct sxg_scatter_gather { | ||
947 | enum SXG_SGL_TYPE Type; /* FIRST! Dumb-nic or offload */ | ||
948 | ushort Pool; /* Associated SGL pool */ | ||
949 | ushort Entries; /* SGL total entries */ | ||
950 | void * adapter; /* Back pointer to adapter */ | ||
951 | /* Free struct sxg_scatter_gather blocks */ | ||
952 | struct list_entry FreeList; | ||
953 | /* All struct sxg_scatter_gather blocks */ | ||
954 | struct list_entry AllList; | ||
955 | dma64_addr_t PhysicalAddress;/* physical address */ | ||
956 | unsigned char State; /* See SXG_BUFFER state above */ | ||
957 | unsigned char CmdIndex; /* Command ring index */ | ||
958 | struct sk_buff *DumbPacket; /* Associated Packet */ | ||
959 | /* For asynchronous completions */ | ||
960 | u32 Direction; | ||
961 | u32 CurOffset; /* Current SGL offset */ | ||
962 | u32 SglRef; /* SGL reference count */ | ||
963 | struct vlan_hdr VlanTag; /* VLAN tag to be inserted into SGL */ | ||
964 | struct sxg_x64_sgl *pSgl; /* SGL Addr. Possibly &Sgl */ | ||
965 | struct sxg_x64_sgl Sgl; /* SGL handed to card */ | ||
966 | }; | ||
967 | |||
968 | /* | ||
969 | * Note - the "- 1" is because struct sxg_scatter_gather=>struct sxg_x64_sgl | ||
970 | * includes 1 SGE.. | ||
971 | */ | ||
972 | #define SXG_SGL_SIZE(_Pool) \ | ||
973 | (sizeof(struct sxg_scatter_gather) + \ | ||
974 | ((SxgSglPoolProperties[_Pool].SGEntries - 1) * \ | ||
975 | sizeof(struct sxg_x64_sge))) | ||
976 | |||
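For pool 0 (30 entries) this evaluates to sizeof(struct sxg_scatter_gather) plus 29 * sizeof(struct sxg_x64_sge) = 29 * 24 = 696 extra bytes, and SXG_SGL_ALLOCATION_SIZE above then adds the 64k block size on top. A one-line usage sketch, assuming the definitions above:

	/* Bytes needed for one pool-0 SGL buffer (struct + 29 extra SGEs) */
	u32 sgl_bytes = SXG_SGL_SIZE(0);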
977 | /* Force NDIS to give us its own buffer so we can reformat to our own */ | ||
978 | #define SXG_SGL_BUFFER(_SxgSgl) NULL | ||
979 | #define SXG_SGL_BUFFER_LENGTH(_SxgSgl) 0 | ||
980 | #define SXG_SGL_BUF_SIZE 0 | ||
981 | |||
982 | /* | ||
983 | #if defined(CONFIG_X86_64) | ||
984 | #define SXG_SGL_BUFFER(_SxgSgl) (&_SxgSgl->Sgl) | ||
985 | #define SXG_SGL_BUFFER_LENGTH(_SxgSgl) ((_SxgSgl)->Entries * \ | ||
986 | sizeof(struct sxg_x64_sge)) | ||
987 | #define SXG_SGL_BUF_SIZE sizeof(struct sxg_x64_sgl) | ||
988 | #elif defined(CONFIG_X86) | ||
989 | // Force NDIS to give us its own buffer so we can reformat to our own | ||
990 | #define SXG_SGL_BUFFER(_SxgSgl) NULL | ||
991 | #define SXG_SGL_BUFFER_LENGTH(_SxgSgl) 0 | ||
992 | #define SXG_SGL_BUF_SIZE 0 | ||
993 | #else | ||
994 | #error staging: sxg: driver is for X86 only! | ||
995 | #endif | ||
996 | */ | ||
997 | /* Microcode statistics */ | ||
998 | struct sxg_ucode_stats { | ||
999 | u32 RPDQOflow; /* PDQ overflow (unframed ie dq & drop 1st) */ | ||
1000 | u32 XDrops; /* Xmt drops due to no xmt buffer */ | ||
1001 | u32 ERDrops; /* Rcv drops due to ER full */ | ||
1002 | u32 NBDrops; /* Rcv drops due to out of host buffers */ | ||
1003 | u32 PQDrops; /* Rcv drops due to PDQ full */ | ||
1004 | /* Rcv drops due to bad frame: no link addr match, frlen > max */ | ||
1005 | u32 BFDrops; | ||
1006 | u32 UPDrops; /* Rcv drops due to UPFq full */ | ||
1007 | u32 XNoBufs; /* Xmt drop due to no DRAM Xmit buffer or PxyBuf */ | ||
1008 | }; | ||
1009 | |||
1010 | /* | ||
1011 | * Macros for handling the Offload engine values | ||
1012 | */ | ||
1013 | /* Number of positions to shift Network Header Length before passing to card */ | ||
1014 | #define SXG_NW_HDR_LEN_SHIFT 2 | ||
diff --git a/drivers/staging/sxg/sxghw.h b/drivers/staging/sxg/sxghw.h deleted file mode 100644 index 81f81d4b0ad0..000000000000 --- a/drivers/staging/sxg/sxghw.h +++ /dev/null | |||
@@ -1,1020 +0,0 @@ | |||
1 | /************************************************************* | ||
2 | * Copyright © 1997-2007 Alacritech, Inc. All rights reserved | ||
3 | * | ||
4 | * $Id: sxghw.h,v 1.2 2008/07/24 17:24:23 chris Exp $ | ||
5 | * | ||
6 | * sxghw.h: | ||
7 | * | ||
8 | * This file contains structures and definitions for the | ||
9 | * Alacritech Sahara hardware | ||
10 | * | ||
11 | **********************************************************/ | ||
12 | |||
13 | |||
14 | /* PCI Configuration space */ | ||
15 | /* PCI Vendor ID */ | ||
16 | #define SXG_VENDOR_ID 0x139A /* Alacritech's Vendor ID */ | ||
17 | |||
18 | /* PCI Device ID */ | ||
19 | #define SXG_DEVICE_ID 0x0009 /* Sahara Device ID */ | ||
20 | |||
21 | |||
22 | /* Type of ASIC in use */ | ||
23 | enum asic_type { | ||
24 | SAHARA_REV_A, | ||
25 | SAHARA_REV_B | ||
26 | }; | ||
27 | |||
28 | /* Type of Xcvr in fiber card */ | ||
29 | enum xcvr_type { | ||
30 | XCVR_UNKNOWN, | ||
31 | XCVR_NONE, | ||
32 | XCVR_SR, | ||
33 | XCVR_LR, | ||
34 | XCVR_LRM, | ||
35 | XCVR_CR | ||
36 | }; | ||
37 | /* | ||
38 | * Subsystem IDs. | ||
39 | * | ||
40 | * The subsystem ID value is broken into bit fields as follows: | ||
41 | * Bits [15:12] - Function | ||
42 | * Bits [11:8] - OEM and/or operating system. | ||
43 | * Bits [7:0] - Base SID. | ||
44 | */ | ||
45 | |||
46 | /* SSID field (bit) masks */ | ||
47 | #define SSID_BASE_MASK 0x00FF /* Base subsystem ID mask */ | ||
48 | #define SSID_OEM_MASK 0x0F00 /* Subsystem OEM mask */ | ||
49 | #define SSID_FUNC_MASK 0xF000 /* Subsystem function mask */ | ||
50 | |||
51 | /* Base SSID's */ | ||
52 | /* 100022 Sahara prototype (XenPak) board */ | ||
53 | #define SSID_SAHARA_PROTO 0x0018 | ||
54 | #define SSID_SAHARA_FIBER 0x0019 /* 100023 Sahara 1-port fiber board */ | ||
55 | #define SSID_SAHARA_COPPER 0x001A /* 100024 Sahara 1-port copper board */ | ||
56 | |||
57 | /* Useful SSID macros */ | ||
58 | /* isolate base SSID bits */ | ||
59 | #define SSID_BASE(ssid) ((ssid) & SSID_BASE_MASK) | ||
60 | /* isolate SSID OEM bits */ | ||
61 | #define SSID_OEM(ssid) ((ssid) & SSID_OEM_MASK) | ||
62 | /* isolate SSID function bits */ | ||
63 | #define SSID_FUNC(ssid) ((ssid) & SSID_FUNC_MASK) | ||
64 | |||
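As an illustration, a subsystem ID of 0x101A (a made-up value) splits with the masks above into function 0x1000, OEM 0x0000 and base SSID 0x001A, so SSID_BASE() identifies the single-port copper board regardless of the OEM and function bits. A short sketch, assuming the macros above (the helper name is illustrative):

	/* Compare only the base SSID bits; OEM/function bits are ignored */
	static int sxg_is_copper_board(ushort ssid)
	{
		return SSID_BASE(ssid) == SSID_SAHARA_COPPER;
	}
	/* sxg_is_copper_board(0x101A) is non-zero for the example value above */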
65 | |||
66 | /* HW Register Space */ | ||
67 | #define SXG_HWREG_MEMSIZE 0x4000 /* 16k */ | ||
68 | |||
69 | #pragma pack(push, 1) | ||
70 | struct sxg_hw_regs { | ||
71 | u32 Reset; /* Write 0xdead to invoke soft reset */ | ||
72 | u32 Pad1; /* No register defined at offset 4 */ | ||
73 | u32 InterruptMask0; /* Deassert legacy interrupt on function 0 */ | ||
74 | u32 InterruptMask1; /* Deassert legacy interrupt on function 1 */ | ||
75 | u32 UcodeDataLow; /* Store microcode instruction bits 31-0 */ | ||
76 | u32 UcodeDataMiddle; /* Store microcode instruction bits 63-32 */ | ||
77 | u32 UcodeDataHigh; /* Store microcode instruction bits 95-64 */ | ||
78 | u32 UcodeAddr; /* Store microcode address - See flags below */ | ||
79 | u32 PadTo0x80[24]; /* Pad to Xcv configuration registers */ | ||
80 | u32 MacConfig0; /* 0x80 - AXGMAC Configuration Register 0 */ | ||
81 | u32 MacConfig1; /* 0x84 - AXGMAC Configuration Register 1 */ | ||
82 | u32 MacConfig2; /* 0x88 - AXGMAC Configuration Register 2 */ | ||
83 | u32 MacConfig3; /* 0x8C - AXGMAC Configuration Register 3 */ | ||
84 | u32 MacAddressLow; /* 0x90 - AXGMAC MAC Station Address - octets 1-4 */ | ||
85 | u32 MacAddressHigh; /* 0x94 - AXGMAC MAC Station Address - octets 5-6 */ | ||
86 | u32 MacReserved1[2]; /* 0x98 - AXGMAC Reserved */ | ||
87 | u32 MacMaxFrameLen; /* 0xA0 - AXGMAC Maximum Frame Length */ | ||
88 | u32 MacReserved2[2]; /* 0xA4 - AXGMAC Reserved */ | ||
89 | u32 MacRevision; /* 0xAC - AXGMAC Revision Level Register */ | ||
90 | u32 MacReserved3[4]; /* 0xB0 - AXGMAC Reserved */ | ||
91 | u32 MacAmiimCmd; /* 0xC0 - AXGMAC AMIIM Command Register */ | ||
92 | u32 MacAmiimField; /* 0xC4 - AXGMAC AMIIM Field Register */ | ||
93 | u32 MacAmiimConfig; /* 0xC8 - AXGMAC AMIIM Configuration Register */ | ||
94 | u32 MacAmiimLink; /* 0xCC - AXGMAC AMIIM Link Fail Vector Register */ | ||
95 | u32 MacAmiimIndicator; /* 0xD0 - AXGMAC AMIIM Indicator Register */ | ||
96 | u32 PadTo0x100[11]; /* 0xD4 - 0x100 - Pad */ | ||
97 | u32 XmtConfig; /* 0x100 - Transmit Configuration Register */ | ||
98 | u32 RcvConfig; /* 0x104 - Receive Configuration Register 1 */ | ||
99 | u32 LinkAddress0Low; /* 0x108 - Link address 0 */ | ||
100 | u32 LinkAddress0High; /* 0x10C - Link address 0 */ | ||
101 | u32 LinkAddress1Low; /* 0x110 - Link address 1 */ | ||
102 | u32 LinkAddress1High; /* 0x114 - Link address 1 */ | ||
103 | u32 LinkAddress2Low; /* 0x118 - Link address 2 */ | ||
104 | u32 LinkAddress2High; /* 0x11C - Link address 2 */ | ||
105 | u32 LinkAddress3Low; /* 0x120 - Link address 3 */ | ||
106 | u32 LinkAddress3High; /* 0x124 - Link address 3 */ | ||
107 | u32 ToeplitzKey[10]; /* 0x128 - 0x150 - Toeplitz key */ | ||
108 | u32 SocketKey[10]; /* 0x150 - 0x178 - Socket Key */ | ||
109 | u32 LinkStatus; /* 0x178 - Link status */ | ||
110 | u32 ClearStats; /* 0x17C - Clear Stats */ | ||
111 | u32 XmtErrorsLow; /* 0x180 - Transmit stats - errors */ | ||
112 | u32 XmtErrorsHigh; /* 0x184 - Transmit stats - errors */ | ||
113 | u32 XmtFramesLow; /* 0x188 - Transmit stats - frame count */ | ||
114 | u32 XmtFramesHigh; /* 0x18C - Transmit stats - frame count */ | ||
115 | u32 XmtBytesLow; /* 0x190 - Transmit stats - byte count */ | ||
116 | u32 XmtBytesHigh; /* 0x194 - Transmit stats - byte count */ | ||
117 | u32 XmtTcpSegmentsLow; /* 0x198 - Transmit stats - TCP segments */ | ||
118 | u32 XmtTcpSegmentsHigh; /* 0x19C - Transmit stats - TCP segments */ | ||
119 | u32 XmtTcpBytesLow; /* 0x1A0 - Transmit stats - TCP bytes */ | ||
120 | u32 XmtTcpBytesHigh; /* 0x1A4 - Transmit stats - TCP bytes */ | ||
121 | u32 RcvErrorsLow; /* 0x1A8 - Receive stats - errors */ | ||
122 | u32 RcvErrorsHigh; /* 0x1AC - Receive stats - errors */ | ||
123 | u32 RcvFramesLow; /* 0x1B0 - Receive stats - frame count */ | ||
124 | u32 RcvFramesHigh; /* 0x1B4 - Receive stats - frame count */ | ||
125 | u32 RcvBytesLow; /* 0x1B8 - Receive stats - byte count */ | ||
126 | u32 RcvBytesHigh; /* 0x1BC - Receive stats - byte count */ | ||
127 | u32 RcvTcpSegmentsLow; /* 0x1C0 - Receive stats - TCP segments */ | ||
128 | u32 RcvTcpSegmentsHigh; /* 0x1C4 - Receive stats - TCP segments */ | ||
129 | u32 RcvTcpBytesLow; /* 0x1C8 - Receive stats - TCP bytes */ | ||
130 | u32 RcvTcpBytesHigh; /* 0x1CC - Receive stats - TCP bytes */ | ||
131 | u32 PadTo0x200[12]; /* 0x1D0 - 0x200 - Pad */ | ||
132 | u32 Software[1920]; /* 0x200 - 0x2000 - Software defined (not used) */ | ||
133 | u32 MsixTable[1024]; /* 0x2000 - 0x3000 - MSIX Table */ | ||
134 | u32 MsixBitArray[1024]; /* 0x3000 - 0x4000 - MSIX Pending Bit Array */ | ||
135 | }; | ||
136 | #pragma pack(pop) | ||
137 | |||
138 | /* Microcode Address Flags */ | ||
139 | #define MICROCODE_ADDRESS_GO 0x80000000 /* Start microcode */ | ||
140 | #define MICROCODE_ADDRESS_WRITE 0x40000000 /* Store microcode */ | ||
141 | #define MICROCODE_ADDRESS_READ 0x20000000 /* Read microcode */ | ||
142 | #define MICROCODE_ADDRESS_PARITY 0x10000000/* Parity error detected */ | ||
143 | #define MICROCODE_ADDRESS_MASK 0x00001FFF /* Address bits */ | ||
144 | |||
145 | /* Link Address Registers */ | ||
146 | /* Applied to link address high */ | ||
147 | #define LINK_ADDRESS_ENABLE 0x80000000 | ||
148 | |||
149 | /* Microcode register space size */ | ||
150 | #define SXG_UCODEREG_MEMSIZE 0x40000 /* 256k */ | ||
151 | |||
152 | /* | ||
153 | * Sahara microcode register address format. The command code, | ||
154 | * extended command code, and associated processor are encoded in | ||
155 | * the address bits as follows | ||
156 | */ | ||
157 | #define SXG_ADDRESS_CODE_SHIFT 2 /* Base command code */ | ||
158 | #define SXG_ADDRESS_CODE_MASK 0x0000003C | ||
159 | /* Extended (or sub) command code */ | ||
160 | #define SXG_ADDRESS_EXCODE_SHIFT 6 | ||
161 | #define SXG_ADDRESS_EXCODE_MASK 0x00001FC0 | ||
162 | #define SXG_ADDRESS_CPUID_SHIFT 13 /* CPU */ | ||
163 | #define SXG_ADDRESS_CPUID_MASK 0x0003E000 | ||
164 | /* Used to sanity check UCODE_REGS structure */ | ||
165 | #define SXG_REGISTER_SIZE_PER_CPU 0x00002000 | ||
166 | |||
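A register address is therefore built by OR-ing the shifted fields together; for example, command code 3, extended code 5 and CPU 2 encode as (3 << 2) | (5 << 6) | (2 << 13) = 0x414C. A small sketch, assuming the shift/mask definitions above (the field values are arbitrary examples):

	/* Compose a ucode register address from its fields (illustrative) */
	static u32 sxg_ucode_reg_addr(u32 code, u32 excode, u32 cpu)
	{
		return ((code << SXG_ADDRESS_CODE_SHIFT) & SXG_ADDRESS_CODE_MASK) |
		       ((excode << SXG_ADDRESS_EXCODE_SHIFT) & SXG_ADDRESS_EXCODE_MASK) |
		       ((cpu << SXG_ADDRESS_CPUID_SHIFT) & SXG_ADDRESS_CPUID_MASK);
	}
	/* sxg_ucode_reg_addr(3, 5, 2) == 0x414C */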
167 | /* Sahara receive sequencer status values */ | ||
168 | #define SXG_RCV_STATUS_ATTN 0x80000000 /* Attention */ | ||
169 | #define SXG_RCV_STATUS_TRANSPORT_MASK 0x3F000000 /* Transport mask */ | ||
170 | #define SXG_RCV_STATUS_TRANSPORT_ERROR 0x20000000 /* Transport error */ | ||
171 | /* Transport cksum error */ | ||
172 | #define SXG_RCV_STATUS_TRANSPORT_CSUM 0x23000000 | ||
173 | /* Transport underflow */ | ||
174 | #define SXG_RCV_STATUS_TRANSPORT_UFLOW 0x22000000 | ||
175 | /* Transport header length */ | ||
176 | #define SXG_RCV_STATUS_TRANSPORT_HDRLEN 0x20000000 | ||
177 | /* Transport flags detected */ | ||
178 | #define SXG_RCV_STATUS_TRANSPORT_FLAGS 0x10000000 | ||
179 | /* Transport options detected */ | ||
180 | #define SXG_RCV_STATUS_TRANSPORT_OPTS 0x08000000 | ||
181 | #define SXG_RCV_STATUS_TRANSPORT_SESS_MASK 0x07000000 /* Transport DDP */ | ||
182 | #define SXG_RCV_STATUS_TRANSPORT_DDP 0x06000000 /* Transport DDP */ | ||
183 | #define SXG_RCV_STATUS_TRANSPORT_iSCSI 0x05000000 /* Transport iSCSI */ | ||
184 | #define SXG_RCV_STATUS_TRANSPORT_NFS 0x04000000 /* Transport NFS */ | ||
185 | #define SXG_RCV_STATUS_TRANSPORT_FTP 0x03000000 /* Transport FTP */ | ||
186 | #define SXG_RCV_STATUS_TRANSPORT_HTTP 0x02000000 /* Transport HTTP */ | ||
187 | #define SXG_RCV_STATUS_TRANSPORT_SMB 0x01000000 /* Transport SMB */ | ||
188 | #define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 /* Network mask */ | ||
189 | #define SXG_RCV_STATUS_NETWORK_ERROR 0x00800000 /* Network error */ | ||
190 | /* Network cksum error */ | ||
191 | #define SXG_RCV_STATUS_NETWORK_CSUM 0x00830000 | ||
192 | /* Network underflow error */ | ||
193 | #define SXG_RCV_STATUS_NETWORK_UFLOW 0x00820000 | ||
194 | /* Network header length */ | ||
195 | #define SXG_RCV_STATUS_NETWORK_HDRLEN 0x00800000 | ||
196 | /* Network overflow detected */ | ||
197 | #define SXG_RCV_STATUS_NETWORK_OFLOW 0x00400000 | ||
198 | /* Network multicast detected */ | ||
199 | #define SXG_RCV_STATUS_NETWORK_MCAST 0x00200000 | ||
200 | /* Network options detected */ | ||
201 | #define SXG_RCV_STATUS_NETWORK_OPTIONS 0x00100000 | ||
202 | /* Network offset detected */ | ||
203 | #define SXG_RCV_STATUS_NETWORK_OFFSET 0x00080000 | ||
204 | /* Network fragment detected */ | ||
205 | #define SXG_RCV_STATUS_NETWORK_FRAGMENT 0x00040000 | ||
206 | /* Network transport type mask */ | ||
207 | #define SXG_RCV_STATUS_NETWORK_TRANS_MASK 0x00030000 | ||
208 | #define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 /* UDP */ | ||
209 | #define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 /* TCP */ | ||
210 | #define SXG_RCV_STATUS_IPONLY 0x00008000 /* IP-only not TCP */ | ||
211 | /* Receive priority */ | ||
212 | #define SXG_RCV_STATUS_PKT_PRI 0x00006000 | ||
213 | /* Receive priority shift */ | ||
214 | #define SXG_RCV_STATUS_PKT_PRI_SHFT 13 | ||
215 | /* MAC Receive RAM parity error */ | ||
216 | #define SXG_RCV_STATUS_PARITY 0x00001000 | ||
217 | /* Link address detection mask */ | ||
218 | #define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00 | ||
219 | |||
220 | #define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 /* Link address D */ | ||
221 | #define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 /* Link address C */ | ||
222 | #define SXG_RCV_STATUS_ADDRESS_B 0x00000900 /* Link address B */ | ||
223 | #define SXG_RCV_STATUS_ADDRESS_A 0x00000800 /* Link address A */ | ||
224 | /* Link address broadcast */ | ||
225 | #define SXG_RCV_STATUS_ADDRESS_BCAST 0x00000300 | ||
226 | /* Link address multicast */ | ||
227 | #define SXG_RCV_STATUS_ADDRESS_MCAST 0x00000200 | ||
228 | /* Link control multicast */ | ||
229 | #define SXG_RCV_STATUS_ADDRESS_CMCAST 0x00000100 | ||
230 | /* Link status mask */ | ||
231 | #define SXG_RCV_STATUS_LINK_MASK 0x000000FF | ||
232 | #define SXG_RCV_STATUS_LINK_ERROR 0x00000080 /* Link error */ | ||
233 | /* Link status mask */ | ||
234 | #define SXG_RCV_STATUS_LINK_MASK 0x000000FF | ||
235 | /* RcvMacQ parity error */ | ||
236 | #define SXG_RCV_STATUS_LINK_PARITY 0x00000087 | ||
237 | #define SXG_RCV_STATUS_LINK_EARLY 0x00000086 /* Data early */ | ||
238 | #define SXG_RCV_STATUS_LINK_BUFOFLOW 0x00000085 /* Buffer overflow */ | ||
239 | #define SXG_RCV_STATUS_LINK_CODE 0x00000084 /* Link code error */ | ||
240 | #define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 /* Dribble nibble */ | ||
241 | #define SXG_RCV_STATUS_LINK_CRC 0x00000082 /* CRC error */ | ||
242 | #define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 /* Link overflow */ | ||
243 | #define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 /* Link underflow */ | ||
244 | #define SXG_RCV_STATUS_LINK_8023 0x00000020 /* 802.3 */ | ||
245 | #define SXG_RCV_STATUS_LINK_SNAP 0x00000010 /* Snap */ | ||
246 | #define SXG_RCV_STATUS_LINK_VLAN 0x00000008 /* VLAN */ | ||
247 | /* Network type mask */ | ||
248 | #define SXG_RCV_STATUS_LINK_TYPE_MASK 0x00000007 | ||
249 | #define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 /* Control packet */ | ||
250 | #define SXG_RCV_STATUS_LINK_IPV6 0x00000002 /* IPv6 packet */ | ||
251 | #define SXG_RCV_STATUS_LINK_IPV4 0x00000001 /* IPv4 packet */ | ||
252 | |||
253 | /* Sahara receive and transmit configuration registers */ | ||
254 | /* RcvConfig register reset */ | ||
255 | #define RCV_CONFIG_RESET 0x80000000 | ||
256 | /* Enable the receive logic */ | ||
257 | #define RCV_CONFIG_ENABLE 0x40000000 | ||
258 | /* Enable the receive parser */ | ||
259 | #define RCV_CONFIG_ENPARSE 0x20000000 | ||
260 | /* Enable the socket detector */ | ||
261 | #define RCV_CONFIG_SOCKET 0x10000000 | ||
262 | #define RCV_CONFIG_RCVBAD 0x08000000 /* Receive all bad frames */ | ||
263 | /* Receive all control frames */ | ||
264 | #define RCV_CONFIG_CONTROL 0x04000000 | ||
265 | /* Enable pause transmit when attn */ | ||
266 | #define RCV_CONFIG_RCVPAUSE 0x02000000 | ||
267 | /* Include TCP port w/ IPv6 toeplitz */ | ||
268 | #define RCV_CONFIG_TZIPV6 0x01000000 | ||
269 | /* Include TCP port w/ IPv4 toeplitz */ | ||
270 | #define RCV_CONFIG_TZIPV4 0x00800000 | ||
271 | #define RCV_CONFIG_FLUSH 0x00400000 /* Flush buffers */ | ||
272 | #define RCV_CONFIG_PRIORITY_MASK 0x00300000 /* Priority level */ | ||
273 | #define RCV_CONFIG_CONN_MASK 0x000C0000 /* Number of connections */ | ||
274 | #define RCV_CONFIG_CONN_4K 0x00000000 /* 4k connections */ | ||
275 | #define RCV_CONFIG_CONN_2K 0x00040000 /* 2k connections */ | ||
276 | #define RCV_CONFIG_CONN_1K 0x00080000 /* 1k connections */ | ||
277 | #define RCV_CONFIG_CONN_512 0x000C0000 /* 512 connections */ | ||
278 | #define RCV_CONFIG_HASH_MASK 0x00030000 /* Hash depth */ | ||
279 | #define RCV_CONFIG_HASH_8 0x00000000 /* Hash depth 8 */ | ||
280 | #define RCV_CONFIG_HASH_16 0x00010000 /* Hash depth 16 */ | ||
281 | #define RCV_CONFIG_HASH_4 0x00020000 /* Hash depth 4 */ | ||
282 | #define RCV_CONFIG_HASH_2 0x00030000 /* Hash depth 2 */ | ||
283 | /* Buffer length bits 15:5, i.e. multiple of 32. */ | ||
284 | #define RCV_CONFIG_BUFLEN_MASK 0x0000FFE0 | ||
285 | /* Disable socket detection on attn */ | ||
286 | #define RCV_CONFIG_SKT_DIS 0x00000008 | ||
287 | #define RCV_CONFIG_HIPRICTL 0x00000002 /* Ctrl frames on high-priority RcvQ */ | ||
288 | #define RCV_CONFIG_NEWSTATUSFMT 0x00000001 /* Use RevB status format */ | ||
289 | /* | ||
290 | * Macro to determine RCV_CONFIG_BUFLEN based on maximum frame size. | ||
291 | * We add 18 bytes for Sahara receive status and padding, plus 4 bytes for CRC, | ||
292 | * and round up to nearest 32 byte boundary | ||
293 | */ | ||
294 | #define RCV_CONFIG_BUFSIZE(_MaxFrame) \ | ||
295 | ((((_MaxFrame) + 22) + 31) & RCV_CONFIG_BUFLEN_MASK) | ||
296 | |||
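For a standard 1518-byte maximum frame, for example, this gives (1518 + 22 + 31) & 0xFFE0 = 1568: the 1540 bytes of frame plus receive overhead rounded up to the next 32-byte boundary.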
297 | /* XmtConfig register reset */ | ||
298 | #define XMT_CONFIG_RESET 0x80000000 | ||
299 | #define XMT_CONFIG_ENABLE 0x40000000 /* Enable transmit logic */ | ||
300 | /* Inhibit MAC RAM parity error */ | ||
301 | #define XMT_CONFIG_MAC_PARITY 0x20000000 | ||
302 | /* Inhibit D2F buffer parity error */ | ||
303 | #define XMT_CONFIG_BUF_PARITY 0x10000000 | ||
304 | /* Inhibit 1T SRAM parity error */ | ||
305 | #define XMT_CONFIG_MEM_PARITY 0x08000000 | ||
306 | #define XMT_CONFIG_INVERT_PARITY 0x04000000 /* Invert MAC RAM parity */ | ||
307 | #define XMT_CONFIG_INITIAL_IPID 0x0000FFFF /* Initial IPID */ | ||
308 | |||
309 | /* | ||
310 | * A-XGMAC Registers - Occupy 0x80 - 0xD4 of the struct sxg_hw_regs | ||
311 | * | ||
312 | * Full register descriptions can be found in axgmac.pdf | ||
313 | */ | ||
314 | /* A-XGMAC Configuration Register 0 */ | ||
315 | #define AXGMAC_CFG0_SUB_RESET 0x80000000 /* Sub module reset */ | ||
316 | #define AXGMAC_CFG0_RCNTRL_RESET 0x00400000 /* Receive control reset */ | ||
317 | #define AXGMAC_CFG0_RFUNC_RESET 0x00200000 /* Receive function reset */ | ||
318 | #define AXGMAC_CFG0_TCNTRL_RESET 0x00040000 /* Transmit control reset */ | ||
319 | #define AXGMAC_CFG0_TFUNC_RESET 0x00020000 /* Transmit function reset */ | ||
320 | #define AXGMAC_CFG0_MII_RESET 0x00010000 /* MII Management reset */ | ||
321 | |||
322 | /* A-XGMAC Configuration Register 1 */ | ||
323 | /* Allow the sending of Pause frames */ | ||
324 | #define AXGMAC_CFG1_XMT_PAUSE 0x80000000 | ||
325 | #define AXGMAC_CFG1_XMT_EN 0x40000000 /* Enable transmit */ | ||
326 | /* Allow the detection of Pause frames */ | ||
327 | #define AXGMAC_CFG1_RCV_PAUSE 0x20000000 | ||
328 | #define AXGMAC_CFG1_RCV_EN 0x10000000 /* Enable receive */ | ||
329 | /* Current transmit state - READ ONLY */ | ||
330 | #define AXGMAC_CFG1_XMT_STATE 0x04000000 | ||
331 | /* Current receive state - READ ONLY */ | ||
332 | #define AXGMAC_CFG1_RCV_STATE 0x01000000 | ||
333 | /* Only pause for 64 slot on XOFF */ | ||
334 | #define AXGMAC_CFG1_XOFF_SHORT 0x00001000 | ||
335 | /* Delay transmit FCS 1 4-byte word */ | ||
336 | #define AXGMAC_CFG1_XMG_FCS1 0x00000400 | ||
337 | /* Delay transmit FCS 2 4-byte words */ | ||
338 | #define AXGMAC_CFG1_XMG_FCS2 0x00000800 | ||
339 | /* Delay transmit FCS 3 4-byte words */ | ||
340 | #define AXGMAC_CFG1_XMG_FCS3 0x00000C00 | ||
341 | /* Delay receive FCS 1 4-byte word */ | ||
342 | #define AXGMAC_CFG1_RCV_FCS1 0x00000100 | ||
343 | /* Delay receive FCS 2 4-byte words */ | ||
344 | #define AXGMAC_CFG1_RCV_FCS2 0x00000200 | ||
345 | /* Delay receive FCS 3 4-byte words */ | ||
346 | #define AXGMAC_CFG1_RCV_FCS3 0x00000300 | ||
347 | /* Per-packet override enable */ | ||
348 | #define AXGMAC_CFG1_PKT_OVERRIDE 0x00000080 | ||
349 | #define AXGMAC_CFG1_SWAP 0x00000040 /* Byte swap enable */ | ||
350 | /* ASSERT srdrpfrm on short frame (<64) */ | ||
351 | #define AXGMAC_CFG1_SHORT_ASSERT 0x00000020 | ||
352 | /* RCV only 802.3AE when CLEAR */ | ||
353 | #define AXGMAC_CFG1_RCV_STRICT 0x00000010 | ||
354 | #define AXGMAC_CFG1_CHECK_LEN 0x00000008 /* Verify frame length */ | ||
355 | #define AXGMAC_CFG1_GEN_FCS 0x00000004 /* Generate FCS */ | ||
356 | #define AXGMAC_CFG1_PAD_MASK 0x00000003 /* Mask for pad bits */ | ||
357 | #define AXGMAC_CFG1_PAD_64 0x00000001 /* Pad frames to 64 bytes */ | ||
358 | /* Detect VLAN and pad to 68 bytes */ | ||
359 | #define AXGMAC_CFG1_PAD_VLAN 0x00000002 | ||
360 | #define AXGMAC_CFG1_PAD_68 0x00000003 /* Pad to 68 bytes */ | ||
361 | |||
362 | /* A-XGMAC Configuration Register 2 */ | ||
363 | /* Generate single pause frame (test) */ | ||
364 | #define AXGMAC_CFG2_GEN_PAUSE 0x80000000 | ||
365 | /* Manual link fault sequence */ | ||
366 | #define AXGMAC_CFG2_LF_MANUAL 0x08000000 | ||
367 | /* Auto link fault sequence */ | ||
368 | #define AXGMAC_CFG2_LF_AUTO 0x04000000 | ||
369 | /* Remote link fault (READ ONLY) */ | ||
370 | #define AXGMAC_CFG2_LF_REMOTE 0x02000000 | ||
371 | /* Local link fault (READ ONLY) */ | ||
372 | #define AXGMAC_CFG2_LF_LOCAL 0x01000000 | ||
373 | #define AXGMAC_CFG2_IPG_MASK 0x001F0000 /* Inter packet gap */ | ||
374 | #define AXGMAC_CFG2_IPG_SHIFT 16 | ||
375 | #define AXGMAC_CFG2_PAUSE_XMT 0x00008000 /* Pause transmit module */ | ||
376 | /* Enable IPG extension algorithm */ | ||
377 | #define AXGMAC_CFG2_IPG_EXTEN 0x00000020 | ||
378 | #define AXGMAC_CFG2_IPGEX_MASK 0x0000001F /* IPG extension */ | ||
379 | |||
380 | /* A-XGMAC Configuration Register 3 */ | ||
381 | /* Receive frame drop filter */ | ||
382 | #define AXGMAC_CFG3_RCV_DROP 0xFFFF0000 | ||
383 | /* Receive frame don't care filter */ | ||
384 | #define AXGMAC_CFG3_RCV_DONT_CARE 0x0000FFFF | ||
385 | |||
386 | /* A-XGMAC Station Address Register - Octets 1-4 */ | ||
387 | #define AXGMAC_SARLOW_OCTET_ONE 0xFF000000 /* First octet */ | ||
388 | #define AXGMAC_SARLOW_OCTET_TWO 0x00FF0000 /* Second octet */ | ||
389 | #define AXGMAC_SARLOW_OCTET_THREE 0x0000FF00 /* Third octet */ | ||
390 | #define AXGMAC_SARLOW_OCTET_FOUR 0x000000FF /* Fourth octet */ | ||
391 | |||
392 | /* A-XGMAC Station Address Register - Octets 5-6 */ | ||
393 | #define AXGMAC_SARHIGH_OCTET_FIVE 0xFF000000 /* Fifth octet */ | ||
394 | #define AXGMAC_SARHIGH_OCTET_SIX 0x00FF0000 /* Sixth octet */ | ||
395 | |||
396 | /* A-XGMAC Maximum frame length register */ | ||
397 | /* Maximum transmit frame length */ | ||
398 | #define AXGMAC_MAXFRAME_XMT 0x3FFF0000 | ||
399 | #define AXGMAC_MAXFRAME_XMT_SHIFT 16 | ||
400 | /* Maximum receive frame length */ | ||
401 | #define AXGMAC_MAXFRAME_RCV 0x0000FFFF | ||
402 | /* | ||
403 | * This register doesn't need to be written for standard MTU. | ||
404 | * For jumbo, I'll just statically define the value here. This | ||
405 | * value sets the receive byte count to 9036 (0x234C) and the | ||
406 | * transmit WORD count to 2259 (0x8D3). These values include 22 | ||
407 | * bytes of padding beyond the jumbo MTU of 9014 | ||
408 | */ | ||
409 | #define AXGMAC_MAXFRAME_JUMBO 0x08D3234C | ||
410 | |||
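The constant follows from the field layout above: the jumbo MTU of 9014 plus 22 bytes of overhead gives a receive byte count of 9036 = 0x234C in the low half, and the transmit WORD count of 9036 / 4 = 2259 = 0x8D3 lands in the upper half after the 16-bit shift, so the register value is (0x8D3 << 16) | 0x234C = 0x08D3234C.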
411 | /* A-XGMAC Revision level */ | ||
412 | #define AXGMAC_REVISION_MASK 0x0000FFFF /* Revision level */ | ||
413 | |||
414 | /* A-XGMAC AMIIM Command Register */ | ||
415 | #define AXGMAC_AMIIM_CMD_START 0x00000008 /* Command start */ | ||
416 | #define AXGMAC_AMIIM_CMD_MASK 0x00000007 /* Command */ | ||
417 | /* 10/100/1000 Mbps Phy Write */ | ||
418 | #define AXGMAC_AMIIM_CMD_LEGACY_WRITE 1 | ||
419 | /* 10/100/1000 Mbps Phy Read */ | ||
420 | #define AXGMAC_AMIIM_CMD_LEGACY_READ 2 | ||
421 | #define AXGMAC_AMIIM_CMD_MONITOR_SINGLE 3 /* Monitor single PHY */ | ||
422 | /* Monitor multiple contiguous PHYs */ | ||
423 | #define AXGMAC_AMIIM_CMD_MONITOR_MULTIPLE 4 | ||
424 | /* Present AMIIM Field Reg */ | ||
425 | #define AXGMAC_AMIIM_CMD_10G_OPERATION 5 | ||
426 | /* Clear Link Fail Bit in MIIM */ | ||
427 | #define AXGMAC_AMIIM_CMD_CLEAR_LINK_FAIL 6 | ||
428 | |||
429 | /* A-XGMAC AMIIM Field Register */ | ||
430 | #define AXGMAC_AMIIM_FIELD_ST 0xC0000000 /* 2-bit ST field */ | ||
431 | #define AXGMAC_AMIIM_FIELD_ST_SHIFT 30 | ||
432 | #define AXGMAC_AMIIM_FIELD_OP 0x30000000 /* 2-bit OP field */ | ||
433 | #define AXGMAC_AMIIM_FIELD_OP_SHIFT 28 | ||
434 | /* Port address field (hstphyadx in spec) */ | ||
435 | #define AXGMAC_AMIIM_FIELD_PORT_ADDR 0x0F800000 | ||
436 | #define AXGMAC_AMIIM_FIELD_PORT_SHIFT 23 | ||
437 | /* Device address field (hstregadx in spec) */ | ||
438 | #define AXGMAC_AMIIM_FIELD_DEV_ADDR 0x007C0000 | ||
439 | #define AXGMAC_AMIIM_FIELD_DEV_SHIFT 18 | ||
440 | #define AXGMAC_AMIIM_FIELD_TA 0x00030000 /* 2-bit TA field */ | ||
441 | #define AXGMAC_AMIIM_FIELD_TA_SHIFT 16 | ||
442 | #define AXGMAC_AMIIM_FIELD_DATA 0x0000FFFF /* Data field */ | ||
443 | |||
444 | /* Values for the AXGMAC_AMIIM_FIELD_OP field in the A-XGMAC AMIIM Field Register */ | ||
445 | #define MIIM_OP_ADDR 0 /* MIIM Address set operation */ | ||
446 | #define MIIM_OP_WRITE 1 /* MIIM Write register operation */ | ||
447 | #define MIIM_OP_READ 2 /* MIIM Read register operation */ | ||
448 | #define MIIM_OP_ADDR_SHIFT (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | ||
449 | |||
450 | /* | ||
451 | * Values for the AXGMAC_AMIIM_FIELD_PORT_ADDR field in the A-XGMAC AMIIM | ||
452 | * Field Register | ||
453 | */ | ||
454 | #define MIIM_PORT_NUM 1 /* All Sahara MIIM modules use port 1 */ | ||
455 | |||
456 | /* | ||
457 | * Values for the AXGMAC_AMIIM_FIELD_DEV_ADDR field in the A-XGMAC AMIIM | ||
458 | * Field Register | ||
459 | */ | ||
460 | /* PHY PMA/PMD module MIIM device number */ | ||
461 | #define MIIM_DEV_PHY_PMA 1 | ||
462 | /* PHY PCS module MIIM device number */ | ||
463 | #define MIIM_DEV_PHY_PCS 3 | ||
464 | /* PHY XS module MIIM device number */ | ||
465 | #define MIIM_DEV_PHY_XS 4 | ||
466 | #define MIIM_DEV_XGXS 5 /* XGXS MIIM device number */ | ||
467 | |||
468 | /* | ||
469 | * Values for the AXGMAC_AMIIM_FIELD_TA field in the A-XGMAC AMIIM Field | ||
470 | * Register | ||
471 | */ | ||
472 | #define MIIM_TA_10GB 2 /* set to 2 for 10 GB operation */ | ||
473 | |||
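Taken together, an MDIO management operation is built by shifting each field into place in the AMIIM Field Register, with the ST field left at zero for clause-45 framing. A hedged sketch, assuming the shift/mask definitions above (the helper name is illustrative, not part of the driver):

	/* Sketch: compose an AMIIM Field Register value for a 10G access on
	 * MDIO port 1. For an address-set operation the target register
	 * number goes in the data field. */
	static u32 sxg_amiim_field(u32 op, u32 dev, u32 reg_or_data)
	{
		return (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
		       (op << AXGMAC_AMIIM_FIELD_OP_SHIFT) |
		       (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
		       (dev << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
		       (reg_or_data & AXGMAC_AMIIM_FIELD_DATA);
	}
	/* e.g. sxg_amiim_field(MIIM_OP_ADDR, MIIM_DEV_PHY_PMA, reg) sets the
	 * target register address on the PMA/PMD device */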
474 | /* A-XGMAC AMIIM Configuration Register */ | ||
475 | /* Bypass preamble of mngmt frame */ | ||
476 | #define AXGMAC_AMIIM_CFG_NOPREAM 0x00000080 | ||
477 | /* half-clock duration of MDC output */ | ||
478 | #define AXGMAC_AMIIM_CFG_HALF_CLOCK 0x0000007F | ||
479 | |||
480 | /* A-XGMAC AMIIM Indicator Register */ | ||
481 | /* Link status from legacy PHY or MMD */ | ||
482 | #define AXGMAC_AMIIM_INDC_LINK 0x00000010 | ||
483 | /* Multiple phy operation in progress */ | ||
484 | #define AXGMAC_AMIIM_INDC_MPHY 0x00000008 | ||
485 | /* Single phy operation in progress */ | ||
486 | #define AXGMAC_AMIIM_INDC_SPHY 0x00000004 | ||
487 | /* Single or multiple monitor cmd */ | ||
488 | #define AXGMAC_AMIIM_INDC_MON 0x00000002 | ||
489 | /* Set until cmd operation complete */ | ||
490 | #define AXGMAC_AMIIM_INDC_BUSY 0x00000001 | ||
491 | |||
492 | /* Link Status and Control Register */ | ||
493 | #define LS_PHY_CLR_RESET 0x80000000 /* Clear reset signal to PHY */ | ||
494 | #define LS_SERDES_POWER_DOWN 0x40000000 /* Power down the Sahara Serdes */ | ||
495 | #define LS_XGXS_ENABLE 0x20000000 /* Enable the XAUI XGXS logic */ | ||
496 | /* Hold XAUI XGXS logic reset until Serdes is up */ | ||
497 | #define LS_XGXS_CTL 0x10000000 | ||
498 | /* When 0, XAUI Serdes is up and initialization is complete */ | ||
499 | #define LS_SERDES_DOWN 0x08000000 | ||
500 | /* When 0, Trace Serdes is up and initialization is complete */ | ||
501 | #define LS_TRACE_DOWN 0x04000000 | ||
502 | /* Set PHY clock to 25 MHz (else 156.125 MHz) */ | ||
503 | #define LS_PHY_CLK_25MHZ 0x02000000 | ||
504 | #define LS_PHY_CLK_EN 0x01000000 /* Enable clock to PHY */ | ||
505 | #define LS_XAUI_LINK_UP 0x00000010 /* XAUI link is up */ | ||
506 | /* XAUI link status has changed */ | ||
507 | #define LS_XAUI_LINK_CHNG 0x00000008 | ||
508 | #define LS_LINK_ALARM 0x00000004 /* Link alarm pin */ | ||
509 | /* Mask link attention control bits */ | ||
510 | #define LS_ATTN_CTRL_MASK 0x00000003 | ||
511 | #define LS_ATTN_ALARM 0x00000000 /* 00 => Attn on link alarm */ | ||
512 | /* 01 => Attn on link alarm or status change */ | ||
513 | #define LS_ATTN_ALARM_OR_STAT_CHNG 0x00000001 | ||
514 | /* 10 => Attn on link status change */ | ||
515 | #define LS_ATTN_STAT_CHNG 0x00000002 | ||
516 | #define LS_ATTN_NONE 0x00000003 /* 11 => no Attn */ | ||
517 | |||
518 | /* Link Address High Registers */ | ||
519 | #define LINK_ADDR_ENABLE 0x80000000 /* Enable this link address */ | ||
520 | |||
521 | |||
522 | /* | ||
523 | * XGXS XAUI XGMII Extender registers | ||
524 | * | ||
525 | * Full register descriptions can be found in mxgxs.pdf | ||
526 | */ | ||
527 | /* XGXS Register Map */ | ||
528 | #define XGXS_ADDRESS_CONTROL1 0x0000 /* XS Control 1 */ | ||
529 | #define XGXS_ADDRESS_STATUS1 0x0001 /* XS Status 1 */ | ||
530 | #define XGXS_ADDRESS_DEVID_LOW 0x0002 /* XS Device ID (low) */ | ||
531 | #define XGXS_ADDRESS_DEVID_HIGH 0x0003 /* XS Device ID (high) */ | ||
532 | #define XGXS_ADDRESS_SPEED 0x0004 /* XS Speed ability */ | ||
533 | #define XGXS_ADDRESS_DEV_LOW 0x0005 /* XS Devices in package */ | ||
534 | #define XGXS_ADDRESS_DEV_HIGH 0x0006 /* XS Devices in package */ | ||
535 | #define XGXS_ADDRESS_STATUS2 0x0008 /* XS Status 2 */ | ||
536 | #define XGXS_ADDRESS_PKGID_lOW 0x000E /* XS Package Identifier */ | ||
537 | #define XGXS_ADDRESS_PKGID_HIGH 0x000F /* XS Package Identifier */ | ||
538 | #define XGXS_ADDRESS_LANE_STATUS 0x0018 /* 10G XGXS Lane Status */ | ||
539 | #define XGXS_ADDRESS_TEST_CTRL 0x0019 /* 10G XGXS Test Control */ | ||
540 | #define XGXS_ADDRESS_RESET_LO1 0x8000 /* Vendor-Specific Reset Lo 1 */ | ||
541 | #define XGXS_ADDRESS_RESET_LO2 0x8001 /* Vendor-Specific Reset Lo 2 */ | ||
542 | #define XGXS_ADDRESS_RESET_HI1 0x8002 /* Vendor-Specific Reset Hi 1 */ | ||
543 | #define XGXS_ADDRESS_RESET_HI2 0x8003 /* Vendor-Specific Reset Hi 2 */ | ||
544 | |||
545 | /* XS Control 1 register bit definitions */ | ||
546 | #define XGXS_CONTROL1_RESET 0x8000 /* Reset - self clearing */ | ||
547 | #define XGXS_CONTROL1_LOOPBACK 0x4000 /* Enable loopback */ | ||
548 | #define XGXS_CONTROL1_SPEED1 0x2000 /* 0 = unspecified, 1 = 10Gb+ */ | ||
549 | #define XGXS_CONTROL1_LOWPOWER 0x0400 /* 1 = Low power mode */ | ||
550 | #define XGXS_CONTROL1_SPEED2 0x0040 /* Same as SPEED1 (?) */ | ||
551 | /* Everything reserved except zero (?) */ | ||
552 | #define XGXS_CONTROL1_SPEED 0x003C | ||
553 | |||
554 | /* XS Status 1 register bit definitions */ | ||
555 | #define XGXS_STATUS1_FAULT 0x0080 /* Fault detected */ | ||
556 | #define XGXS_STATUS1_LINK 0x0004 /* 1 = Link up */ | ||
557 | #define XGXS_STATUS1_LOWPOWER 0x0002 /* 1 = Low power supported */ | ||
558 | |||
559 | /* XS Speed register bit definitions */ | ||
560 | #define XGXS_SPEED_10G 0x0001 /* 1 = 10G capable */ | ||
561 | |||
562 | /* XS Devices register bit definitions */ | ||
563 | #define XGXS_DEVICES_DTE 0x0020 /* DTE XS Present */ | ||
564 | #define XGXS_DEVICES_PHY 0x0010 /* PHY XS Present */ | ||
565 | #define XGXS_DEVICES_PCS 0x0008 /* PCS Present */ | ||
566 | #define XGXS_DEVICES_WIS 0x0004 /* WIS Present */ | ||
567 | #define XGXS_DEVICES_PMD 0x0002 /* PMD/PMA Present */ | ||
568 | #define XGXS_DEVICES_CLAUSE22 0x0001 /* Clause 22 registers present*/ | ||
569 | |||
570 | /* XS Devices High register bit definitions */ | ||
571 | #define XGXS_DEVICES_VENDOR2 0x8000 /* Vendor specific device 2 */ | ||
572 | #define XGXS_DEVICES_VENDOR1 0x4000 /* Vendor specific device 1 */ | ||
573 | |||
574 | /* XS Status 2 register bit definitions */ | ||
575 | #define XGXS_STATUS2_DEV_MASK 0xC000 /* Device present mask */ | ||
576 | #define XGXS_STATUS2_DEV_RESPOND 0x8000 /* Device responding */ | ||
577 | #define XGXS_STATUS2_XMT_FAULT 0x0800 /* Transmit fault */ | ||
578 | #define XGXS_STATUS2_RCV_FAULT 0x0400 /* Receive fault */ | ||
579 | |||
580 | /* XS Package ID High register bit definitions */ | ||
581 | #define XGXS_PKGID_HIGH_ORG 0xFC00 /* Organizationally Unique */ | ||
582 | #define XGXS_PKGID_HIGH_MFG 0x03F0 /* Manufacturer Model */ | ||
583 | #define XGXS_PKGID_HIGH_REV 0x000F /* Revision Number */ | ||
584 | |||
585 | /* XS Lane Status register bit definitions */ | ||
586 | #define XGXS_LANE_PHY 0x1000 /* PHY/DTE lane alignment status */ | ||
587 | #define XGXS_LANE_PATTERN 0x0800 /* Pattern testing ability */ | ||
588 | #define XGXS_LANE_LOOPBACK 0x0400 /* PHY loopback ability */ | ||
589 | #define XGXS_LANE_SYNC3 0x0008 /* Lane 3 sync */ | ||
590 | #define XGXS_LANE_SYNC2 0x0004 /* Lane 2 sync */ | ||
591 | #define XGXS_LANE_SYNC1 0x0002 /* Lane 1 sync */ | ||
592 | #define XGXS_LANE_SYNC0 0x0001 /* Lane 0 sync */ | ||
593 | |||
594 | /* XS Test Control register bit definitions */ | ||
595 | #define XGXS_TEST_PATTERN_ENABLE 0x0004 /* Test pattern enabled */ | ||
596 | #define XGXS_TEST_PATTERN_MASK 0x0003 /* Test patterns */ | ||
597 | #define XGXS_TEST_PATTERN_RSVD 0x0003 /* Test pattern - reserved */ | ||
598 | #define XGXS_TEST_PATTERN_MIX 0x0002 /* Test pattern - mixed */ | ||
599 | #define XGXS_TEST_PATTERN_LOW 0x0001 /* Test pattern - low */ | ||
600 | #define XGXS_TEST_PATTERN_HIGH 0x0001 /* Test pattern - high */ | ||
601 | |||
602 | /* | ||
603 | * External MDIO Bus Registers | ||
604 | * | ||
605 | * Full register descriptions can be found in PHY/XENPAK/IEEE specs | ||
606 | */ | ||
607 | /* | ||
608 | * LASI (Link Alarm Status Interrupt) Registers (located in | ||
609 | * MIIM_DEV_PHY_PMA device) | ||
610 | */ | ||
611 | #define LASI_RX_ALARM_CONTROL 0x9000 /* LASI RX_ALARM Control */ | ||
612 | #define LASI_TX_ALARM_CONTROL 0x9001 /* LASI TX_ALARM Control */ | ||
613 | #define LASI_CONTROL 0x9002 /* LASI Control */ | ||
614 | #define LASI_RX_ALARM_STATUS 0x9003 /* LASI RX_ALARM Status */ | ||
615 | #define LASI_TX_ALARM_STATUS 0x9004 /* LASI TX_ALARM Status */ | ||
616 | #define LASI_STATUS 0x9005 /* LASI Status */ | ||
617 | |||
618 | /* LASI_CONTROL bit definitions */ | ||
619 | /* Enable RX_ALARM interrupts */ | ||
620 | #define LASI_CTL_RX_ALARM_ENABLE 0x0004 | ||
621 | /* Enable TX_ALARM interrupts */ | ||
622 | #define LASI_CTL_TX_ALARM_ENABLE 0x0002 | ||
623 | /* Enable Link Status interrupts */ | ||
624 | #define LASI_CTL_LS_ALARM_ENABLE 0x0001 | ||
625 | |||
626 | /* LASI_STATUS bit definitions */ | ||
627 | #define LASI_STATUS_RX_ALARM 0x0004 /* RX_ALARM status */ | ||
628 | #define LASI_STATUS_TX_ALARM 0x0002 /* TX_ALARM status */ | ||
629 | #define LASI_STATUS_LS_ALARM 0x0001 /* Link Status */ | ||
630 | |||
631 | /* PHY registers - PMA/PMD (device 1) */ | ||
632 | #define PHY_PMA_CONTROL1 0x0000 /* PMA/PMD Control 1 */ | ||
633 | #define PHY_PMA_STATUS1 0x0001 /* PMA/PMD Status 1 */ | ||
634 | #define PHY_PMA_RCV_DET 0x000A /* PMA/PMD Receive Signal Detect */ | ||
635 | /* other PMA/PMD registers exist and can be defined as needed */ | ||
636 | |||
637 | /* PHY registers - PCS (device 3) */ | ||
638 | #define PHY_PCS_CONTROL1 0x0000 /* PCS Control 1 */ | ||
639 | #define PHY_PCS_STATUS1 0x0001 /* PCS Status 1 */ | ||
640 | #define PHY_PCS_10G_STATUS1 0x0020 /* PCS 10GBASE-R Status 1 */ | ||
641 | /* other PCS registers exist and can be defined as needed */ | ||
642 | |||
643 | /* PHY registers - XS (device 4) */ | ||
644 | #define PHY_XS_CONTROL1 0x0000 /* XS Control 1 */ | ||
645 | #define PHY_XS_STATUS1 0x0001 /* XS Status 1 */ | ||
646 | #define PHY_XS_LANE_STATUS 0x0018 /* XS Lane Status */ | ||
647 | /* other XS registers exist and can be defined as needed */ | ||
648 | |||
649 | /* PHY_PMA_CONTROL1 register bit definitions */ | ||
650 | #define PMA_CONTROL1_RESET 0x8000 /* PMA/PMD reset */ | ||
651 | |||
652 | /* PHY_PMA_RCV_DET register bit definitions */ | ||
653 | #define PMA_RCV_DETECT 0x0001 /* PMA/PMD receive signal detect */ | ||
654 | |||
655 | /* PHY_PCS_10G_STATUS1 register bit definitions */ | ||
656 | #define PCS_10B_BLOCK_LOCK 0x0001 /* PCS 10GBASE-R locked to receive blocks */ | ||
657 | |||
658 | /* PHY_XS_LANE_STATUS register bit definitions */ | ||
659 | #define XS_LANE_ALIGN 0x1000 /* XS transmit lanes aligned */ | ||
660 | |||
661 | #define XCVR_VENDOR_LEN 16 /* xcvr vendor len */ | ||
662 | #define XCVR_MODEL_LEN 16 /* xcvr model len */ | ||
663 | |||
664 | /* PHY Microcode download data structure */ | ||
665 | struct phy_ucode { | ||
666 | ushort Addr; | ||
667 | ushort Data; | ||
668 | }; | ||
669 | |||
670 | /* Slow Bus Register Definitions */ | ||
671 | |||
672 | /* Module 0 registers */ | ||
673 | #define GPIO_L_IN 0x15 /* GPIO input (low) */ | ||
674 | #define GPIO_L_OUT 0x16 /* GPIO output (low) */ | ||
675 | #define GPIO_L_DIR 0x17 /* GPIO direction (low) */ | ||
676 | #define GPIO_H_IN 0x19 /* GPIO input (high) */ | ||
677 | #define GPIO_H_OUT 0x1A /* GPIO output (high) */ | ||
678 | #define GPIO_H_DIR 0x1B /* GPIO direction (high) */ | ||
679 | |||
680 | /* Definitions for other slow bus registers can be added as needed */ | ||
681 | |||
682 | |||
683 | /* | ||
684 | * Transmit Sequencer Command Descriptor definitions | ||
685 | * | ||
686 | * This descriptor must be placed in GRAM. The address of this descriptor | ||
687 | * (along with a couple of control bits) is pushed onto the PxhCmdQ or PxlCmdQ | ||
688 | * (Proxy high or low command queue). This data is read by the Proxy Sequencer, | ||
689 | * which pushes it onto the XmtCmdQ, which is (eventually) read by the Transmit | ||
690 | * Sequencer, causing a packet to be transmitted. Not all fields are valid for | ||
691 | * all commands - see the Sahara spec for details. Note that this structure is | ||
692 | * only valid when compiled on a little endian machine. | ||
693 | */ | ||
694 | #pragma pack(push, 1) | ||
695 | struct xmt_desc { | ||
696 | ushort XmtLen; /* word 0, bits [15:0] - transmit length */ | ||
697 | /* word 0, bits [23:16] - transmit control byte */ | ||
698 | unsigned char XmtCtl; | ||
699 | /* word 0, bits [31:24] - transmit command plus misc. */ | ||
700 | unsigned char Cmd; | ||
701 | /* word 1, bits [31:0] - transmit buffer ID */ | ||
702 | u32 XmtBufId; | ||
703 | /* word 2, bits [7:0] - byte address of TCP header */ | ||
704 | unsigned char TcpStrt; | ||
705 | /* word 2, bits [15:8] - byte address of IP header */ | ||
706 | unsigned char IpStrt; | ||
707 | /* word 2, bits [31:16] - partial IP checksum */ | ||
708 | ushort IpCkSum; | ||
709 | /* word 3, bits [15:0] - partial TCP checksum */ | ||
710 | ushort TcpCkSum; | ||
711 | ushort Rsvd1; /* word 3, bits [31:16] - PAD */ | ||
712 | u32 Rsvd2; /* word 4, bits [31:0] - PAD */ | ||
713 | u32 Rsvd3; /* word 5, bits [31:0] - PAD */ | ||
714 | u32 Rsvd4; /* word 6, bits [31:0] - PAD */ | ||
715 | u32 Rsvd5; /* word 7, bits [31:0] - PAD */ | ||
716 | }; | ||
717 | #pragma pack(pop) | ||
718 | |||
719 | /* struct xmt_desc Cmd byte definitions */ | ||
720 | /* command codes */ | ||
721 | #define XMT_DESC_CMD_RAW_SEND 0 /* raw send descriptor */ | ||
722 | #define XMT_DESC_CMD_CSUM_INSERT 1 /* checksum insert descriptor */ | ||
723 | #define XMT_DESC_CMD_FORMAT 2 /* format descriptor */ | ||
724 | #define XMT_DESC_CMD_PRIME 3 /* prime descriptor */ | ||
725 | /* command code shift (shift to bits [31:30] in word 0) */ | ||
726 | #define XMT_DESC_CMD_CODE_SHFT 6 | ||
727 | /* shifted command codes */ | ||
728 | #define XMT_RAW_SEND (XMT_DESC_CMD_RAW_SEND << XMT_DESC_CMD_CODE_SHFT) | ||
729 | #define XMT_CSUM_INSERT (XMT_DESC_CMD_CSUM_INSERT << XMT_DESC_CMD_CODE_SHFT) | ||
730 | #define XMT_FORMAT (XMT_DESC_CMD_FORMAT << XMT_DESC_CMD_CODE_SHFT) | ||
731 | #define XMT_PRIME (XMT_DESC_CMD_PRIME << XMT_DESC_CMD_CODE_SHFT) | ||
732 | |||
733 | /* | ||
734 | * struct xmt_desc Control Byte (XmtCtl) definitions | ||
735 | * NOTE: These bits do not work on Sahara (Rev A)! | ||
736 | */ | ||
737 | /* current frame is a pause control frame (for statistics) */ | ||
738 | #define XMT_CTL_PAUSE_FRAME 0x80 | ||
739 | /* current frame is a control frame (for statistics) */ | ||
740 | #define XMT_CTL_CONTROL_FRAME 0x40 | ||
741 | #define XMT_CTL_PER_PKT_QUAL 0x20 /* per packet qualifier */ | ||
742 | #define XMT_CTL_PAD_MODE_NONE 0x00 /* do not pad frame */ | ||
743 | #define XMT_CTL_PAD_MODE_64 0x08 /* pad frame to 64 bytes */ | ||
744 | /* pad frame to 64 bytes, and VLAN frames to 68 bytes */ | ||
745 | #define XMT_CTL_PAD_MODE_VLAN_68 0x10 | ||
746 | #define XMT_CTL_PAD_MODE_68 0x18 /* pad frame to 68 bytes */ | ||
747 | /* generate FCS (CRC) for this frame */ | ||
748 | #define XMT_CTL_GEN_FCS 0x04 | ||
749 | #define XMT_CTL_DELAY_FCS_0 0x00 /* do not delay FCS calculation */ | ||
750 | /* delay FCS calculation by 1 (4-byte) word */ | ||
751 | #define XMT_CTL_DELAY_FCS_1 0x01 | ||
752 | /* delay FCS calculation by 2 (4-byte) words */ | ||
753 | #define XMT_CTL_DELAY_FCS_2 0x02 | ||
754 | /* delay FCS calculation by 3 (4-byte) words */ | ||
755 | #define XMT_CTL_DELAY_FCS_3 0x03 | ||
756 | |||
757 | /* struct xmt_desc XmtBufId definition */ | ||
758 | /* | ||
759 | * The Xmt buffer ID is formed by dividing the buffer (DRAM) address | ||
760 | * by 256 (or << 8) | ||
761 | */ | ||
762 | |||
763 | #define XMT_BUF_ID_SHFT 8 | ||
764 | |||
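Putting the pieces together, a transmit descriptor's Cmd byte carries one of the shifted command codes above, the XmtCtl byte carries the pad/FCS flags (Rev B only), and XmtBufId is the DRAM buffer address divided by 256. A hedged sketch, assuming the struct and defines above (the helper and its argument values are illustrative):

	/* Sketch of filling a checksum-insert transmit descriptor */
	static void sxg_fill_xmt_desc(struct xmt_desc *desc, u32 dram_addr, ushort len)
	{
		desc->XmtLen = len;				/* word 0 [15:0] */
		desc->XmtCtl = XMT_CTL_GEN_FCS;			/* Rev B only - see note above */
		desc->Cmd = XMT_CSUM_INSERT;			/* 0x40 after the << 6 shift */
		desc->XmtBufId = dram_addr >> XMT_BUF_ID_SHFT;	/* DRAM address / 256 */
	}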
765 | /* Receiver Sequencer Definitions */ | ||
766 | |||
767 | /* Receive Event Queue (queues 3 - 6) bit definitions */ | ||
768 | /* bit mask for the Receive Buffer ID */ | ||
769 | #define RCV_EVTQ_RBFID_MASK 0x0000FFFF | ||
770 | |||
771 | /* Receive Buffer ID definition */ | ||
772 | /* | ||
773 | * The Rcv buffer ID is formed by dividing the buffer (DRAM) address | ||
774 | * by 32 (or << 5) | ||
775 | */ | ||
776 | #define RCV_BUF_ID_SHFT 5 | ||
777 | |||
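In the same way, a receive buffer ID taken from the event queue (masked with RCV_EVTQ_RBFID_MASK) is converted back to a DRAM address by shifting left 5 bits; for example, buffer ID 0x1000 corresponds to DRAM address 0x1000 << 5 = 0x20000.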
778 | /* | ||
779 | * Format of the 18 byte Receive Buffer returned by the | ||
780 | * Receive Sequencer for received packets | ||
781 | */ | ||
782 | #pragma pack(push, 1) | ||
783 | struct rcv_buf_hdr { | ||
784 | u32 Status; /* Status word from Rcv Seq Parser */ | ||
785 | ushort Length; /* Rcv packet byte count */ | ||
786 | union { | ||
787 | ushort TcpCsum; /* TCP checksum */ | ||
788 | struct { | ||
789 | /* lower 8 bits of the TCP checksum */ | ||
790 | unsigned char TcpCsumL; | ||
791 | /* Link hash (multicast frames only) */ | ||
792 | unsigned char LinkHash; | ||
793 | }; | ||
794 | }; | ||
795 | ushort SktHash; /* Socket hash */ | ||
796 | unsigned char TcpHdrOffset; /* TCP header offset into packet */ | ||
797 | unsigned char IpHdrOffset; /* IP header offset into packet */ | ||
798 | u32 TpzHash; /* Toeplitz hash */ | ||
799 | ushort Reserved; /* Reserved */ | ||
800 | }; | ||
801 | #pragma pack(pop) | ||
802 | |||
803 | /* Queue definitions */ | ||
804 | |||
805 | /* Ingress (read only) queue numbers */ | ||
806 | #define PXY_BUF_Q 0 /* Proxy Buffer Queue */ | ||
807 | #define HST_EVT_Q 1 /* Host Event Queue */ | ||
808 | #define XMT_BUF_Q 2 /* Transmit Buffer Queue */ | ||
809 | #define SKT_EVL_Q 3 /* RcvSqr Socket Event Low Priority Queue */ | ||
810 | #define RCV_EVL_Q 4 /* RcvSqr Rcv Event Low Priority Queue */ | ||
811 | #define SKT_EVH_Q 5 /* RcvSqr Socket Event High Priority Queue */ | ||
812 | #define RCV_EVH_Q 6 /* RcvSqr Rcv Event High Priority Queue */ | ||
813 | #define DMA_RSP_Q 7 /* Dma Response Queue - one per CPU context */ | ||
814 | /* Local (read/write) queue numbers */ | ||
815 | #define LOCAL_A_Q 8 /* Spare local Queue */ | ||
816 | #define LOCAL_B_Q 9 /* Spare local Queue */ | ||
817 | #define LOCAL_C_Q 10 /* Spare local Queue */ | ||
818 | #define FSM_EVT_Q 11 /* Finite-State-Machine Event Queue */ | ||
819 | #define SBF_PAL_Q 12 /* System Buffer Physical Address (low) Queue */ | ||
820 | #define SBF_PAH_Q 13 /* System Buffer Physical Address (high) Queue*/ | ||
821 | #define SBF_VAL_Q 14 /* System Buffer Virtual Address (low) Queue */ | ||
822 | #define SBF_VAH_Q 15 /* System Buffer Virtual Address (high) Queue */ | ||
823 | /* Egress (write only) queue numbers */ | ||
824 | #define H2G_CMD_Q 16 /* Host to GlbRam DMA Command Queue */ | ||
825 | #define H2D_CMD_Q 17 /* Host to DRAM DMA Command Queue */ | ||
826 | #define G2H_CMD_Q 18 /* GlbRam to Host DMA Command Queue */ | ||
827 | #define G2D_CMD_Q 19 /* GlbRam to DRAM DMA Command Queue */ | ||
828 | #define D2H_CMD_Q 20 /* DRAM to Host DMA Command Queue */ | ||
829 | #define D2G_CMD_Q 21 /* DRAM to GlbRam DMA Command Queue */ | ||
830 | #define D2D_CMD_Q 22 /* DRAM to DRAM DMA Command Queue */ | ||
831 | #define PXL_CMD_Q 23 /* Low Priority Proxy Command Queue */ | ||
832 | #define PXH_CMD_Q 24 /* High Priority Proxy Command Queue */ | ||
833 | #define RSQ_CMD_Q 25 /* Receive Sequencer Command Queue */ | ||
834 | #define RCV_BUF_Q 26 /* Receive Buffer Queue */ | ||
835 | |||
836 | /* Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q) */ | ||
837 | /* enable copy of xmt descriptor to xmt command queue */ | ||
838 | #define PXY_COPY_EN 0x00200000 | ||
839 | #define PXY_SIZE_16 0x00000000 /* copy 16 bytes */ | ||
840 | #define PXY_SIZE_32 0x00100000 /* copy 32 bytes */ | ||
841 | |||
842 | /* SXG EEPROM/Flash Configuration Definitions */ | ||
843 | |||
844 | /* Location of configuration data in EEPROM or Flash */ | ||
845 | /* start addr for config info in EEPROM */ | ||
846 | #define EEPROM_CONFIG_START_ADDR 0x00 | ||
847 | /* start addr for config info in Flash */ | ||
848 | #define FLASH_CONFIG_START_ADDR 0x80 | ||
849 | |||
850 | /* Configuration data section defines */ | ||
851 | #define HW_CFG_SECTION_SIZE 512 /* size of H/W section */ | ||
852 | #define HW_CFG_SECTION_SIZE_A 256 /* size of H/W section (Sahara rev A) */ | ||
853 | /* starting location (offset) of S/W section */ | ||
854 | #define SW_CFG_SECTION_START 512 | ||
855 | /* starting location (offset) of S/W section (Sahara rev A) */ | ||
856 | #define SW_CFG_SECTION_START_A 256 | ||
857 | #define SW_CFG_SECTION_SIZE 128 /* size of S/W section */ | ||
858 | /* | ||
859 | * H/W configuration data magic word. Goes in Addr field of first | ||
860 | * struct hw_cfg_data entry | ||
861 | */ | ||
862 | #define HW_CFG_MAGIC_WORD 0xA5A5 | ||
863 | /* | ||
864 | * H/W configuration data terminator. Goes in Addr field of last | ||
865 | * struct hw_cfg_data entry | ||
866 | */ | ||
867 | #define HW_CFG_TERMINATOR 0xFFFF | ||
868 | |||
869 | #define SW_CFG_MAGIC_WORD 0x5A5A /* S/W configuration data magic word */ | ||
870 | |||
871 | #pragma pack(push, 1) | ||
872 | /* | ||
873 | * Structure for an element of H/W configuration data. | ||
874 | * Read by the Sahara hardware | ||
875 | */ | ||
876 | struct hw_cfg_data { | ||
877 | ushort Addr; | ||
878 | ushort Data; | ||
879 | }; | ||
880 | |||
881 | /* | ||
882 | * Number of struct hw_cfg_data structures to put in the configuration data | ||
883 | * data structure (struct sxg_config or struct sxg_config_a). The number is | ||
884 | * computed to fill the entire H/W config section of the structure. | ||
885 | */ | ||
886 | #define NUM_HW_CFG_ENTRIES \ | ||
887 | (HW_CFG_SECTION_SIZE / sizeof(struct hw_cfg_data)) | ||
888 | #define NUM_HW_CFG_ENTRIES_A \ | ||
889 | (HW_CFG_SECTION_SIZE_A / sizeof(struct hw_cfg_data)) | ||
890 | |||
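Since struct hw_cfg_data is 4 bytes, these work out to 512 / 4 = 128 entries for the standard H/W section and 256 / 4 = 64 entries for Sahara rev A, exactly filling each section.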
891 | /* MAC address structure */ | ||
892 | struct sxg_config_mac { | ||
893 | unsigned char MacAddr[6]; /* MAC Address */ | ||
894 | }; | ||
895 | |||
896 | /* FRU data structure */ | ||
897 | struct atk_fru { | ||
898 | unsigned char PartNum[6]; | ||
899 | unsigned char Revision[2]; | ||
900 | unsigned char Serial[14]; | ||
901 | }; | ||
902 | |||
903 | /* OEM FRU Format types */ | ||
904 | #define ATK_FRU_FORMAT 0x0000 | ||
905 | #define CPQ_FRU_FORMAT 0x0001 | ||
906 | #define DELL_FRU_FORMAT 0x0002 | ||
907 | #define HP_FRU_FORMAT 0x0003 | ||
908 | #define IBM_FRU_FORMAT 0x0004 | ||
909 | #define EMC_FRU_FORMAT 0x0005 | ||
910 | #define NO_FRU_FORMAT 0xFFFF | ||
911 | |||
912 | #define ATK_OEM_ASSY_SIZE 10 /* assy num is 9 chars plus \0 */ | ||
913 | |||
914 | /* OEM FRU structure for Alacritech */ | ||
915 | struct atk_oem { | ||
916 | unsigned char Assy[ATK_OEM_ASSY_SIZE]; | ||
917 | }; | ||
918 | |||
919 | #define OEM_EEPROM_FRUSIZE 74 /* size of OEM fru info - size */ | ||
920 | /* chosen to fill out the S/W section */ | ||
921 | |||
922 | union oem_fru { /* OEM FRU information */ | ||
923 | unsigned char OemFru[OEM_EEPROM_FRUSIZE]; | ||
924 | struct atk_oem AtkOem; | ||
925 | }; | ||
926 | |||
927 | /* Structure to hold the S/W configuration data. */ | ||
928 | struct sw_cfg_data { | ||
929 | ushort MagicWord; /* Magic word for section 2 */ | ||
930 | ushort Version; /* Format version */ | ||
931 | struct sxg_config_mac MacAddr[4]; /* space for 4 MAC addresses */ | ||
932 | struct atk_fru AtkFru; /* FRU information */ | ||
933 | ushort OemFruFormat; /* OEM FRU format type */ | ||
934 | union oem_fru OemFru; /* OEM FRU information */ | ||
935 | ushort Checksum; /* Checksum of section 2 */ | ||
936 | }; | ||
937 | |||
938 | |||
939 | /* EEPROM/Flash Format */ | ||
940 | struct sxg_config { | ||
941 | /* H/W Section - Read by Sahara hardware (512 bytes) */ | ||
942 | struct hw_cfg_data HwCfg[NUM_HW_CFG_ENTRIES]; | ||
943 | /* S/W Section - Other configuration data (128 bytes) */ | ||
944 | struct sw_cfg_data SwCfg; | ||
945 | }; | ||
946 | |||
947 | #ifdef WINDOWS_COMPILER | ||
948 | /* | ||
949 | * The following macro is something of a kludge, but it is the only way | ||
950 | * that I could find to catch certain programming errors at compile time. | ||
951 | * If the asserted condition is true, then nothing happens. If false, then | ||
952 | * the compiler tries to typedef an array with -1 members, which generates | ||
953 | * an error. Unfortunately, the error message is meaningless, but at least | ||
954 | * it catches the problem. This macro would be unnecessary if the compiler | ||
955 | * allowed the sizeof and offsetof macros to be used in the #if directive. | ||
956 | */ | ||
957 | #define compile_time_assert(cond) \ | ||
958 | typedef char comp_error[(cond) ? 1 : -1] | ||
959 | |||
960 | /* | ||
961 | * A compiler error on either of the next two lines indicates that the struct sxg_config | ||
962 | * structure was built incorrectly. Unfortunately, the error message produced | ||
963 | * is meaningless. But this is apparently the only way to catch this problem | ||
964 | * at compile time. | ||
965 | */ | ||
966 | compile_time_assert (offsetof(struct sxg_config, SwCfg) == SW_CFG_SECTION_START); | ||
967 | compile_time_assert (sizeof(struct sxg_config) == HW_CFG_SECTION_SIZE | ||
968 | + SW_CFG_SECTION_SIZE); | ||
969 | |||
970 | compile_time_assert (offsetof(struct sxg_config_a, SwCfg) | ||
971 | == SW_CFG_SECTION_START_A); | ||
972 | compile_time_assert (sizeof(struct sxg_config_a) == HW_CFG_SECTION_SIZE_A | ||
973 | + SW_CFG_SECTION_SIZE); | ||
974 | #endif | ||
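On a compiler with C11 support, or inside the kernel proper, the same layout checks could be written without the negative-sized-array kludge. A hedged sketch, assuming the SW_CFG_SECTION_START and section-size constants defined earlier in this header are in scope (note that BUILD_BUG_ON() from <linux/bug.h> fails the build when its condition is TRUE, the opposite sense of compile_time_assert(), and must sit at function scope):

    #include <linux/bug.h>	/* BUILD_BUG_ON() */
    #include <linux/stddef.h>	/* offsetof() */

    /* C11 keyword form: no helper macro, and the message is readable. */
    _Static_assert(offsetof(struct sxg_config, SwCfg) == SW_CFG_SECTION_START,
                   "S/W section must start right after the H/W section");

    /* Kernel form: condition sense is inverted, function scope required. */
    static inline void sxg_config_layout_check(void)
    {
            BUILD_BUG_ON(sizeof(struct sxg_config) !=
                         HW_CFG_SECTION_SIZE + SW_CFG_SECTION_SIZE);
    }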
975 | /* | ||
976 | * Structure used to pass information between driver and user-mode | ||
977 | * control application | ||
978 | */ | ||
979 | struct adapt_userinfo { | ||
980 | bool LinkUp; | ||
981 | /* use LinkUp - any need for other states? */ | ||
982 | /* u32 LinkState; */ | ||
983 | u32 LinkSpeed; /* not currently needed */ | ||
984 | u32 LinkDuplex; /* not currently needed */ | ||
985 | enum xcvr_type XcvrType; /* type of xcvr on fiber card */ | ||
986 | /* fiber card xcvr vendor */ | ||
987 | unsigned char XcvrVendor[XCVR_VENDOR_LEN]; | ||
988 | unsigned char XcvrMode[XCVR_MODEL_LEN]; | ||
989 | u32 Port; /* not currently needed */ | ||
990 | u32 PhysPort; /* not currently needed */ | ||
991 | ushort PciLanes; | ||
992 | unsigned char MacAddr[6]; | ||
993 | unsigned char CurrMacAddr[6]; | ||
994 | struct atk_fru AtkFru; | ||
995 | ushort OemFruFormat; | ||
996 | union oem_fru OemFru; | ||
997 | }; | ||
998 | |||
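The structure above is plain data, so handing it to the user-mode control application is typically a single copy out of the kernel. A hypothetical sketch of the driver side, assuming an ioctl-style entry point (the function name and calling convention are invented for illustration; only struct adapt_userinfo comes from this header):

    #include <linux/uaccess.h>	/* copy_to_user() */
    #include <linux/errno.h>

    /* Hypothetical: copy a filled-in adapt_userinfo out to the control app. */
    static long sxg_copy_userinfo_sketch(const struct adapt_userinfo *info,
                                         void __user *arg)
    {
            if (copy_to_user(arg, info, sizeof(*info)))
                    return -EFAULT;
            return 0;
    }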
999 | #pragma pack(pop) | ||
1000 | |||
1001 | /* Miscellaneous Hardware definitions */ | ||
1002 | |||
1003 | /* Hardware Type definitions */ | ||
1004 | |||
1005 | /* Sahara (ASIC level) defines */ | ||
1006 | #define SAHARA_GRAM_SIZE 0x020000 /* GRAM size - 128 KB */ | ||
1007 | #define SAHARA_DRAM_SIZE 0x200000 /* DRAM size - 2 MB */ | ||
1008 | /* QRAM size - 16K entries (64 KB) */ | ||
1009 | #define SAHARA_QRAM_SIZE 0x004000 | ||
1010 | /* WCS - 8K instructions (x 108 bits) */ | ||
1011 | #define SAHARA_WCS_SIZE 0x002000 | ||
1012 | |||
1013 | /* Arabia (board level) defines */ | ||
1014 | #define FLASH_SIZE 0x080000 /* 512 KB (4 Mb) */ | ||
1015 | /* EEPROM size (bytes), including xfmr area */ | ||
1016 | #define EEPROM_SIZE_XFMR 1024 | ||
1017 | /* EEPROM size excluding xfmr area (512 + 128) */ | ||
1018 | #define EEPROM_SIZE_NO_XFMR 640 | ||
1019 | /* EEPROM size for Sahara rev A */ | ||
1020 | #define EEPROM_SIZE_REV_A 512 | ||
diff --git a/drivers/staging/sxg/sxgphycode-1.2.h b/drivers/staging/sxg/sxgphycode-1.2.h deleted file mode 100644 index b5448b9b2787..000000000000 --- a/drivers/staging/sxg/sxgphycode-1.2.h +++ /dev/null | |||
@@ -1,130 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright © 1997-2008 Alacritech, Inc. All rights reserved | ||
3 | * | ||
4 | * $Id: sxgphycode.h,v 1.2 2008/10/02 01:44:07 Exp $ | ||
5 | * | ||
6 | * sxgphycode.h: | ||
7 | * | ||
8 | * This file contains PHY microcode and register initialization data. | ||
9 | */ | ||
10 | |||
11 | /********************************************************************** | ||
12 | * PHY Microcode | ||
13 | **********************************************************************/ | ||
14 | // | ||
15 | // The following contains both PHY microcode and PHY register | ||
16 | // initialization data. It is specific to both the PHY and the | ||
17 | // type of transceiver. | ||
18 | // | ||
19 | |||
20 | // Download for AEL2005C PHY with SR/LR transceiver (10GBASE-SR or 10GBASE-LR) | ||
21 | // AEL2005 SR firmware rev 18 (microInit_mdio_SR_AEL2005C_18.tx). | ||
22 | static struct phy_ucode PhyUcode[] = { | ||
23 | // NOTE: An address of 0 is a special case. When the download routine | ||
24 | // sees an address of 0, it does not write to the PHY. Instead, it delays | ||
25 | // the download. The length of the delay (in ms) is given in the data field. | ||
26 | // Delays are required at certain points. (A sketch of such a download loop follows this table.) | ||
27 | |||
28 | // Platform-specific MDIO Patches: | ||
29 | // (include patches for 10G RX polarity flip, 50 MHz synth, etc) | ||
30 | // Addr Data | ||
31 | {0xc017, 0xfeb0}, // flip RX_LOS polarity (mandatory patch for SFP+ applications) | ||
32 | {0xC001, 0x0428}, // flip RX serial polarity | ||
33 | |||
34 | {0xc013, 0xf341}, // invert lxmit clock (mandatory patch) | ||
35 | {0xc210, 0x8000}, // reset datapath (mandatory patch) | ||
36 | {0xc210, 0x8100}, // reset datapath (mandatory patch) | ||
37 | {0xc210, 0x8000}, // reset datapath (mandatory patch) | ||
38 | {0xc210, 0x0000}, // reset datapath (mandatory patch) | ||
39 | {0x0000, 0x0032}, // wait for 50ms for datapath reset to complete. (mandatory patch) | ||
40 | |||
41 | // Transceiver-specific MDIO Patches: | ||
42 | {0xc003, 0x0181}, // (bit 7) enable the CDR inc setting in 1.C005 (mandatory patch for SR code) | ||
43 | {0xc010, 0x448a}, // (bit 14) mask out high BER input from the LOS signal in 1.000A (mandatory patch for SR code) | ||
44 | |||
45 | // Transceiver-specific Microcontroller Initialization: | ||
46 | {0xc04a, 0x5200}, // activate microcontroller and pause | ||
47 | {0x0000, 0x0032}, // wait 50ms for microcontroller before writing in code. | ||
48 | |||
49 | // code block starts here: | ||
50 | {0xcc00, 0x2ff4}, {0xcc01, 0x3cd4}, {0xcc02, 0x2015}, {0xcc03, 0x3125}, | ||
51 | {0xcc04, 0x6524}, {0xcc05, 0x27ff}, {0xcc06, 0x300f}, {0xcc07, 0x2c8b}, | ||
52 | {0xcc08, 0x300b}, {0xcc09, 0x4009}, {0xcc0a, 0x400e}, {0xcc0b, 0x2f12}, | ||
53 | {0xcc0c, 0x3002}, {0xcc0d, 0x1002}, {0xcc0e, 0x2112}, {0xcc0f, 0x3012}, | ||
54 | {0xcc10, 0x1002}, {0xcc11, 0x2572}, {0xcc12, 0x3012}, {0xcc13, 0x1002}, | ||
55 | {0xcc14, 0xd01e}, {0xcc15, 0x2772}, {0xcc16, 0x3012}, {0xcc17, 0x1002}, | ||
56 | {0xcc18, 0x2004}, {0xcc19, 0x3c84}, {0xcc1a, 0x6436}, {0xcc1b, 0x2007}, | ||
57 | {0xcc1c, 0x3f87}, {0xcc1d, 0x8676}, {0xcc1e, 0x40b7}, {0xcc1f, 0xa746}, | ||
58 | {0xcc20, 0x4047}, {0xcc21, 0x5673}, {0xcc22, 0x2982}, {0xcc23, 0x3002}, | ||
59 | {0xcc24, 0x13d2}, {0xcc25, 0x8bbd}, {0xcc26, 0x2802}, {0xcc27, 0x3012}, | ||
60 | {0xcc28, 0x1002}, {0xcc29, 0x2032}, {0xcc2a, 0x3012}, {0xcc2b, 0x1002}, | ||
61 | {0xcc2c, 0x5cc3}, {0xcc2d, 0x0314}, {0xcc2e, 0x2942}, {0xcc2f, 0x3002}, | ||
62 | {0xcc30, 0x1002}, {0xcc31, 0xd019}, {0xcc32, 0x2fd2}, {0xcc33, 0x3002}, | ||
63 | {0xcc34, 0x1002}, {0xcc35, 0x2a04}, {0xcc36, 0x3c74}, {0xcc37, 0x6435}, | ||
64 | {0xcc38, 0x2fa4}, {0xcc39, 0x3cd4}, {0xcc3a, 0x6624}, {0xcc3b, 0x5563}, | ||
65 | {0xcc3c, 0x2d42}, {0xcc3d, 0x3002}, {0xcc3e, 0x13d2}, {0xcc3f, 0x464d}, | ||
66 | {0xcc40, 0x2802}, {0xcc41, 0x3012}, {0xcc42, 0x1002}, {0xcc43, 0x2fd2}, | ||
67 | {0xcc44, 0x3002}, {0xcc45, 0x1002}, {0xcc46, 0x2fb4}, {0xcc47, 0x3cd4}, | ||
68 | {0xcc48, 0x6624}, {0xcc49, 0x5563}, {0xcc4a, 0x2d42}, {0xcc4b, 0x3002}, | ||
69 | {0xcc4c, 0x13d2}, {0xcc4d, 0x2e72}, {0xcc4e, 0x3002}, {0xcc4f, 0x1002}, | ||
70 | {0xcc50, 0x2f72}, {0xcc51, 0x3002}, {0xcc52, 0x1002}, {0xcc53, 0x0004}, | ||
71 | {0xcc54, 0x2942}, {0xcc55, 0x3002}, {0xcc56, 0x1002}, {0xcc57, 0x2032}, | ||
72 | {0xcc58, 0x3012}, {0xcc59, 0x1002}, {0xcc5a, 0x5cc3}, {0xcc5b, 0x0317}, | ||
73 | {0xcc5c, 0x2f12}, {0xcc5d, 0x3002}, {0xcc5e, 0x1002}, {0xcc5f, 0x2942}, | ||
74 | {0xcc60, 0x3002}, {0xcc61, 0x1002}, {0xcc62, 0x22cd}, {0xcc63, 0x301d}, | ||
75 | {0xcc64, 0x2802}, {0xcc65, 0x3012}, {0xcc66, 0x1002}, {0xcc67, 0x20b2}, | ||
76 | {0xcc68, 0x3012}, {0xcc69, 0x1002}, {0xcc6a, 0x5aa3}, {0xcc6b, 0x2dc2}, | ||
77 | {0xcc6c, 0x3002}, {0xcc6d, 0x1312}, {0xcc6e, 0x2d02}, {0xcc6f, 0x3002}, | ||
78 | {0xcc70, 0x1002}, {0xcc71, 0x2807}, {0xcc72, 0x31a7}, {0xcc73, 0x20c4}, | ||
79 | {0xcc74, 0x3c24}, {0xcc75, 0x6724}, {0xcc76, 0x1002}, {0xcc77, 0x2807}, | ||
80 | {0xcc78, 0x3187}, {0xcc79, 0x20c4}, {0xcc7a, 0x3c24}, {0xcc7b, 0x6724}, | ||
81 | {0xcc7c, 0x1002}, {0xcc7d, 0x2514}, {0xcc7e, 0x3c64}, {0xcc7f, 0x6436}, | ||
82 | {0xcc80, 0xdff4}, {0xcc81, 0x6436}, {0xcc82, 0x1002}, {0xcc83, 0x40a4}, | ||
83 | {0xcc84, 0x643c}, {0xcc85, 0x4016}, {0xcc86, 0x8c6c}, {0xcc87, 0x2b24}, | ||
84 | {0xcc88, 0x3c24}, {0xcc89, 0x6435}, {0xcc8a, 0x1002}, {0xcc8b, 0x2b24}, | ||
85 | {0xcc8c, 0x3c24}, {0xcc8d, 0x643a}, {0xcc8e, 0x4025}, {0xcc8f, 0x8a5a}, | ||
86 | {0xcc90, 0x1002}, {0xcc91, 0x26d1}, {0xcc92, 0x3011}, {0xcc93, 0x1001}, | ||
87 | {0xcc94, 0xc7a0}, {0xcc95, 0x0100}, {0xcc96, 0xc502}, {0xcc97, 0x53ac}, | ||
88 | {0xcc98, 0xc503}, {0xcc99, 0xd5d5}, {0xcc9a, 0xc600}, {0xcc9b, 0x2a6d}, | ||
89 | {0xcc9c, 0xc601}, {0xcc9d, 0x2a4c}, {0xcc9e, 0xc602}, {0xcc9f, 0x0111}, | ||
90 | {0xcca0, 0xc60c}, {0xcca1, 0x5900}, {0xcca2, 0xc710}, {0xcca3, 0x0700}, | ||
91 | {0xcca4, 0xc718}, {0xcca5, 0x0700}, {0xcca6, 0xc720}, {0xcca7, 0x4700}, | ||
92 | {0xcca8, 0xc801}, {0xcca9, 0x7f50}, {0xccaa, 0xc802}, {0xccab, 0x7760}, | ||
93 | {0xccac, 0xc803}, {0xccad, 0x7fce}, {0xccae, 0xc804}, {0xccaf, 0x5700}, | ||
94 | {0xccb0, 0xc805}, {0xccb1, 0x5f11}, {0xccb2, 0xc806}, {0xccb3, 0x4751}, | ||
95 | {0xccb4, 0xc807}, {0xccb5, 0x57e1}, {0xccb6, 0xc808}, {0xccb7, 0x2700}, | ||
96 | {0xccb8, 0xc809}, {0xccb9, 0x0000}, {0xccba, 0xc821}, {0xccbb, 0x0002}, | ||
97 | {0xccbc, 0xc822}, {0xccbd, 0x0014}, {0xccbe, 0xc832}, {0xccbf, 0x1186}, | ||
98 | {0xccc0, 0xc847}, {0xccc1, 0x1e02}, {0xccc2, 0xc013}, {0xccc3, 0xf341}, | ||
99 | {0xccc4, 0xc01a}, {0xccc5, 0x0446}, {0xccc6, 0xc024}, {0xccc7, 0x1000}, | ||
100 | {0xccc8, 0xc025}, {0xccc9, 0x0a00}, {0xccca, 0xc026}, {0xcccb, 0x0c0c}, | ||
101 | {0xcccc, 0xc027}, {0xcccd, 0x0c0c}, {0xccce, 0xc029}, {0xcccf, 0x00a0}, | ||
102 | {0xccd0, 0xc030}, {0xccd1, 0x0a00}, {0xccd2, 0xc03c}, {0xccd3, 0x001c}, | ||
103 | {0xccd4, 0xc005}, {0xccd5, 0x7a06}, {0xccd6, 0x0000}, {0xccd7, 0x26d1}, | ||
104 | {0xccd8, 0x3011}, {0xccd9, 0x1001}, {0xccda, 0xc620}, {0xccdb, 0x0000}, | ||
105 | {0xccdc, 0xc621}, {0xccdd, 0x003f}, {0xccde, 0xc622}, {0xccdf, 0x0000}, | ||
106 | {0xcce0, 0xc623}, {0xcce1, 0x0000}, {0xcce2, 0xc624}, {0xcce3, 0x0000}, | ||
107 | {0xcce4, 0xc625}, {0xcce5, 0x0000}, {0xcce6, 0xc627}, {0xcce7, 0x0000}, | ||
108 | {0xcce8, 0xc628}, {0xcce9, 0x0000}, {0xccea, 0xc62c}, {0xcceb, 0x0000}, | ||
109 | {0xccec, 0x0000}, {0xcced, 0x2806}, {0xccee, 0x3cb6}, {0xccef, 0xc161}, | ||
110 | {0xccf0, 0x6134}, {0xccf1, 0x6135}, {0xccf2, 0x5443}, {0xccf3, 0x0303}, | ||
111 | {0xccf4, 0x6524}, {0xccf5, 0x000b}, {0xccf6, 0x1002}, {0xccf7, 0x2104}, | ||
112 | {0xccf8, 0x3c24}, {0xccf9, 0x2105}, {0xccfa, 0x3805}, {0xccfb, 0x6524}, | ||
113 | {0xccfc, 0xdff4}, {0xccfd, 0x4005}, {0xccfe, 0x6524}, {0xccff, 0x1002}, | ||
114 | {0xcd00, 0x5dd3}, {0xcd01, 0x0306}, {0xcd02, 0x2ff7}, {0xcd03, 0x38f7}, | ||
115 | {0xcd04, 0x60b7}, {0xcd05, 0xdffd}, {0xcd06, 0x000a}, {0xcd07, 0x1002}, | ||
116 | {0xcd08, 0x0000}, | ||
117 | // end of code block | ||
118 | |||
119 | // Unpause the microcontroller to start program | ||
120 | {0xca00, 0x0080}, | ||
121 | {0xca12, 0x0000}, | ||
122 | {0x0000, 0x000A}, // wait 10ms just to be safe | ||
123 | |||
124 | // Configure the LEDs | ||
125 | {0xc214, 0x0099}, // configure the LED drivers (for Sahara rev B) | ||
126 | {0xc216, 0x0400}, // configure the one LED | ||
127 | {0xc217, 0x0000}, // don't drive the 2nd LED (if it exists) | ||
128 | |||
129 | {0xffff, 0xffff} // table terminator | ||
130 | }; | ||
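For context on how the table format described at the top of PhyUcode is typically consumed, here is a hypothetical download loop. Only the conventions taken from the comments above are treated as given (address 0 encodes a delay in ms in the data field, and {0xffff, 0xffff} terminates the table); the adapter_t type, the Addr/Data member names of struct phy_ucode, and the sxg_write_mdio_reg() helper are illustrative stand-ins, not the removed driver's actual interfaces:

    #include <linux/delay.h>	/* msleep() */

    /* Hypothetical sketch only -- not the removed driver's routine. */
    static void sxg_download_phy_ucode_sketch(struct adapter_t *adapter)
    {
            const struct phy_ucode *p = PhyUcode;

            while (!(p->Addr == 0xffff && p->Data == 0xffff)) {
                    if (p->Addr == 0x0000) {
                            /* Address 0: delay for Data milliseconds. */
                            msleep(p->Data);
                    } else {
                            sxg_write_mdio_reg(adapter, p->Addr, p->Data);
                    }
                    p++;
            }
    }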