Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/Kconfig          |    1
-rw-r--r--  drivers/net/ethernet/Makefile         |    1
-rw-r--r--  drivers/net/ethernet/tehuti/Kconfig   |   26
-rw-r--r--  drivers/net/ethernet/tehuti/Makefile  |    5
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c  | 2470
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.h  |  561
6 files changed, 3064 insertions, 0 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 16c206e19a3b..8375b8b5dd73 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -31,5 +31,6 @@ source "drivers/net/ethernet/racal/Kconfig"
 source "drivers/net/ethernet/sfc/Kconfig"
 source "drivers/net/ethernet/smsc/Kconfig"
 source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/tehuti/Kconfig"
 
 endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index a52dc26bd9ed..26324a115905 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -22,3 +22,4 @@ obj-$(CONFIG_NET_VENDOR_RACAL) += racal/
 obj-$(CONFIG_SFC) += sfc/
 obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
 obj-$(CONFIG_NET_VENDOR_SUN) += sun/
+obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
diff --git a/drivers/net/ethernet/tehuti/Kconfig b/drivers/net/ethernet/tehuti/Kconfig
new file mode 100644
index 000000000000..914ad4059eae
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/Kconfig
@@ -0,0 +1,26 @@
1#
2# Tehuti network device configuration
3#
4
5config NET_VENDOR_TEHUTI
6 bool "Tehuti devices"
7 depends on PCI
8 ---help---
9 If you have a network (Ethernet) card belonging to this class, say Y
10 and read the Ethernet-HOWTO, available from
11 <http://www.tldp.org/docs.html#howto>.
12
13 Note that the answer to this question doesn't directly affect the
14 kernel: saying N will just cause the configurator to skip all
15 the questions about Tehuti cards. If you say Y, you will be asked for
16 your specific card in the following questions.
17
18if NET_VENDOR_TEHUTI
19
20config TEHUTI
21 tristate "Tehuti Networks 10G Ethernet"
22 depends on PCI
23 ---help---
24 Tehuti Networks 10G Ethernet NIC
25
26endif # NET_VENDOR_TEHUTI
diff --git a/drivers/net/ethernet/tehuti/Makefile b/drivers/net/ethernet/tehuti/Makefile
new file mode 100644
index 000000000000..f995421ddbc8
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the Tehuti network device drivers.
3#
4
5obj-$(CONFIG_TEHUTI) += tehuti.o
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
new file mode 100644
index 000000000000..749bbf18dc6a
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -0,0 +1,2470 @@
1/*
2 * Tehuti Networks(R) Network Driver
3 * ethtool interface implementation
4 * Copyright (C) 2007 Tehuti Networks Ltd. All rights reserved
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12/*
13 * RX HW/SW interaction overview
14 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
15 * There are 2 types of RX communication channels between driver and NIC.
16 * 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming
17 * traffic. This Fifo is filled by SW and read by HW. Each descriptor holds
18 * info about buffer's location, size and ID. An ID field is used to identify a
19 * buffer when it's returned with data via RXD Fifo (see below)
20 * 2) RX Data Fifo - RXD - holds descriptors of full buffers. This Fifo is
21 * filled by HW and read by SW. Each descriptor holds status and ID.
22 * HW pops descriptor from RXF Fifo, stores ID, fills buffer with incoming data,
23 * via dma moves it into host memory, builds new RXD descriptor with same ID,
24 * pushes it into RXD Fifo and raises interrupt to indicate new RX data.
25 *
26 * Current NIC configuration (registers + firmware) makes NIC use 2 RXF Fifos.
27 * One holds 1.5K packets and the other 26K packets. Depending on the incoming
28 * packet size, HW decides which RXF Fifo to pop a buffer from. When the packet
29 * is filled with data, HW builds a new RXD descriptor for it and pushes it into
30 * the single RXD Fifo.
31 *
32 * RX SW Data Structures
33 * ~~~~~~~~~~~~~~~~~~~~~
34 * skb db - used to keep track of all skbs owned by SW and their dma addresses.
35 * For RX case, ownership lasts from allocating new empty skb for RXF until
36 * accepting full skb from RXD and passing it to OS. Each RXF Fifo has its own
37 * skb db. Implemented as array with bitmask.
38 * fifo - keeps info about fifo's size and location, relevant HW registers,
39 * usage and skb db. Each RXD and RXF Fifo has its own fifo structure.
40 * Implemented as simple struct.
41 *
42 * RX SW Execution Flow
43 * ~~~~~~~~~~~~~~~~~~~~
44 * Upon initialization (ifconfig up) driver creates RX fifos and initializes
45 * relevant registers. At the end of init phase, driver enables interrupts.
46 * NIC sees that there are no RXF buffers and raises
47 * RD_INTR interrupt, isr fills skbs and Rx begins.
48 * Driver has two receive operation modes:
49 * NAPI - interrupt-driven mixed with polling
50 * interrupt-driven only
51 *
52 * The interrupt-driven only flow is as follows: when a buffer is ready, HW
53 * raises an interrupt and the isr is called. The isr collects all available
54 * packets (bdx_rx_receive), refills skbs (bdx_rx_alloc_skbs) and exits.
55
56 * Rx buffer allocation note
57 * ~~~~~~~~~~~~~~~~~~~~~~~~~
58 * The driver takes care to feed only as many RxF descriptors as will keep the
59 * resulting RxD descriptors from filling the entire RxD fifo. The main reason is
60 * the lack of an overflow check in Bordeaux for the RxD fifo free/used size.
61 * FIXME: this is NOT fully implemented, more work should be done
62 *
63 */
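The RXF/RXD ID round-trip described above can be modeled in a few lines of standalone C. This is an illustrative sketch only, not part of this patch; all names and values are hypothetical:

#include <stdint.h>
#include <stdio.h>

struct rxf_slot { uint32_t id; void *buf; };    /* SW -> HW: empty buffer */
struct rxd_slot { uint32_t id; uint32_t len; }; /* HW -> SW: filled buffer */

int main(void)
{
	char buf[2048];
	struct rxf_slot rxf = { .id = 7, .buf = buf }; /* driver posts buffer */

	/* "HW" pops the RXF slot, DMAs the packet in, and returns
	 * a RXD descriptor carrying the very same ID */
	struct rxd_slot rxd = { .id = rxf.id, .len = 64 };

	/* the driver uses the ID to find the buffer it posted earlier */
	printf("%u byte packet landed in buffer id %u\n", rxd.len, rxd.id);
	return 0;
}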
64
65#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
66
67#include "tehuti.h"
68
69static DEFINE_PCI_DEVICE_TABLE(bdx_pci_tbl) = {
70 {0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
71 {0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
72 {0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
73 {0}
74};
75
76MODULE_DEVICE_TABLE(pci, bdx_pci_tbl);
77
78/* Definitions needed by ISR or NAPI functions */
79static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f);
80static void bdx_tx_cleanup(struct bdx_priv *priv);
81static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget);
82
83/* Definitions needed by FW loading */
84static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size);
85
86/* Definitions needed by hw_start */
87static int bdx_tx_init(struct bdx_priv *priv);
88static int bdx_rx_init(struct bdx_priv *priv);
89
90/* Definitions needed by bdx_close */
91static void bdx_rx_free(struct bdx_priv *priv);
92static void bdx_tx_free(struct bdx_priv *priv);
93
94/* Definitions needed by bdx_probe */
95static void bdx_set_ethtool_ops(struct net_device *netdev);
96
97/*************************************************************************
98 * Print Info *
99 *************************************************************************/
100
101static void print_hw_id(struct pci_dev *pdev)
102{
103 struct pci_nic *nic = pci_get_drvdata(pdev);
104 u16 pci_link_status = 0;
105 u16 pci_ctrl = 0;
106
107 pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status);
108 pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl);
109
110 pr_info("%s%s\n", BDX_NIC_NAME,
111 nic->port_num == 1 ? "" : ", 2-Port");
112 pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n",
113 readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
114 readl(nic->regs + FPGA_SEED),
115 GET_LINK_STATUS_LANES(pci_link_status),
116 GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
117}
118
119static void print_fw_id(struct pci_nic *nic)
120{
121 pr_info("fw 0x%x\n", readl(nic->regs + FW_VER));
122}
123
124static void print_eth_id(struct net_device *ndev)
125{
126 netdev_info(ndev, "%s, Port %c\n",
127 BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B');
128
129}
130
131/*************************************************************************
132 * Code *
133 *************************************************************************/
134
135#define bdx_enable_interrupts(priv) \
136 do { WRITE_REG(priv, regIMR, IR_RUN); } while (0)
137#define bdx_disable_interrupts(priv) \
138 do { WRITE_REG(priv, regIMR, 0); } while (0)
139
140/* bdx_fifo_init
141 * create TX/RX descriptor fifo for host-NIC communication.
142 * 1K extra space is allocated at the end of the fifo to simplify
143 * processing of descriptors that wrap around the fifo's end
144 * @priv - NIC private structure
145 * @f - fifo to initialize
146 * @fsz_type - fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
147 * @reg_XXX - offsets of registers relative to base address
148 *
149 * Returns 0 on success, negative value on failure
150 *
151 */
152static int
153bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
154 u16 reg_CFG0, u16 reg_CFG1, u16 reg_RPTR, u16 reg_WPTR)
155{
156 u16 memsz = FIFO_SIZE * (1 << fsz_type);
157
158 memset(f, 0, sizeof(struct fifo));
159 /* pci_alloc_consistent gives us 4k-aligned memory */
160 f->va = pci_alloc_consistent(priv->pdev,
161 memsz + FIFO_EXTRA_SPACE, &f->da);
162 if (!f->va) {
163 pr_err("pci_alloc_consistent failed\n");
164 RET(-ENOMEM);
165 }
166 f->reg_CFG0 = reg_CFG0;
167 f->reg_CFG1 = reg_CFG1;
168 f->reg_RPTR = reg_RPTR;
169 f->reg_WPTR = reg_WPTR;
170 f->rptr = 0;
171 f->wptr = 0;
172 f->memsz = memsz;
173 f->size_mask = memsz - 1;
174 WRITE_REG(priv, reg_CFG0, (u32) ((f->da & TX_RX_CFG0_BASE) | fsz_type));
175 WRITE_REG(priv, reg_CFG1, H32_64(f->da));
176
177 RET(0);
178}
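The fsz_type argument encodes the fifo size as a power-of-two multiple of FIFO_SIZE. A quick standalone check of that math (assuming FIFO_SIZE is 4096, as defined in tehuti.h):

#include <stdio.h>

#define FIFO_SIZE 4096	/* assumed value, see tehuti.h */

int main(void)
{
	for (int fsz_type = 0; fsz_type <= 3; fsz_type++)
		printf("fsz_type %d -> %d KB fifo\n",
		       fsz_type, FIFO_SIZE * (1 << fsz_type) / 1024);
	return 0;	/* prints 4, 8, 16, 32 KB */
}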
179
180/* bdx_fifo_free - free all resources used by fifo
181 * @priv - NIC private structure
182 * @f - fifo to release
183 */
184static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
185{
186 ENTER;
187 if (f->va) {
188 pci_free_consistent(priv->pdev,
189 f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
190 f->va = NULL;
191 }
192 RET();
193}
194
195/*
196 * bdx_link_changed - notifies OS about hw link state.
197 * @bdx_priv - hw adapter structure
198 */
199static void bdx_link_changed(struct bdx_priv *priv)
200{
201 u32 link = READ_REG(priv, regMAC_LNK_STAT) & MAC_LINK_STAT;
202
203 if (!link) {
204 if (netif_carrier_ok(priv->ndev)) {
205 netif_stop_queue(priv->ndev);
206 netif_carrier_off(priv->ndev);
207 netdev_err(priv->ndev, "Link Down\n");
208 }
209 } else {
210 if (!netif_carrier_ok(priv->ndev)) {
211 netif_wake_queue(priv->ndev);
212 netif_carrier_on(priv->ndev);
213 netdev_err(priv->ndev, "Link Up\n");
214 }
215 }
216}
217
218static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
219{
220 if (isr & IR_RX_FREE_0) {
221 bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
222 DBG("RX_FREE_0\n");
223 }
224
225 if (isr & IR_LNKCHG0)
226 bdx_link_changed(priv);
227
228 if (isr & IR_PCIE_LINK)
229 netdev_err(priv->ndev, "PCI-E Link Fault\n");
230
231 if (isr & IR_PCIE_TOUT)
232 netdev_err(priv->ndev, "PCI-E Time Out\n");
233
234}
235
236/* bdx_isr - Interrupt Service Routine for Bordeaux NIC
237 * @irq - interrupt number
238 * @ndev - network device
239 * @regs - CPU registers
240 *
241 * Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED - otherwise
242 *
243 * It reads the ISR register to learn interrupt reasons and processes them one by one.
244 * Reasons of interest are:
245 * RX_DESC - new packet has arrived and RXD fifo holds its descriptor
246 * RX_FREE - number of free Rx buffers in RXF fifo gets low
247 * TX_FREE - a packet was transmitted and the TXF fifo holds its descriptor
248 */
249
250static irqreturn_t bdx_isr_napi(int irq, void *dev)
251{
252 struct net_device *ndev = dev;
253 struct bdx_priv *priv = netdev_priv(ndev);
254 u32 isr;
255
256 ENTER;
257 isr = (READ_REG(priv, regISR) & IR_RUN);
258 if (unlikely(!isr)) {
259 bdx_enable_interrupts(priv);
260 return IRQ_NONE; /* Not our interrupt */
261 }
262
263 if (isr & IR_EXTRA)
264 bdx_isr_extra(priv, isr);
265
266 if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) {
267 if (likely(napi_schedule_prep(&priv->napi))) {
268 __napi_schedule(&priv->napi);
269 RET(IRQ_HANDLED);
270 } else {
271 /* NOTE: we get here if intr has slipped into window
272 * between these lines in bdx_poll:
273 * bdx_enable_interrupts(priv);
274 * return 0;
275 * currently intrs are disabled (since we read ISR),
276 * and we have failed to register next poll.
277 * so we read the regs to trigger the chip
278 * and allow further interrupts. */
279 READ_REG(priv, regTXF_WPTR_0);
280 READ_REG(priv, regRXD_WPTR_0);
281 }
282 }
283
284 bdx_enable_interrupts(priv);
285 RET(IRQ_HANDLED);
286}
287
288static int bdx_poll(struct napi_struct *napi, int budget)
289{
290 struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi);
291 int work_done;
292
293 ENTER;
294 bdx_tx_cleanup(priv);
295 work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget);
296 if ((work_done < budget) ||
297 (priv->napi_stop++ >= 30)) {
298 DBG("rx poll is done. backing to isr-driven\n");
299
300 /* from time to time we exit to let NAPI layer release
301 * device lock and allow waiting tasks (eg rmmod) to advance) */
302 priv->napi_stop = 0;
303
304 napi_complete(napi);
305 bdx_enable_interrupts(priv);
306 }
307 return work_done;
308}
309
310/* bdx_fw_load - loads firmware to NIC
311 * @priv - NIC private structure
312 * Firmware is loaded via TXD fifo, so it must be initialized first.
313 * Firmware must be loaded once per NIC, not per PCI function the NIC provides
314 * (a NIC can have several of them). So all drivers use a semaphore register
315 * to choose the one that will actually load FW to the NIC.
316 */
317
318static int bdx_fw_load(struct bdx_priv *priv)
319{
320 const struct firmware *fw = NULL;
321 int master, i;
322 int rc;
323
324 ENTER;
325 master = READ_REG(priv, regINIT_SEMAPHORE);
326 if (!READ_REG(priv, regINIT_STATUS) && master) {
327 rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
328 if (rc)
329 goto out;
330 bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
331 mdelay(100);
332 }
333 for (i = 0; i < 200; i++) {
334 if (READ_REG(priv, regINIT_STATUS)) {
335 rc = 0;
336 goto out;
337 }
338 mdelay(2);
339 }
340 rc = -EIO;
341out:
342 if (master)
343 WRITE_REG(priv, regINIT_SEMAPHORE, 1);
344 if (fw)
345 release_firmware(fw);
346
347 if (rc) {
348 netdev_err(priv->ndev, "firmware loading failed\n");
349 if (rc == -EIO)
350 DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
351 READ_REG(priv, regVPC),
352 READ_REG(priv, regVIC),
353 READ_REG(priv, regINIT_STATUS), i);
354 RET(rc);
355 } else {
356 DBG("%s: firmware loading success\n", priv->ndev->name);
357 RET(0);
358 }
359}
360
361static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
362{
363 u32 val;
364
365 ENTER;
366 DBG("mac0=%x mac1=%x mac2=%x\n",
367 READ_REG(priv, regUNC_MAC0_A),
368 READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));
369
370 val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]);
371 WRITE_REG(priv, regUNC_MAC2_A, val);
372 val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]);
373 WRITE_REG(priv, regUNC_MAC1_A, val);
374 val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]);
375 WRITE_REG(priv, regUNC_MAC0_A, val);
376
377 DBG("mac0=%x mac1=%x mac2=%x\n",
378 READ_REG(priv, regUNC_MAC0_A),
379 READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));
380 RET();
381}
382
383/* bdx_hw_start - inits registers and starts HW's Rx and Tx engines
384 * @priv - NIC private structure
385 */
386static int bdx_hw_start(struct bdx_priv *priv)
387{
388 int rc = -EIO;
389 struct net_device *ndev = priv->ndev;
390
391 ENTER;
392 bdx_link_changed(priv);
393
394 /* 10G overall max length (vlan, eth&ip header, ip payload, crc) */
395 WRITE_REG(priv, regFRM_LENGTH, 0X3FE0);
396 WRITE_REG(priv, regPAUSE_QUANT, 0x96);
397 WRITE_REG(priv, regRX_FIFO_SECTION, 0x800010);
398 WRITE_REG(priv, regTX_FIFO_SECTION, 0xE00010);
399 WRITE_REG(priv, regRX_FULLNESS, 0);
400 WRITE_REG(priv, regTX_FULLNESS, 0);
401 WRITE_REG(priv, regCTRLST,
402 regCTRLST_BASE | regCTRLST_RX_ENA | regCTRLST_TX_ENA);
403
404 WRITE_REG(priv, regVGLB, 0);
405 WRITE_REG(priv, regMAX_FRAME_A,
406 priv->rxf_fifo0.m.pktsz & MAX_FRAME_AB_VAL);
407
408 DBG("RDINTCM=%08x\n", priv->rdintcm); /*NOTE: test script uses this */
409 WRITE_REG(priv, regRDINTCM0, priv->rdintcm);
410 WRITE_REG(priv, regRDINTCM2, 0); /*cpu_to_le32(rcm.val)); */
411
412 DBG("TDINTCM=%08x\n", priv->tdintcm); /*NOTE: test script uses this */
413 WRITE_REG(priv, regTDINTCM0, priv->tdintcm); /* old val = 0x300064 */
414
415 /* Enable timer interrupt once every 2 secs. */
416 /*WRITE_REG(priv, regGTMR0, ((GTMR_SEC * 2) & GTMR_DATA)); */
417 bdx_restore_mac(priv->ndev, priv);
418
419 WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
420 GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);
421
422#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)
423
424 rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
425 ndev->name, ndev);
426 if (rc)
427 goto err_irq;
428 bdx_enable_interrupts(priv);
429
430 RET(0);
431
432err_irq:
433 RET(rc);
434}
435
436static void bdx_hw_stop(struct bdx_priv *priv)
437{
438 ENTER;
439 bdx_disable_interrupts(priv);
440 free_irq(priv->pdev->irq, priv->ndev);
441
442 netif_carrier_off(priv->ndev);
443 netif_stop_queue(priv->ndev);
444
445 RET();
446}
447
448static int bdx_hw_reset_direct(void __iomem *regs)
449{
450 u32 val, i;
451 ENTER;
452
453 /* reset sequences: read, write 1, read, write 0 */
454 val = readl(regs + regCLKPLL);
455 writel((val | CLKPLL_SFTRST) + 0x8, regs + regCLKPLL);
456 udelay(50);
457 val = readl(regs + regCLKPLL);
458 writel(val & ~CLKPLL_SFTRST, regs + regCLKPLL);
459
460 /* check that the PLLs are locked and reset ended */
461 for (i = 0; i < 70; i++, mdelay(10))
462 if ((readl(regs + regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
463 /* do any PCI-E read transaction */
464 readl(regs + regRXD_CFG0_0);
465 return 0;
466 }
467 pr_err("HW reset failed\n");
468 return 1; /* failure */
469}
470
471static int bdx_hw_reset(struct bdx_priv *priv)
472{
473 u32 val, i;
474 ENTER;
475
476 if (priv->port == 0) {
477 /* reset sequences: read, write 1, read, write 0 */
478 val = READ_REG(priv, regCLKPLL);
479 WRITE_REG(priv, regCLKPLL, (val | CLKPLL_SFTRST) + 0x8);
480 udelay(50);
481 val = READ_REG(priv, regCLKPLL);
482 WRITE_REG(priv, regCLKPLL, val & ~CLKPLL_SFTRST);
483 }
484 /* check that the PLLs are locked and reset ended */
485 for (i = 0; i < 70; i++, mdelay(10))
486 if ((READ_REG(priv, regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
487 /* do any PCI-E read transaction */
488 READ_REG(priv, regRXD_CFG0_0);
489 return 0;
490 }
491 pr_err("HW reset failed\n");
492 return 1; /* failure */
493}
494
495static int bdx_sw_reset(struct bdx_priv *priv)
496{
497 int i;
498
499 ENTER;
500 /* 1. load MAC (obsolete) */
501 /* 2. disable Rx (and Tx) */
502 WRITE_REG(priv, regGMAC_RXF_A, 0);
503 mdelay(100);
504 /* 3. disable port */
505 WRITE_REG(priv, regDIS_PORT, 1);
506 /* 4. disable queue */
507 WRITE_REG(priv, regDIS_QU, 1);
508 /* 5. wait until hw is disabled */
509 for (i = 0; i < 50; i++) {
510 if (READ_REG(priv, regRST_PORT) & 1)
511 break;
512 mdelay(10);
513 }
514 if (i == 50)
515 netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");
516
517 /* 6. disable intrs */
518 WRITE_REG(priv, regRDINTCM0, 0);
519 WRITE_REG(priv, regTDINTCM0, 0);
520 WRITE_REG(priv, regIMR, 0);
521 READ_REG(priv, regISR);
522
523 /* 7. reset queue */
524 WRITE_REG(priv, regRST_QU, 1);
525 /* 8. reset port */
526 WRITE_REG(priv, regRST_PORT, 1);
527 /* 9. zero all read and write pointers */
528 for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
529 DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
530 for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
531 WRITE_REG(priv, i, 0);
532 /* 10. unset port disable */
533 WRITE_REG(priv, regDIS_PORT, 0);
534 /* 11. unset queue disable */
535 WRITE_REG(priv, regDIS_QU, 0);
536 /* 12. unset queue reset */
537 WRITE_REG(priv, regRST_QU, 0);
538 /* 13. unset port reset */
539 WRITE_REG(priv, regRST_PORT, 0);
540 /* 14. enable Rx */
541 /* skipped. will be done later */
542 /* 15. save MAC (obsolete) */
543 for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
544 DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
545
546 RET(0);
547}
548
549/* bdx_reset - performs right type of reset depending on hw type */
550static int bdx_reset(struct bdx_priv *priv)
551{
552 ENTER;
553 RET((priv->pdev->device == 0x3009)
554 ? bdx_hw_reset(priv)
555 : bdx_sw_reset(priv));
556}
557
558/**
559 * bdx_close - Disables a network interface
560 * @netdev: network interface device structure
561 *
562 * Returns 0, this is not allowed to fail
563 *
564 * The close entry point is called when an interface is de-activated
565 * by the OS. The hardware is still under the drivers control, but
566 * needs to be disabled. A global MAC reset is issued to stop the
567 * hardware, and all transmit and receive resources are freed.
568 **/
569static int bdx_close(struct net_device *ndev)
570{
571 struct bdx_priv *priv = NULL;
572
573 ENTER;
574 priv = netdev_priv(ndev);
575
576 napi_disable(&priv->napi);
577
578 bdx_reset(priv);
579 bdx_hw_stop(priv);
580 bdx_rx_free(priv);
581 bdx_tx_free(priv);
582 RET(0);
583}
584
585/**
586 * bdx_open - Called when a network interface is made active
587 * @netdev: network interface device structure
588 *
589 * Returns 0 on success, negative value on failure
590 *
591 * The open entry point is called when a network interface is made
592 * active by the system (IFF_UP). At this point all resources needed
593 * for transmit and receive operations are allocated, the interrupt
594 * handler is registered with the OS, the watchdog timer is started,
595 * and the stack is notified that the interface is ready.
596 **/
597static int bdx_open(struct net_device *ndev)
598{
599 struct bdx_priv *priv;
600 int rc;
601
602 ENTER;
603 priv = netdev_priv(ndev);
604 bdx_reset(priv);
605 if (netif_running(ndev))
606 netif_stop_queue(priv->ndev);
607
608 if ((rc = bdx_tx_init(priv)) ||
609 (rc = bdx_rx_init(priv)) ||
610 (rc = bdx_fw_load(priv)))
611 goto err;
612
613 bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
614
615 rc = bdx_hw_start(priv);
616 if (rc)
617 goto err;
618
619 napi_enable(&priv->napi);
620
621 print_fw_id(priv->nic);
622
623 RET(0);
624
625err:
626 bdx_close(ndev);
627 RET(rc);
628}
629
630static int bdx_range_check(struct bdx_priv *priv, u32 offset)
631{
632 return (offset > (u32) (BDX_REGS_SIZE / priv->nic->port_num)) ?
633 -EINVAL : 0;
634}
635
636static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
637{
638 struct bdx_priv *priv = netdev_priv(ndev);
639 u32 data[3];
640 int error;
641
642 ENTER;
643
644 DBG("jiffies=%ld cmd=%d\n", jiffies, cmd);
645 if (cmd != SIOCDEVPRIVATE) {
646 error = copy_from_user(data, ifr->ifr_data, sizeof(data));
647 if (error) {
648 pr_err("can't copy from user\n");
649 RET(-EFAULT);
650 }
651 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
652 }
653
654 if (!capable(CAP_SYS_RAWIO))
655 return -EPERM;
656
657 switch (data[0]) {
658
659 case BDX_OP_READ:
660 error = bdx_range_check(priv, data[1]);
661 if (error < 0)
662 return error;
663 data[2] = READ_REG(priv, data[1]);
664 DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2],
665 data[2]);
666 error = copy_to_user(ifr->ifr_data, data, sizeof(data));
667 if (error)
668 RET(-EFAULT);
669 break;
670
671 case BDX_OP_WRITE:
672 error = bdx_range_check(priv, data[1]);
673 if (error < 0)
674 return error;
675 WRITE_REG(priv, data[1], data[2]);
676 DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]);
677 break;
678
679 default:
680 RET(-EOPNOTSUPP);
681 }
682 return 0;
683}
684
685static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
686{
687 ENTER;
688 if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
689 RET(bdx_ioctl_priv(ndev, ifr, cmd));
690 else
691 RET(-EOPNOTSUPP);
692}
693
694/*
695 * __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid
696 * by passing VLAN filter table to hardware
697 * @ndev network device
698 * @vid VLAN vid
699 * @op add or kill operation
700 */
701static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
702{
703 struct bdx_priv *priv = netdev_priv(ndev);
704 u32 reg, bit, val;
705
706 ENTER;
707 DBG2("vid=%d value=%d\n", (int)vid, enable);
708 if (unlikely(vid >= 4096)) {
709 pr_err("invalid VID: %u (>= 4096)\n", vid);
710 RET();
711 }
712 reg = regVLAN_0 + (vid / 32) * 4;
713 bit = 1 << vid % 32;
714 val = READ_REG(priv, reg);
715 DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit);
716 if (enable)
717 val |= bit;
718 else
719 val &= ~bit;
720 DBG2("new val %x\n", val);
721 WRITE_REG(priv, reg, val);
722 RET();
723}
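The reg/bit arithmetic above spreads the 4096-entry VLAN table over consecutive 32-bit registers, 32 vids per register. A standalone sketch of the computation (the regVLAN_0 base offset used here is illustrative, not taken from tehuti.h):

#include <stdio.h>

#define regVLAN_0 0x1840	/* illustrative base offset */

int main(void)
{
	unsigned int vid = 100;
	unsigned int reg = regVLAN_0 + (vid / 32) * 4;	/* 4th register */
	unsigned int bit = 1u << (vid % 32);		/* bit 4 */

	printf("vid %u -> reg 0x%x, bit mask 0x%x\n", vid, reg, bit);
	return 0;
}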
724
725/*
726 * bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table
727 * @ndev network device
728 * @vid VLAN vid to add
729 */
730static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
731{
732 __bdx_vlan_rx_vid(ndev, vid, 1);
733}
734
735/*
736 * bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table
737 * @ndev network device
738 * @vid VLAN vid to kill
739 */
740static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
741{
742 __bdx_vlan_rx_vid(ndev, vid, 0);
743}
744
745/**
746 * bdx_change_mtu - Change the Maximum Transfer Unit
747 * @netdev: network interface device structure
748 * @new_mtu: new value for maximum frame size
749 *
750 * Returns 0 on success, negative on failure
751 */
752static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
753{
754 ENTER;
755
756 if (new_mtu == ndev->mtu)
757 RET(0);
758
759 /* enforce minimum frame size */
760 if (new_mtu < ETH_ZLEN) {
761 netdev_err(ndev, "mtu %d is less than minimal %d\n",
762 new_mtu, ETH_ZLEN);
763 RET(-EINVAL);
764 }
765
766 ndev->mtu = new_mtu;
767 if (netif_running(ndev)) {
768 bdx_close(ndev);
769 bdx_open(ndev);
770 }
771 RET(0);
772}
773
774static void bdx_setmulti(struct net_device *ndev)
775{
776 struct bdx_priv *priv = netdev_priv(ndev);
777
778 u32 rxf_val =
779 GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN;
780 int i;
781
782 ENTER;
783 /* IMF - imperfect (hash) rx multicast filter */
784 /* PMF - perfect rx multicast filter */
785
786 /* FIXME: RXE(OFF) */
787 if (ndev->flags & IFF_PROMISC) {
788 rxf_val |= GMAC_RX_FILTER_PRM;
789 } else if (ndev->flags & IFF_ALLMULTI) {
790 /* set IMF to accept all multicast frames */
791 for (i = 0; i < MAC_MCST_HASH_NUM; i++)
792 WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
793 } else if (!netdev_mc_empty(ndev)) {
794 u8 hash;
795 struct netdev_hw_addr *ha;
796 u32 reg, val;
797
798 /* set IMF to deny all multicast frames */
799 for (i = 0; i < MAC_MCST_HASH_NUM; i++)
800 WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, 0);
801 /* set PMF to deny all multicast frames */
802 for (i = 0; i < MAC_MCST_NUM; i++) {
803 WRITE_REG(priv, regRX_MAC_MCST0 + i * 8, 0);
804 WRITE_REG(priv, regRX_MAC_MCST1 + i * 8, 0);
805 }
806
807 /* use PMF to accept first MAC_MCST_NUM (15) addresses */
808 /* TBD: sort addresses and write them in ascending order
809 * into RX_MAC_MCST regs. we skip this phase for now and accept ALL
810 * multicast frames through IMF */
811 /* accept the rest of the addresses through IMF */
812 netdev_for_each_mc_addr(ha, ndev) {
813 hash = 0;
814 for (i = 0; i < ETH_ALEN; i++)
815 hash ^= ha->addr[i];
816 reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
817 val = READ_REG(priv, reg);
818 val |= (1 << (hash % 32));
819 WRITE_REG(priv, reg, val);
820 }
821
822 } else {
823 DBG("only own mac %d\n", netdev_mc_count(ndev));
824 rxf_val |= GMAC_RX_FILTER_AB;
825 }
826 WRITE_REG(priv, regGMAC_RXF_A, rxf_val);
827 /* enable RX */
828 /* FIXME: RXE(ON) */
829 RET();
830}
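The imperfect filter above XOR-folds all six MAC address bytes into an 8-bit hash; the upper bits select the 32-bit hash register and the lower five bits select the bit within it. A standalone sketch of that computation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint8_t hash = 0;

	for (int i = 0; i < 6; i++)
		hash ^= mac[i];			/* XOR-fold the address */

	printf("hash 0x%02x -> register %u, bit %u\n",
	       hash, hash >> 5, hash % 32);	/* register 5, bit 4 */
	return 0;
}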
831
832static int bdx_set_mac(struct net_device *ndev, void *p)
833{
834 struct bdx_priv *priv = netdev_priv(ndev);
835 struct sockaddr *addr = p;
836
837 ENTER;
838 /*
839 if (netif_running(dev))
840 return -EBUSY
841 */
842 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
843 bdx_restore_mac(ndev, priv);
844 RET(0);
845}
846
847static int bdx_read_mac(struct bdx_priv *priv)
848{
849 u16 macAddress[3], i;
850 ENTER;
851
852 macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
853 macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
854 macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
855 macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
856 macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
857 macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
858 for (i = 0; i < 3; i++) {
859 priv->ndev->dev_addr[i * 2 + 1] = macAddress[i];
860 priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8;
861 }
862 RET(0);
863}
864
865static u64 bdx_read_l2stat(struct bdx_priv *priv, int reg)
866{
867 u64 val;
868
869 val = READ_REG(priv, reg);
870 val |= ((u64) READ_REG(priv, reg + 8)) << 32;
871 return val;
872}
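Each hardware statistic is a 64-bit counter exposed as two 32-bit registers 8 bytes apart; the helper above just glues the halves together, as in this standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lo = 0xdeadbeef;	/* as if read from reg */
	uint32_t hi = 0x00000001;	/* as if read from reg + 8 */
	uint64_t val = lo | ((uint64_t)hi << 32);

	printf("stat = 0x%llx\n", (unsigned long long)val);
	return 0;
}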
873
874/*Do the statistics-update work*/
875static void bdx_update_stats(struct bdx_priv *priv)
876{
877 struct bdx_stats *stats = &priv->hw_stats;
878 u64 *stats_vector = (u64 *) stats;
879 int i;
880 int addr;
881
882 /*Fill HW structure */
883 addr = 0x7200;
884 /*First 12 statistics - 0x7200 - 0x72B0 */
885 for (i = 0; i < 12; i++) {
886 stats_vector[i] = bdx_read_l2stat(priv, addr);
887 addr += 0x10;
888 }
889 BDX_ASSERT(addr != 0x72C0);
890 /* 0x72C0-0x72E0 RSRV */
891 addr = 0x72F0;
892 for (; i < 16; i++) {
893 stats_vector[i] = bdx_read_l2stat(priv, addr);
894 addr += 0x10;
895 }
896 BDX_ASSERT(addr != 0x7330);
897 /* 0x7330-0x7360 RSRV */
898 addr = 0x7370;
899 for (; i < 19; i++) {
900 stats_vector[i] = bdx_read_l2stat(priv, addr);
901 addr += 0x10;
902 }
903 BDX_ASSERT(addr != 0x73A0);
904 /* 0x73A0-0x73B0 RSRV */
905 addr = 0x73C0;
906 for (; i < 23; i++) {
907 stats_vector[i] = bdx_read_l2stat(priv, addr);
908 addr += 0x10;
909 }
910 BDX_ASSERT(addr != 0x7400);
911 BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i);
912}
913
914static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
915 u16 rxd_vlan);
916static void print_rxfd(struct rxf_desc *rxfd);
917
918/*************************************************************************
919 * Rx DB *
920 *************************************************************************/
921
922static void bdx_rxdb_destroy(struct rxdb *db)
923{
924 vfree(db);
925}
926
927static struct rxdb *bdx_rxdb_create(int nelem)
928{
929 struct rxdb *db;
930 int i;
931
932 db = vmalloc(sizeof(struct rxdb)
933 + (nelem * sizeof(int))
934 + (nelem * sizeof(struct rx_map)));
935 if (likely(db != NULL)) {
936 db->stack = (int *)(db + 1);
937 db->elems = (void *)(db->stack + nelem);
938 db->nelem = nelem;
939 db->top = nelem;
940 for (i = 0; i < nelem; i++)
941 db->stack[i] = nelem - i - 1; /* to make first allocs
942 close to db struct*/
943 }
944
945 return db;
946}
947
948static inline int bdx_rxdb_alloc_elem(struct rxdb *db)
949{
950 BDX_ASSERT(db->top <= 0);
951 return db->stack[--(db->top)];
952}
953
954static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n)
955{
956 BDX_ASSERT((n < 0) || (n >= db->nelem));
957 return db->elems + n;
958}
959
960static inline int bdx_rxdb_available(struct rxdb *db)
961{
962 return db->top;
963}
964
965static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
966{
967 BDX_ASSERT((n >= db->nelem) || (n < 0));
968 db->stack[(db->top)++] = n;
969}
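bdx_rxdb_* boils down to a fixed-size index allocator: free indices sit on a stack, alloc pops one, free pushes it back. A self-contained userspace model of the same pattern (hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct idb {
	int nelem, top;
	int *stack;
};

static struct idb *idb_create(int nelem)
{
	struct idb *db = malloc(sizeof(*db) + nelem * sizeof(int));

	if (!db)
		return NULL;
	db->stack = (int *)(db + 1);	/* stack lives right after header */
	db->nelem = nelem;
	db->top = nelem;		/* all indices free initially */
	for (int i = 0; i < nelem; i++)
		db->stack[i] = nelem - i - 1;	/* low indices pop first */
	return db;
}

int main(void)
{
	struct idb *db = idb_create(4);

	if (!db)
		return 1;
	int a = db->stack[--db->top];	/* alloc -> 0 */
	int b = db->stack[--db->top];	/* alloc -> 1 */

	db->stack[db->top++] = a;	/* free index 0, reusable next */
	printf("got %d and %d, %d indices free\n", a, b, db->top);
	free(db);
	return 0;
}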
970
971/*************************************************************************
972 * Rx Init *
973 *************************************************************************/
974
975/* bdx_rx_init - initialize RX all related HW and SW resources
976 * @priv - NIC private structure
977 *
978 * Returns 0 on success, negative value on failure
979 *
980 * It creates rxf and rxd fifos, updates relevant HW registers and preallocates
981 * skbs for rx. It assumes that Rx is disabled in HW.
982 * funcs are grouped for better cache usage
983 *
984 * RxD fifo is smaller than RxF fifo by design. Upon high load, RxD will be
985 * filled and packets will be dropped by the nic without getting into the host
986 * or causing an interrupt. Anyway, in that condition the host has no chance to
987 * process all packets, but dropping them in the nic is cheaper, since it takes 0 cpu cycles
988 */
989
990/* TBD: ensure proper packet size */
991
992static int bdx_rx_init(struct bdx_priv *priv)
993{
994 ENTER;
995
996 if (bdx_fifo_init(priv, &priv->rxd_fifo0.m, priv->rxd_size,
997 regRXD_CFG0_0, regRXD_CFG1_0,
998 regRXD_RPTR_0, regRXD_WPTR_0))
999 goto err_mem;
1000 if (bdx_fifo_init(priv, &priv->rxf_fifo0.m, priv->rxf_size,
1001 regRXF_CFG0_0, regRXF_CFG1_0,
1002 regRXF_RPTR_0, regRXF_WPTR_0))
1003 goto err_mem;
1004 priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
1005 sizeof(struct rxf_desc));
1006 if (!priv->rxdb)
1007 goto err_mem;
1008
1009 priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
1010 return 0;
1011
1012err_mem:
1013 netdev_err(priv->ndev, "Rx init failed\n");
1014 return -ENOMEM;
1015}
1016
1017/* bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo
1018 * @priv - NIC private structure
1019 * @f - RXF fifo
1020 */
1021static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
1022{
1023 struct rx_map *dm;
1024 struct rxdb *db = priv->rxdb;
1025 u16 i;
1026
1027 ENTER;
1028 DBG("total=%d free=%d busy=%d\n", db->nelem, bdx_rxdb_available(db),
1029 db->nelem - bdx_rxdb_available(db));
1030 while (bdx_rxdb_available(db) > 0) {
1031 i = bdx_rxdb_alloc_elem(db);
1032 dm = bdx_rxdb_addr_elem(db, i);
1033 dm->dma = 0;
1034 }
1035 for (i = 0; i < db->nelem; i++) {
1036 dm = bdx_rxdb_addr_elem(db, i);
1037 if (dm->dma) {
1038 pci_unmap_single(priv->pdev,
1039 dm->dma, f->m.pktsz,
1040 PCI_DMA_FROMDEVICE);
1041 dev_kfree_skb(dm->skb);
1042 }
1043 }
1044}
1045
1046/* bdx_rx_free - release all Rx resources
1047 * @priv - NIC private structure
1048 * It assumes that Rx is disabled in HW
1049 */
1050static void bdx_rx_free(struct bdx_priv *priv)
1051{
1052 ENTER;
1053 if (priv->rxdb) {
1054 bdx_rx_free_skbs(priv, &priv->rxf_fifo0);
1055 bdx_rxdb_destroy(priv->rxdb);
1056 priv->rxdb = NULL;
1057 }
1058 bdx_fifo_free(priv, &priv->rxf_fifo0.m);
1059 bdx_fifo_free(priv, &priv->rxd_fifo0.m);
1060
1061 RET();
1062}
1063
1064/*************************************************************************
1065 * Rx Engine *
1066 *************************************************************************/
1067
1068/* bdx_rx_alloc_skbs - fill rxf fifo with new skbs
1069 * @priv - nic's private structure
1070 * @f - RXF fifo that needs skbs
1071 * It allocates skbs, builds rxf descriptors and pushes them into the rxf fifo.
1072 * skb's virtual and physical addresses are stored in skb db.
1073 * To calculate free space, func uses cached values of RPTR and WPTR
1074 * When needed, it also updates RPTR and WPTR.
1075 */
1076
1077/* TBD: do not update WPTR if no desc were written */
1078
1079static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
1080{
1081 struct sk_buff *skb;
1082 struct rxf_desc *rxfd;
1083 struct rx_map *dm;
1084 int dno, delta, idx;
1085 struct rxdb *db = priv->rxdb;
1086
1087 ENTER;
1088 dno = bdx_rxdb_available(db) - 1;
1089 while (dno > 0) {
1090 skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN);
1091 if (!skb) {
1092 pr_err("NO MEM: dev_alloc_skb failed\n");
1093 break;
1094 }
1095 skb->dev = priv->ndev;
1096 skb_reserve(skb, NET_IP_ALIGN);
1097
1098 idx = bdx_rxdb_alloc_elem(db);
1099 dm = bdx_rxdb_addr_elem(db, idx);
1100 dm->dma = pci_map_single(priv->pdev,
1101 skb->data, f->m.pktsz,
1102 PCI_DMA_FROMDEVICE);
1103 dm->skb = skb;
1104 rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
1105 rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */
1106 rxfd->va_lo = idx;
1107 rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
1108 rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
1109 rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
1110 print_rxfd(rxfd);
1111
1112 f->m.wptr += sizeof(struct rxf_desc);
1113 delta = f->m.wptr - f->m.memsz;
1114 if (unlikely(delta >= 0)) {
1115 f->m.wptr = delta;
1116 if (delta > 0) {
1117 memcpy(f->m.va, f->m.va + f->m.memsz, delta);
1118 DBG("wrapped descriptor\n");
1119 }
1120 }
1121 dno--;
1122 }
1123 /*TBD: to do - delayed rxf wptr like in txd */
1124 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1125 RET();
1126}
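The wptr handling above relies on the 1K of extra space allocated in bdx_fifo_init: a descriptor may be written past the nominal fifo end, and the spilled tail is then copied back to the start. A standalone sketch of that wrap arithmetic (sizes illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
	enum { MEMSZ = 4096, EXTRA = 1024, DESC_SZ = 16 };
	char fifo[MEMSZ + EXTRA];
	int wptr = MEMSZ - 8;			/* 8 bytes before the end */

	memset(fifo + wptr, 0xAB, DESC_SZ);	/* desc spills past the end */
	wptr += DESC_SZ;

	int delta = wptr - MEMSZ;		/* 8 bytes spilled over */
	if (delta >= 0) {
		wptr = delta;
		if (delta > 0)			/* fold the tail back in */
			memcpy(fifo, fifo + MEMSZ, delta);
	}
	printf("new wptr = %d\n", wptr);	/* 8 */
	return 0;
}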
1127
1128static inline void
1129NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
1130 struct sk_buff *skb)
1131{
1132 ENTER;
1133 DBG("rxdd->flags.bits.vtag=%d\n", GET_RXD_VTAG(rxd_val1));
1134 if (GET_RXD_VTAG(rxd_val1)) {
1135 DBG("%s: vlan rcv vlan '%x' vtag '%x'\n",
1136 priv->ndev->name,
1137 GET_RXD_VLAN_ID(rxd_vlan),
1138 GET_RXD_VTAG(rxd_val1));
1139 __vlan_hwaccel_put_tag(skb, GET_RXD_VLAN_TCI(rxd_vlan));
1140 }
1141 netif_receive_skb(skb);
1142}
1143
1144static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
1145{
1146 struct rxf_desc *rxfd;
1147 struct rx_map *dm;
1148 struct rxf_fifo *f;
1149 struct rxdb *db;
1150 struct sk_buff *skb;
1151 int delta;
1152
1153 ENTER;
1154 DBG("priv=%p rxdd=%p\n", priv, rxdd);
1155 f = &priv->rxf_fifo0;
1156 db = priv->rxdb;
1157 DBG("db=%p f=%p\n", db, f);
1158 dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
1159 DBG("dm=%p\n", dm);
1160 skb = dm->skb;
1161 rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
1162 rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */
1163 rxfd->va_lo = rxdd->va_lo;
1164 rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
1165 rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
1166 rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
1167 print_rxfd(rxfd);
1168
1169 f->m.wptr += sizeof(struct rxf_desc);
1170 delta = f->m.wptr - f->m.memsz;
1171 if (unlikely(delta >= 0)) {
1172 f->m.wptr = delta;
1173 if (delta > 0) {
1174 memcpy(f->m.va, f->m.va + f->m.memsz, delta);
1175 DBG("wrapped descriptor\n");
1176 }
1177 }
1178 RET();
1179}
1180
1181/* bdx_rx_receive - receives full packets from RXD fifo and pass them to OS
1182 * NOTE: a special treatment is given to non-contiguous descriptors
1183 * that start near the end, wrap around and continue at the beginning. A second
1184 * part is copied right after the first, and then the descriptor is interpreted
1185 * as normal. The fifo has extra space to allow such operations
1186 * @priv - nic's private structure
1187 * @f - RXF fifo that needs skbs
1188 */
1189
1190 /* TBD: replace memcpy func call by explicit inline asm */
1191
1192static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
1193{
1194 struct net_device *ndev = priv->ndev;
1195 struct sk_buff *skb, *skb2;
1196 struct rxd_desc *rxdd;
1197 struct rx_map *dm;
1198 struct rxf_fifo *rxf_fifo;
1199 int tmp_len, size;
1200 int done = 0;
1201 int max_done = BDX_MAX_RX_DONE;
1202 struct rxdb *db = NULL;
1203 /* Unmarshalled descriptor - copy of descriptor in host order */
1204 u32 rxd_val1;
1205 u16 len;
1206 u16 rxd_vlan;
1207
1208 ENTER;
1209 max_done = budget;
1210
1211 f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR;
1212
1213 size = f->m.wptr - f->m.rptr;
1214 if (size < 0)
1215 size = f->m.memsz + size; /* size is negative :-) */
1216
1217 while (size > 0) {
1218
1219 rxdd = (struct rxd_desc *)(f->m.va + f->m.rptr);
1220 rxd_val1 = CPU_CHIP_SWAP32(rxdd->rxd_val1);
1221
1222 len = CPU_CHIP_SWAP16(rxdd->len);
1223
1224 rxd_vlan = CPU_CHIP_SWAP16(rxdd->rxd_vlan);
1225
1226 print_rxdd(rxdd, rxd_val1, len, rxd_vlan);
1227
1228 tmp_len = GET_RXD_BC(rxd_val1) << 3;
1229 BDX_ASSERT(tmp_len <= 0);
1230 size -= tmp_len;
1231 if (size < 0) /* test for partially arrived descriptor */
1232 break;
1233
1234 f->m.rptr += tmp_len;
1235
1236 tmp_len = f->m.rptr - f->m.memsz;
1237 if (unlikely(tmp_len >= 0)) {
1238 f->m.rptr = tmp_len;
1239 if (tmp_len > 0) {
1240 DBG("wrapped desc rptr=%d tmp_len=%d\n",
1241 f->m.rptr, tmp_len);
1242 memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
1243 }
1244 }
1245
1246 if (unlikely(GET_RXD_ERR(rxd_val1))) {
1247 DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1));
1248 ndev->stats.rx_errors++;
1249 bdx_recycle_skb(priv, rxdd);
1250 continue;
1251 }
1252
1253 rxf_fifo = &priv->rxf_fifo0;
1254 db = priv->rxdb;
1255 dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
1256 skb = dm->skb;
1257
1258 if (len < BDX_COPYBREAK &&
1259 (skb2 = dev_alloc_skb(len + NET_IP_ALIGN))) {
1260 skb_reserve(skb2, NET_IP_ALIGN);
1261 /*skb_put(skb2, len); */
1262 pci_dma_sync_single_for_cpu(priv->pdev,
1263 dm->dma, rxf_fifo->m.pktsz,
1264 PCI_DMA_FROMDEVICE);
1265 memcpy(skb2->data, skb->data, len);
1266 bdx_recycle_skb(priv, rxdd);
1267 skb = skb2;
1268 } else {
1269 pci_unmap_single(priv->pdev,
1270 dm->dma, rxf_fifo->m.pktsz,
1271 PCI_DMA_FROMDEVICE);
1272 bdx_rxdb_free_elem(db, rxdd->va_lo);
1273 }
1274
1275 ndev->stats.rx_bytes += len;
1276
1277 skb_put(skb, len);
1278 skb->protocol = eth_type_trans(skb, ndev);
1279
1280 /* Non-IP packets aren't checksum-offloaded */
1281 if (GET_RXD_PKT_ID(rxd_val1) == 0)
1282 skb_checksum_none_assert(skb);
1283 else
1284 skb->ip_summed = CHECKSUM_UNNECESSARY;
1285
1286 NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);
1287
1288 if (++done >= max_done)
1289 break;
1290 }
1291
1292 ndev->stats.rx_packets += done;
1293
1294 /* FIXME: do smth to minimize pci accesses */
1295 WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
1296
1297 bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
1298
1299 RET(done);
1300}
1301
1302/*************************************************************************
1303 * Debug / Temporary Code *
1304 *************************************************************************/
1305static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
1306 u16 rxd_vlan)
1307{
1308 DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
1309 GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
1310 GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
1311 GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
1312 GET_RXD_VTAG(rxd_val1), len, GET_RXD_VLAN_ID(rxd_vlan),
1313 GET_RXD_CFI(rxd_vlan), GET_RXD_PRIO(rxd_vlan), rxdd->va_lo,
1314 rxdd->va_hi);
1315}
1316
1317static void print_rxfd(struct rxf_desc *rxfd)
1318{
1319 DBG("=== RxF desc CHIP ORDER/ENDIANNESS =============\n"
1320 "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
1321 rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
1322}
1323
1324/*
1325 * TX HW/SW interaction overview
1326 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1327 * There are 2 types of TX communication channels between driver and NIC.
1328 * 1) TX Free Fifo - TXF - holds ack descriptors for sent packets
1329 * 2) TX Data Fifo - TXD - holds descriptors of full buffers.
1330 *
1331 * Currently the NIC supports TSO, checksumming and gather DMA;
1332 * UFO and IP fragmentation are on the way.
1333 *
1334 * TX SW Data Structures
1335 * ~~~~~~~~~~~~~~~~~~~~~
1336 * txdb - used to keep track of all skbs owned by SW and their dma addresses.
1337 * For the TX case, ownership lasts from getting the packet via hard_xmit until
1338 * HW acknowledges the send via TXF descriptors.
1339 * Implemented as a cyclic buffer.
1340 * fifo - keeps info about the fifo's size and location, relevant HW registers,
1341 * usage and skb db. Each TXD and TXF Fifo has its own fifo structure.
1342 * Implemented as simple struct.
1343 *
1344 * TX SW Execution Flow
1345 * ~~~~~~~~~~~~~~~~~~~~
1346 * The OS calls the driver's hard_xmit method with a packet to send.
1347 * The driver creates DMA mappings, builds TXD descriptors and kicks HW
1348 * by updating TXD WPTR.
1349 * When the packet is sent, HW writes us a TXF descriptor and SW frees the skb.
1350 * To prevent TXD fifo overflow without reading HW registers every time,
1351 * SW deploys the "tx level" technique.
1352 * Upon start up, tx level is initialized to the TXD fifo length.
1353 * For every sent packet, SW gets its TXD descriptor size
1354 * (from a precalculated array) and subtracts it from tx level.
1355 * The size is also stored in txdb. When a TXF ack arrives, SW fetches the size
1356 * of the original TXD descriptor from txdb and adds it back to tx level.
1357 * When tx level drops below a predefined threshold, the driver
1358 * stops the TX queue. When tx level rises back above that threshold,
1359 * the tx queue is enabled again.
1360 *
1361 * This technique avoids excessive reading of the RPTR and WPTR registers.
1362 * As our benchmarks show, it adds 1.5 Gbit/sec to the NIC's throughput.
1363 */
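The "tx level" bookkeeping described above, reduced to its core in a standalone sketch. The threshold constants are illustrative stand-ins for BDX_MAX_TX_LEVEL / BDX_MIN_TX_LEVEL:

#include <stdio.h>

#define MAX_LEVEL 16384		/* illustrative, ~TXD fifo length */
#define MIN_LEVEL 256		/* illustrative stop/wake threshold */

static int tx_level = MAX_LEVEL;
static int queue_stopped;

static void xmit(int desc_bytes)
{
	tx_level -= desc_bytes;		/* TXD space consumed, no reg read */
	if (tx_level < MIN_LEVEL)
		queue_stopped = 1;	/* netif_stop_queue() */
}

static void txf_ack(int desc_bytes)
{
	tx_level += desc_bytes;		/* space reclaimed on TXF ack */
	if (queue_stopped && tx_level >= MIN_LEVEL)
		queue_stopped = 0;	/* netif_wake_queue() */
}

int main(void)
{
	int sent = 0;

	while (!queue_stopped) {	/* fill the fifo with 64B descs */
		xmit(64);
		sent += 64;
	}
	printf("stopped at level %d after %d bytes\n", tx_level, sent);
	txf_ack(sent);			/* HW acks everything */
	printf("stopped=%d level=%d\n", queue_stopped, tx_level);
	return 0;
}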
1364
1365/*************************************************************************
1366 * Tx DB *
1367 *************************************************************************/
1368static inline int bdx_tx_db_size(struct txdb *db)
1369{
1370 int taken = db->wptr - db->rptr;
1371 if (taken < 0)
1372 taken = db->size + 1 + taken; /* (size + 1) equals memsz */
1373
1374 return db->size - taken;
1375}
1376
1377/* __bdx_tx_ptr_next - helper function, increment read/write pointer + wrap
1378 * @d - tx data base
1379 * @ptr - read or write pointer
1380 */
1381static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
1382{
1383 BDX_ASSERT(db == NULL || pptr == NULL); /* sanity */
1384
1385 BDX_ASSERT(*pptr != db->rptr && /* expect either read */
1386 *pptr != db->wptr); /* or write pointer */
1387
1388 BDX_ASSERT(*pptr < db->start || /* pointer has to be */
1389 *pptr >= db->end); /* in range */
1390
1391 ++*pptr;
1392 if (unlikely(*pptr == db->end))
1393 *pptr = db->start;
1394}
1395
1396/* bdx_tx_db_inc_rptr - increment read pointer
1397 * @d - tx data base
1398 */
1399static inline void bdx_tx_db_inc_rptr(struct txdb *db)
1400{
1401 BDX_ASSERT(db->rptr == db->wptr); /* can't read from empty db */
1402 __bdx_tx_db_ptr_next(db, &db->rptr);
1403}
1404
1405/* bdx_tx_db_inc_rptr - increment write pointer
1406 * @d - tx data base
1407 */
1408static inline void bdx_tx_db_inc_wptr(struct txdb *db)
1409{
1410 __bdx_tx_db_ptr_next(db, &db->wptr);
1411 BDX_ASSERT(db->rptr == db->wptr); /* we can not get empty db as
1412 a result of write */
1413}
1414
1415/* bdx_tx_db_init - creates and initializes tx db
1416 * @d - tx data base
1417 * @sz_type - size of tx fifo
1418 * Returns 0 on success, error code otherwise
1419 */
1420static int bdx_tx_db_init(struct txdb *d, int sz_type)
1421{
1422 int memsz = FIFO_SIZE * (1 << (sz_type + 1));
1423
1424 d->start = vmalloc(memsz);
1425 if (!d->start)
1426 return -ENOMEM;
1427
1428 /*
1429 * In order to differentiate between db is empty and db is full
1430 * states at least one element should always be empty in order to
1431 * avoid rptr == wptr which means db is empty
1432 */
1433 d->size = memsz / sizeof(struct tx_map) - 1;
1434 d->end = d->start + d->size + 1; /* just after last element */
1435
1436 /* all dbs are created equally empty */
1437 d->rptr = d->start;
1438 d->wptr = d->start;
1439
1440 return 0;
1441}
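Keeping one element permanently empty, as the comment above explains, is the classic way to tell a full ring from an empty one without a separate counter. A minimal demonstration:

#include <stdio.h>

#define SLOTS 8		/* usable capacity is SLOTS - 1 */

int main(void)
{
	int rptr = 0, wptr = 0;

	printf("empty: %d\n", rptr == wptr);		/* 1 */

	/* fill until only the sentinel slot remains */
	while ((wptr + 1) % SLOTS != rptr)
		wptr = (wptr + 1) % SLOTS;

	printf("full after %d writes, rptr != wptr: %d\n",
	       wptr, rptr != wptr);			/* 7 writes, 1 */
	return 0;
}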
1442
1443/* bdx_tx_db_close - closes tx db and frees all memory
1444 * @d - tx data base
1445 */
1446static void bdx_tx_db_close(struct txdb *d)
1447{
1448 BDX_ASSERT(d == NULL);
1449
1450 vfree(d->start);
1451 d->start = NULL;
1452}
1453
1454/*************************************************************************
1455 * Tx Engine *
1456 *************************************************************************/
1457
1458/* sizes of tx desc (including padding if needed) as function
1459 * of skb's frag number */
1460static struct {
1461 u16 bytes;
1462 u16 qwords; /* qword = 64 bit */
1463} txd_sizes[MAX_SKB_FRAGS + 1];
1464
1465/* txdb_map_skb - creates and stores dma mappings for skb's data blocks
1466 * @priv - NIC private structure
1467 * @skb - socket buffer to map
1468 *
1469 * It makes dma mappings for skb's data blocks and writes them to PBL of
1470 * the new tx descriptor. It also stores them in the tx db, so they can be
1471 * unmapped after the data is sent. It is the responsibility of the caller to
1472 * make sure that there is enough space in the tx db. The last element holds a
1473 * pointer to the skb itself and is marked with a negative length
1474 */
1475static inline void
1476bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
1477 struct txd_desc *txdd)
1478{
1479 struct txdb *db = &priv->txdb;
1480 struct pbl *pbl = &txdd->pbl[0];
1481 int nr_frags = skb_shinfo(skb)->nr_frags;
1482 int i;
1483
1484 db->wptr->len = skb_headlen(skb);
1485 db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
1486 db->wptr->len, PCI_DMA_TODEVICE);
1487 pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
1488 pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
1489 pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
1490 DBG("=== pbl len: 0x%x ================\n", pbl->len);
1491 DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo);
1492 DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi);
1493 bdx_tx_db_inc_wptr(db);
1494
1495 for (i = 0; i < nr_frags; i++) {
1496 struct skb_frag_struct *frag;
1497
1498 frag = &skb_shinfo(skb)->frags[i];
1499 db->wptr->len = frag->size;
1500 db->wptr->addr.dma =
1501 pci_map_page(priv->pdev, frag->page, frag->page_offset,
1502 frag->size, PCI_DMA_TODEVICE);
1503
1504 pbl++;
1505 pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
1506 pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
1507 pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
1508 bdx_tx_db_inc_wptr(db);
1509 }
1510
1511 /* add skb clean up info. */
1512 db->wptr->len = -txd_sizes[nr_frags].bytes;
1513 db->wptr->addr.skb = skb;
1514 bdx_tx_db_inc_wptr(db);
1515}
1516
1517/* init_txd_sizes - precalculate sizes of descriptors for skbs up to 16 frags
1518 * number of frags is used as index to fetch the correct descriptor size,
1519 * instead of calculating it each time */
1520static void __init init_txd_sizes(void)
1521{
1522 int i, lwords;
1523
1524 /* 7 - is number of lwords in txd with one phys buffer
1525 * 3 - is number of lwords used for every additional phys buffer */
1526 for (i = 0; i < MAX_SKB_FRAGS + 1; i++) {
1527 lwords = 7 + (i * 3);
1528 if (lwords & 1)
1529 lwords++; /* pad it with 1 lword */
1530 txd_sizes[i].qwords = lwords >> 1;
1531 txd_sizes[i].bytes = lwords << 2;
1532 }
1533}
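Worked through for small frag counts, the size math above gives: the base descriptor is 7 lwords, each extra physical buffer adds 3, and odd totals are padded up to a qword boundary. A standalone check:

#include <stdio.h>

int main(void)
{
	for (int frags = 0; frags <= 3; frags++) {
		int lwords = 7 + frags * 3;	/* base + 3 per extra buffer */

		if (lwords & 1)
			lwords++;		/* pad to qword boundary */
		printf("frags %d -> %d qwords, %d bytes\n",
		       frags, lwords >> 1, lwords << 2);
	}
	return 0;	/* 0->32B, 1->40B, 2->56B, 3->64B */
}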
1534
1535/* bdx_tx_init - initialize all Tx related stuff.
1536 * Namely, TXD and TXF fifos, database etc */
1537static int bdx_tx_init(struct bdx_priv *priv)
1538{
1539 if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size,
1540 regTXD_CFG0_0,
1541 regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0))
1542 goto err_mem;
1543 if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size,
1544 regTXF_CFG0_0,
1545 regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0))
1546 goto err_mem;
1547
1548 /* The TX db has to keep mappings for all packets sent (on TxD)
1549 * and not yet reclaimed (on TxF) */
1550 if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size)))
1551 goto err_mem;
1552
1553 priv->tx_level = BDX_MAX_TX_LEVEL;
1554#ifdef BDX_DELAY_WPTR
1555 priv->tx_update_mark = priv->tx_level - 1024;
1556#endif
1557 return 0;
1558
1559err_mem:
1560 netdev_err(priv->ndev, "Tx init failed\n");
1561 return -ENOMEM;
1562}
1563
1564/*
1565 * bdx_tx_space - calculates available space in TX fifo
1566 * @priv - NIC private structure
1567 * Returns available space in TX fifo in bytes
1568 */
1569static inline int bdx_tx_space(struct bdx_priv *priv)
1570{
1571 struct txd_fifo *f = &priv->txd_fifo0;
1572 int fsize;
1573
1574 f->m.rptr = READ_REG(priv, f->m.reg_RPTR) & TXF_WPTR_WR_PTR;
1575 fsize = f->m.rptr - f->m.wptr;
1576 if (fsize <= 0)
1577 fsize = f->m.memsz + fsize;
1578 return fsize;
1579}
1580
1581/* bdx_tx_transmit - send packet to NIC
1582 * @skb - packet to send
1583 * ndev - network device assigned to NIC
1584 * Return codes:
1585 * o NETDEV_TX_OK everything ok.
1586 * o NETDEV_TX_BUSY Cannot transmit packet, try later
1587 * Usually a bug, means queue start/stop flow control is broken in
1588 * the driver. Note: the driver must NOT put the skb in its DMA ring.
1589 * o NETDEV_TX_LOCKED Locking failed, please retry quickly.
1590 */
1591static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
1592 struct net_device *ndev)
1593{
1594 struct bdx_priv *priv = netdev_priv(ndev);
1595 struct txd_fifo *f = &priv->txd_fifo0;
1596 int txd_checksum = 7; /* full checksum */
1597 int txd_lgsnd = 0;
1598 int txd_vlan_id = 0;
1599 int txd_vtag = 0;
1600 int txd_mss = 0;
1601
1602 int nr_frags = skb_shinfo(skb)->nr_frags;
1603 struct txd_desc *txdd;
1604 int len;
1605 unsigned long flags;
1606
1607 ENTER;
1608 local_irq_save(flags);
1609 if (!spin_trylock(&priv->tx_lock)) {
1610 local_irq_restore(flags);
1611 DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
1612 BDX_DRV_NAME, ndev->name);
1613 return NETDEV_TX_LOCKED;
1614 }
1615
1616 /* build tx descriptor */
1617 BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
1618 txdd = (struct txd_desc *)(f->m.va + f->m.wptr);
1619 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
1620 txd_checksum = 0;
1621
1622 if (skb_shinfo(skb)->gso_size) {
1623 txd_mss = skb_shinfo(skb)->gso_size;
1624 txd_lgsnd = 1;
1625 DBG("skb %p skb len %d gso size = %d\n", skb, skb->len,
1626 txd_mss);
1627 }
1628
1629 if (vlan_tx_tag_present(skb)) {
1630 /*Cut VLAN ID to 12 bits */
1631 txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12);
1632 txd_vtag = 1;
1633 }
1634
1635 txdd->length = CPU_CHIP_SWAP16(skb->len);
1636 txdd->mss = CPU_CHIP_SWAP16(txd_mss);
1637 txdd->txd_val1 =
1638 CPU_CHIP_SWAP32(TXD_W1_VAL
1639 (txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag,
1640 txd_lgsnd, txd_vlan_id));
1641 DBG("=== TxD desc =====================\n");
1642 DBG("=== w1: 0x%x ================\n", txdd->txd_val1);
1643 DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length);
1644
1645 bdx_tx_map_skb(priv, skb, txdd);
1646
1647 /* increment TXD write pointer. In case of
1648 fifo wrapping copy the remainder of the descriptor
1649 to the beginning */
1650 f->m.wptr += txd_sizes[nr_frags].bytes;
1651 len = f->m.wptr - f->m.memsz;
1652 if (unlikely(len >= 0)) {
1653 f->m.wptr = len;
1654 if (len > 0) {
1655 BDX_ASSERT(len > f->m.memsz);
1656 memcpy(f->m.va, f->m.va + f->m.memsz, len);
1657 }
1658 }
1659 BDX_ASSERT(f->m.wptr >= f->m.memsz); /* finished with valid wptr */
1660
1661 priv->tx_level -= txd_sizes[nr_frags].bytes;
1662 BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
1663#ifdef BDX_DELAY_WPTR
1664 if (priv->tx_level > priv->tx_update_mark) {
1665 /* Force memory writes to complete before letting h/w
1666 know there are new descriptors to fetch.
1667 (might be needed on platforms like IA64)
1668 wmb(); */
1669 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1670 } else {
1671 if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) {
1672 priv->tx_noupd = 0;
1673 WRITE_REG(priv, f->m.reg_WPTR,
1674 f->m.wptr & TXF_WPTR_WR_PTR);
1675 }
1676 }
1677#else
1678 /* Force memory writes to complete before letting h/w
1679 know there are new descriptors to fetch.
1680 (might be needed on platforms like IA64)
1681 wmb(); */
1682 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1683
1684#endif
1685#ifdef BDX_LLTX
1686 ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
1687#endif
1688 ndev->stats.tx_packets++;
1689 ndev->stats.tx_bytes += skb->len;
1690
1691 if (priv->tx_level < BDX_MIN_TX_LEVEL) {
1692 DBG("%s: %s: TX Q STOP level %d\n",
1693 BDX_DRV_NAME, ndev->name, priv->tx_level);
1694 netif_stop_queue(ndev);
1695 }
1696
1697 spin_unlock_irqrestore(&priv->tx_lock, flags);
1698 return NETDEV_TX_OK;
1699}
1700
1701/* bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
1702 * @priv - bdx adapter
1703 * It scans TXF fifo for descriptors, frees DMA mappings and reports to OS
1704 * that those packets were sent
1705 */
1706static void bdx_tx_cleanup(struct bdx_priv *priv)
1707{
1708 struct txf_fifo *f = &priv->txf_fifo0;
1709 struct txdb *db = &priv->txdb;
1710 int tx_level = 0;
1711
1712 ENTER;
1713 f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK;
1714 BDX_ASSERT(f->m.rptr >= f->m.memsz); /* started with valid rptr */
1715
1716 while (f->m.wptr != f->m.rptr) {
1717 f->m.rptr += BDX_TXF_DESC_SZ;
1718 f->m.rptr &= f->m.size_mask;
1719
1720 /* unmap all the fragments */
1721 /* first has to come tx_maps containing dma */
1722 BDX_ASSERT(db->rptr->len == 0);
1723 do {
1724 BDX_ASSERT(db->rptr->addr.dma == 0);
1725 pci_unmap_page(priv->pdev, db->rptr->addr.dma,
1726 db->rptr->len, PCI_DMA_TODEVICE);
1727 bdx_tx_db_inc_rptr(db);
1728 } while (db->rptr->len > 0);
1729 tx_level -= db->rptr->len; /* '-' because len is negative */
1730
1731 /* now should come skb pointer - free it */
1732 dev_kfree_skb_irq(db->rptr->addr.skb);
1733 bdx_tx_db_inc_rptr(db);
1734 }
1735
1736 /* let h/w know which TXF descriptors were cleaned */
1737 BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz);
1738 WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
1739
1740	/* We reclaimed resources, so in case the Q is stopped by xmit callback,
1741	 * we resume the transmission and use tx_lock to synchronize with xmit. */
1742 spin_lock(&priv->tx_lock);
1743 priv->tx_level += tx_level;
1744 BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
1745#ifdef BDX_DELAY_WPTR
1746 if (priv->tx_noupd) {
1747 priv->tx_noupd = 0;
1748 WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR,
1749 priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR);
1750 }
1751#endif
1752
1753 if (unlikely(netif_queue_stopped(priv->ndev) &&
1754 netif_carrier_ok(priv->ndev) &&
1755 (priv->tx_level >= BDX_MIN_TX_LEVEL))) {
1756 DBG("%s: %s: TX Q WAKE level %d\n",
1757 BDX_DRV_NAME, priv->ndev->name, priv->tx_level);
1758 netif_wake_queue(priv->ndev);
1759 }
1760 spin_unlock(&priv->tx_lock);
1761}
1762
1763/* bdx_tx_free_skbs - frees all skbs from TXD fifo.
1764 * It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod
1765 */
1766static void bdx_tx_free_skbs(struct bdx_priv *priv)
1767{
1768 struct txdb *db = &priv->txdb;
1769
1770 ENTER;
1771 while (db->rptr != db->wptr) {
1772		if (likely(db->rptr->len > 0))	/* dma fragment entry */
1773			pci_unmap_page(priv->pdev, db->rptr->addr.dma,
1774				       db->rptr->len, PCI_DMA_TODEVICE);
1775		else				/* skb entry: len is negative */
1776			dev_kfree_skb(db->rptr->addr.skb);
1777 bdx_tx_db_inc_rptr(db);
1778 }
1779 RET();
1780}
1781
1782/* bdx_tx_free - frees all Tx resources */
1783static void bdx_tx_free(struct bdx_priv *priv)
1784{
1785 ENTER;
1786 bdx_tx_free_skbs(priv);
1787 bdx_fifo_free(priv, &priv->txd_fifo0.m);
1788 bdx_fifo_free(priv, &priv->txf_fifo0.m);
1789 bdx_tx_db_close(&priv->txdb);
1790}
1791
1792/* bdx_tx_push_desc - push descriptor to TxD fifo
1793 * @priv - NIC private structure
1794 * @data - desc's data
1795 * @size - desc's size
1796 *
1797 * Pushes desc to TxD fifo and overlaps it if needed.
1798 * NOTE: this func does not check for available space; that is the
1799 * caller's responsibility. Neither does it check that the data size
1800 * is smaller than the fifo size.
1801 */
1802static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
1803{
1804 struct txd_fifo *f = &priv->txd_fifo0;
1805 int i = f->m.memsz - f->m.wptr;
1806
1807 if (size == 0)
1808 return;
1809
1810 if (i > size) {
1811 memcpy(f->m.va + f->m.wptr, data, size);
1812 f->m.wptr += size;
1813 } else {
1814 memcpy(f->m.va + f->m.wptr, data, i);
1815 f->m.wptr = size - i;
1816 memcpy(f->m.va, data + i, f->m.wptr);
1817 }
1818 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1819}
1820
1821/* bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
1822 * @priv - NIC private structure
1823 * @data - desc's data
1824 * @size - desc's size
1825 *
1826 * NOTE: this func does check for available space and, if necessary, waits for
1827 * NIC to read existing data before writing new one.
1828 */
1829static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
1830{
1831 int timer = 0;
1832 ENTER;
1833
1834 while (size > 0) {
1835		/* we subtract 8 because a completely full fifo has rptr == wptr,
1836		   which is indistinguishable from an empty one; keeping a few
1837		   bytes unused lets us (and hw) tell the two states apart */
1838 int avail = bdx_tx_space(priv) - 8;
1839 if (avail <= 0) {
1840 if (timer++ > 300) { /* prevent endless loop */
1841 DBG("timeout while writing desc to TxD fifo\n");
1842 break;
1843 }
1844 udelay(50); /* give hw a chance to clean fifo */
1845 continue;
1846 }
1847 avail = min(avail, size);
1848 DBG("about to push %d bytes starting %p size %d\n", avail,
1849 data, size);
1850 bdx_tx_push_desc(priv, data, avail);
1851 size -= avail;
1852 data += avail;
1853 }
1854 RET();
1855}
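
The 8-byte reservation in bdx_tx_push_desc_safe() is worth a note: with only
a read and a write offset, a byte-granular ring cannot distinguish completely
full from completely empty, since both states give rptr == wptr.
bdx_tx_space() itself is defined earlier in this file and is not part of this
hunk; the helper below is a hypothetical stand-in that shows the ambiguity:

	#include <stdio.h>

	/* hypothetical stand-in for bdx_tx_space(): free bytes in a ring
	 * tracked only by a read offset and a write offset */
	static int ring_space(int rptr, int wptr, int memsz)
	{
		int s = rptr - wptr;

		if (s <= 0)
			s += memsz;
		return s;
	}

	int main(void)
	{
		/* an empty 4096-byte ring reports all 4096 bytes free; if the
		 * producer actually wrote them, wptr would wrap back onto rptr
		 * and the full ring would again look empty, hence the driver
		 * never lets the ring fill to the last bytes */
		printf("%d\n", ring_space(0, 0, 4096));		/* 4096 */
		printf("%d\n", ring_space(512, 480, 4096));	/* 32 */
		return 0;
	}
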
1856
1857static const struct net_device_ops bdx_netdev_ops = {
1858 .ndo_open = bdx_open,
1859 .ndo_stop = bdx_close,
1860 .ndo_start_xmit = bdx_tx_transmit,
1861 .ndo_validate_addr = eth_validate_addr,
1862 .ndo_do_ioctl = bdx_ioctl,
1863 .ndo_set_multicast_list = bdx_setmulti,
1864 .ndo_change_mtu = bdx_change_mtu,
1865 .ndo_set_mac_address = bdx_set_mac,
1866 .ndo_vlan_rx_add_vid = bdx_vlan_rx_add_vid,
1867 .ndo_vlan_rx_kill_vid = bdx_vlan_rx_kill_vid,
1868};
1869
1870/**
1871 * bdx_probe - Device Initialization Routine
1872 * @pdev: PCI device information struct
1873 * @ent: entry in bdx_pci_tbl
1874 *
1875 * Returns 0 on success, negative on failure
1876 *
1877 * bdx_probe initializes an adapter identified by a pci_dev structure.
1878 * The OS initialization, configuring of the adapter private structure,
1879 * and a hardware reset occur.
1880 *
1881 * The functions and their call order follow
1882 * /usr/src/linux/Documentation/DMA-{API,mapping}.txt
1883 *
1884 */
1885
1886/* TBD: netif_msg should be checked and implemented; it is disabled for now */
1887static int __devinit
1888bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1889{
1890 struct net_device *ndev;
1891 struct bdx_priv *priv;
1892 int err, pci_using_dac, port;
1893 unsigned long pciaddr;
1894 u32 regionSize;
1895 struct pci_nic *nic;
1896
1897 ENTER;
1898
1899 nic = vmalloc(sizeof(*nic));
1900 if (!nic)
1901 RET(-ENOMEM);
1902
1903 /************** pci *****************/
1904 err = pci_enable_device(pdev);
1905	if (err)		/* it triggers an interrupt, reason unknown; */
1906		goto err_pci;	/* it's not a problem though */
1907
1908 if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
1909 !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
1910 pci_using_dac = 1;
1911 } else {
1912 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
1913 (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
1914 pr_err("No usable DMA configuration, aborting\n");
1915 goto err_dma;
1916 }
1917 pci_using_dac = 0;
1918 }
1919
1920 err = pci_request_regions(pdev, BDX_DRV_NAME);
1921 if (err)
1922 goto err_dma;
1923
1924 pci_set_master(pdev);
1925
1926 pciaddr = pci_resource_start(pdev, 0);
1927 if (!pciaddr) {
1928 err = -EIO;
1929 pr_err("no MMIO resource\n");
1930 goto err_out_res;
1931 }
1932 regionSize = pci_resource_len(pdev, 0);
1933 if (regionSize < BDX_REGS_SIZE) {
1934 err = -EIO;
1935 pr_err("MMIO resource (%x) too small\n", regionSize);
1936 goto err_out_res;
1937 }
1938
1939 nic->regs = ioremap(pciaddr, regionSize);
1940 if (!nic->regs) {
1941 err = -EIO;
1942 pr_err("ioremap failed\n");
1943 goto err_out_res;
1944 }
1945
1946 if (pdev->irq < 2) {
1947 err = -EIO;
1948 pr_err("invalid irq (%d)\n", pdev->irq);
1949 goto err_out_iomap;
1950 }
1951 pci_set_drvdata(pdev, nic);
1952
1953 if (pdev->device == 0x3014)
1954 nic->port_num = 2;
1955 else
1956 nic->port_num = 1;
1957
1958 print_hw_id(pdev);
1959
1960 bdx_hw_reset_direct(nic->regs);
1961
1962 nic->irq_type = IRQ_INTX;
1963#ifdef BDX_MSI
1964 if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
1965 err = pci_enable_msi(pdev);
1966 if (err)
1967			pr_err("Can't enable MSI, error is %d\n", err);
1968 else
1969 nic->irq_type = IRQ_MSI;
1970 } else
1971 DBG("HW does not support MSI\n");
1972#endif
1973
1974 /************** netdev **************/
1975 for (port = 0; port < nic->port_num; port++) {
1976 ndev = alloc_etherdev(sizeof(struct bdx_priv));
1977 if (!ndev) {
1978 err = -ENOMEM;
1979 pr_err("alloc_etherdev failed\n");
1980 goto err_out_iomap;
1981 }
1982
1983 ndev->netdev_ops = &bdx_netdev_ops;
1984 ndev->tx_queue_len = BDX_NDEV_TXQ_LEN;
1985
1986 bdx_set_ethtool_ops(ndev); /* ethtool interface */
1987
1988		/* these fields are used for info purposes only,
1989		 * so they can be the same for all ports of the board */
1990 ndev->if_port = port;
1991 ndev->base_addr = pciaddr;
1992 ndev->mem_start = pciaddr;
1993 ndev->mem_end = pciaddr + regionSize;
1994 ndev->irq = pdev->irq;
1995 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
1996 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
1997 NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
1998 /*| NETIF_F_FRAGLIST */
1999 ;
2000 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2001 NETIF_F_TSO | NETIF_F_HW_VLAN_TX;
2002
2003 if (pci_using_dac)
2004 ndev->features |= NETIF_F_HIGHDMA;
2005
2006 /************** priv ****************/
2007 priv = nic->priv[port] = netdev_priv(ndev);
2008
2009 priv->pBdxRegs = nic->regs + port * 0x8000;
2010 priv->port = port;
2011 priv->pdev = pdev;
2012 priv->ndev = ndev;
2013 priv->nic = nic;
2014 priv->msg_enable = BDX_DEF_MSG_ENABLE;
2015
2016 netif_napi_add(ndev, &priv->napi, bdx_poll, 64);
2017
2018 if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) {
2019 DBG("HW statistics not supported\n");
2020 priv->stats_flag = 0;
2021 } else {
2022 priv->stats_flag = 1;
2023 }
2024
2025 /* Initialize fifo sizes. */
2026 priv->txd_size = 2;
2027 priv->txf_size = 2;
2028 priv->rxd_size = 2;
2029 priv->rxf_size = 3;
2030
2031 /* Initialize the initial coalescing registers. */
2032 priv->rdintcm = INT_REG_VAL(0x20, 1, 4, 12);
2033 priv->tdintcm = INT_REG_VAL(0x20, 1, 0, 12);
2034
2035		/* ndev->xmit_lock spinlock is not used.
2036		 * Private priv->tx_lock is used for synchronization
2037		 * between transmit and TX irq cleanup. In addition,
2038		 * the set-multicast-list callback also takes priv->tx_lock.
2039		 */
2040#ifdef BDX_LLTX
2041 ndev->features |= NETIF_F_LLTX;
2042#endif
2043 spin_lock_init(&priv->tx_lock);
2044
2045 /*bdx_hw_reset(priv); */
2046		if (bdx_read_mac(priv)) {
2047			pr_err("load MAC address failed\n");
2048			err = -EFAULT;
2049			goto err_out_free;
2050		}
2050 SET_NETDEV_DEV(ndev, &pdev->dev);
2051 err = register_netdev(ndev);
2052 if (err) {
2053 pr_err("register_netdev failed\n");
2054 goto err_out_free;
2055 }
2056 netif_carrier_off(ndev);
2057 netif_stop_queue(ndev);
2058
2059 print_eth_id(ndev);
2060 }
2061 RET(0);
2062
2063err_out_free:
2064 free_netdev(ndev);
2065err_out_iomap:
2066 iounmap(nic->regs);
2067err_out_res:
2068 pci_release_regions(pdev);
2069err_dma:
2070 pci_disable_device(pdev);
2071err_pci:
2072 vfree(nic);
2073
2074 RET(err);
2075}
2076
2077/****************** Ethtool interface *********************/
2078/* get strings for statistics counters */
2079static const char
2080 bdx_stat_names[][ETH_GSTRING_LEN] = {
2081 "InUCast", /* 0x7200 */
2082 "InMCast", /* 0x7210 */
2083 "InBCast", /* 0x7220 */
2084 "InPkts", /* 0x7230 */
2085 "InErrors", /* 0x7240 */
2086 "InDropped", /* 0x7250 */
2087 "FrameTooLong", /* 0x7260 */
2088 "FrameSequenceErrors", /* 0x7270 */
2089 "InVLAN", /* 0x7280 */
2090 "InDroppedDFE", /* 0x7290 */
2091 "InDroppedIntFull", /* 0x72A0 */
2092 "InFrameAlignErrors", /* 0x72B0 */
2093
2094 /* 0x72C0-0x72E0 RSRV */
2095
2096 "OutUCast", /* 0x72F0 */
2097 "OutMCast", /* 0x7300 */
2098 "OutBCast", /* 0x7310 */
2099 "OutPkts", /* 0x7320 */
2100
2101 /* 0x7330-0x7360 RSRV */
2102
2103 "OutVLAN", /* 0x7370 */
2104 "InUCastOctects", /* 0x7380 */
2105 "OutUCastOctects", /* 0x7390 */
2106
2107 /* 0x73A0-0x73B0 RSRV */
2108
2109 "InBCastOctects", /* 0x73C0 */
2110 "OutBCastOctects", /* 0x73D0 */
2111 "InOctects", /* 0x73E0 */
2112 "OutOctects", /* 0x73F0 */
2113};
2114
2115/*
2116 * bdx_get_settings - get device-specific settings
2117 * @netdev
2118 * @ecmd
2119 */
2120static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
2121{
2122 u32 rdintcm;
2123 u32 tdintcm;
2124 struct bdx_priv *priv = netdev_priv(netdev);
2125
2126 rdintcm = priv->rdintcm;
2127 tdintcm = priv->tdintcm;
2128
2129 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
2130 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
2131 ethtool_cmd_speed_set(ecmd, SPEED_10000);
2132 ecmd->duplex = DUPLEX_FULL;
2133 ecmd->port = PORT_FIBRE;
2134 ecmd->transceiver = XCVR_EXTERNAL; /* what does it mean? */
2135 ecmd->autoneg = AUTONEG_DISABLE;
2136
2137	/* PCK_TH is measured in multiples of FIFO bytes;
2138	   we translate it to packets */
2139 ecmd->maxtxpkt =
2140 ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);
2141 ecmd->maxrxpkt =
2142 ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));
2143
2144 return 0;
2145}
2146
2147/*
2148 * bdx_get_drvinfo - report driver information
2149 * @netdev
2150 * @drvinfo
2151 */
2152static void
2153bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
2154{
2155 struct bdx_priv *priv = netdev_priv(netdev);
2156
2157 strlcat(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
2158 strlcat(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
2159 strlcat(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2160 strlcat(drvinfo->bus_info, pci_name(priv->pdev),
2161 sizeof(drvinfo->bus_info));
2162
2163 drvinfo->n_stats = ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
2164 drvinfo->testinfo_len = 0;
2165 drvinfo->regdump_len = 0;
2166 drvinfo->eedump_len = 0;
2167}
2168
2169/*
2170 * bdx_get_coalesce - get interrupt coalescing parameters
2171 * @netdev
2172 * @ecoal
2173 */
2174static int
2175bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
2176{
2177 u32 rdintcm;
2178 u32 tdintcm;
2179 struct bdx_priv *priv = netdev_priv(netdev);
2180
2181 rdintcm = priv->rdintcm;
2182 tdintcm = priv->tdintcm;
2183
2184	/* PCK_TH is measured in multiples of FIFO bytes;
2185	   we translate it to packets */
2186 ecoal->rx_coalesce_usecs = GET_INT_COAL(rdintcm) * INT_COAL_MULT;
2187 ecoal->rx_max_coalesced_frames =
2188 ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));
2189
2190 ecoal->tx_coalesce_usecs = GET_INT_COAL(tdintcm) * INT_COAL_MULT;
2191 ecoal->tx_max_coalesced_frames =
2192 ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);
2193
2194 /* adaptive parameters ignored */
2195 return 0;
2196}
2197
2198/*
2199 * bdx_set_coalesce - set interrupt coalescing parameters
2200 * @netdev
2201 * @ecoal
2202 */
2203static int
2204bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
2205{
2206 u32 rdintcm;
2207 u32 tdintcm;
2208 struct bdx_priv *priv = netdev_priv(netdev);
2209 int rx_coal;
2210 int tx_coal;
2211 int rx_max_coal;
2212 int tx_max_coal;
2213
2214 /* Check for valid input */
2215 rx_coal = ecoal->rx_coalesce_usecs / INT_COAL_MULT;
2216 tx_coal = ecoal->tx_coalesce_usecs / INT_COAL_MULT;
2217 rx_max_coal = ecoal->rx_max_coalesced_frames;
2218 tx_max_coal = ecoal->tx_max_coalesced_frames;
2219
2220 /* Translate from packets to multiples of FIFO bytes */
2221 rx_max_coal =
2222 (((rx_max_coal * sizeof(struct rxf_desc)) + PCK_TH_MULT - 1)
2223 / PCK_TH_MULT);
2224 tx_max_coal =
2225 (((tx_max_coal * BDX_TXF_DESC_SZ) + PCK_TH_MULT - 1)
2226 / PCK_TH_MULT);
2227
2228 if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF) ||
2229 (rx_max_coal > 0xF) || (tx_max_coal > 0xF))
2230 return -EINVAL;
2231
2232 rdintcm = INT_REG_VAL(rx_coal, GET_INT_COAL_RC(priv->rdintcm),
2233 GET_RXF_TH(priv->rdintcm), rx_max_coal);
2234 tdintcm = INT_REG_VAL(tx_coal, GET_INT_COAL_RC(priv->tdintcm), 0,
2235 tx_max_coal);
2236
2237 priv->rdintcm = rdintcm;
2238 priv->tdintcm = tdintcm;
2239
2240 WRITE_REG(priv, regRDINTCM0, rdintcm);
2241 WRITE_REG(priv, regTDINTCM0, tdintcm);
2242
2243 return 0;
2244}
2245
2246/* Convert RX fifo size to number of pending packets */
2247static inline int bdx_rx_fifo_size_to_packets(int rx_size)
2248{
2249 return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc);
2250}
2251
2252/* Convert TX fifo size to number of pending packets */
2253static inline int bdx_tx_fifo_size_to_packets(int tx_size)
2254{
2255 return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ;
2256}
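
A quick standalone check of these conversions (a sketch; RXF_DESC_SZ stands
in for sizeof(struct rxf_desc) from tehuti.h, six u32 fields, i.e. 24 bytes):

	#include <assert.h>

	#define FIFO_SIZE	4096	/* from tehuti.h */
	#define BDX_TXF_DESC_SZ	16	/* from tehuti.h */
	#define RXF_DESC_SZ	24	/* sizeof(struct rxf_desc) */

	int main(void)
	{
		/* the largest size code (3) selects a 4096 << 3 = 32768 byte fifo */
		assert(FIFO_SIZE * (1 << 3) / RXF_DESC_SZ == 1365);	/* rx_max_pending */
		assert(FIFO_SIZE * (1 << 3) / BDX_TXF_DESC_SZ == 2048);	/* tx_max_pending */
		return 0;
	}
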
2257
2258/*
2259 * bdx_get_ringparam - report ring sizes
2260 * @netdev
2261 * @ring
2262 */
2263static void
2264bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2265{
2266 struct bdx_priv *priv = netdev_priv(netdev);
2267
2268	/* max_pending - the maximum-sized FIFO we allow */
2269 ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3);
2270 ring->tx_max_pending = bdx_tx_fifo_size_to_packets(3);
2271 ring->rx_pending = bdx_rx_fifo_size_to_packets(priv->rxf_size);
2272 ring->tx_pending = bdx_tx_fifo_size_to_packets(priv->txd_size);
2273}
2274
2275/*
2276 * bdx_set_ringparam - set ring sizes
2277 * @netdev
2278 * @ring
2279 */
2280static int
2281bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2282{
2283 struct bdx_priv *priv = netdev_priv(netdev);
2284 int rx_size = 0;
2285 int tx_size = 0;
2286
2287 for (; rx_size < 4; rx_size++) {
2288 if (bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending)
2289 break;
2290 }
2291 if (rx_size == 4)
2292 rx_size = 3;
2293
2294 for (; tx_size < 4; tx_size++) {
2295 if (bdx_tx_fifo_size_to_packets(tx_size) >= ring->tx_pending)
2296 break;
2297 }
2298 if (tx_size == 4)
2299 tx_size = 3;
2300
2301	/* Is there anything to do? */
2302 if ((rx_size == priv->rxf_size) &&
2303 (tx_size == priv->txd_size))
2304 return 0;
2305
2306 priv->rxf_size = rx_size;
2307 if (rx_size > 1)
2308 priv->rxd_size = rx_size - 1;
2309 else
2310 priv->rxd_size = rx_size;
2311
2312 priv->txf_size = priv->txd_size = tx_size;
2313
2314 if (netif_running(netdev)) {
2315 bdx_close(netdev);
2316 bdx_open(netdev);
2317 }
2318 return 0;
2319}
2320
2321/*
2322 * bdx_get_strings - return a set of strings that describe the requested objects
2323 * @netdev
2324 * @data
2325 */
2326static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2327{
2328 switch (stringset) {
2329 case ETH_SS_STATS:
2330 memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names));
2331 break;
2332 }
2333}
2334
2335/*
2336 * bdx_get_sset_count - return number of statistics or tests
2337 * @netdev
2338 */
2339static int bdx_get_sset_count(struct net_device *netdev, int stringset)
2340{
2341 struct bdx_priv *priv = netdev_priv(netdev);
2342
2343 switch (stringset) {
2344 case ETH_SS_STATS:
2345 BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
2346 != sizeof(struct bdx_stats) / sizeof(u64));
2347 return (priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0;
2348 }
2349
2350 return -EINVAL;
2351}
2352
2353/*
2354 * bdx_get_ethtool_stats - return device's hardware L2 statistics
2355 * @netdev
2356 * @stats
2357 * @data
2358 */
2359static void bdx_get_ethtool_stats(struct net_device *netdev,
2360 struct ethtool_stats *stats, u64 *data)
2361{
2362 struct bdx_priv *priv = netdev_priv(netdev);
2363
2364 if (priv->stats_flag) {
2365
2366 /* Update stats from HW */
2367 bdx_update_stats(priv);
2368
2369 /* Copy data to user buffer */
2370 memcpy(data, &priv->hw_stats, sizeof(priv->hw_stats));
2371 }
2372}
2373
2374/*
2375 * bdx_set_ethtool_ops - ethtool interface implementation
2376 * @netdev
2377 */
2378static void bdx_set_ethtool_ops(struct net_device *netdev)
2379{
2380 static const struct ethtool_ops bdx_ethtool_ops = {
2381 .get_settings = bdx_get_settings,
2382 .get_drvinfo = bdx_get_drvinfo,
2383 .get_link = ethtool_op_get_link,
2384 .get_coalesce = bdx_get_coalesce,
2385 .set_coalesce = bdx_set_coalesce,
2386 .get_ringparam = bdx_get_ringparam,
2387 .set_ringparam = bdx_set_ringparam,
2388 .get_strings = bdx_get_strings,
2389 .get_sset_count = bdx_get_sset_count,
2390 .get_ethtool_stats = bdx_get_ethtool_stats,
2391 };
2392
2393 SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops);
2394}
2395
2396/**
2397 * bdx_remove - Device Removal Routine
2398 * @pdev: PCI device information struct
2399 *
2400 * bdx_remove is called by the PCI subsystem to alert the driver
2401 * that it should release a PCI device. This could be caused by a
2402 * Hot-Plug event, or because the driver is going to be removed from
2403 * memory.
2404 **/
2405static void __devexit bdx_remove(struct pci_dev *pdev)
2406{
2407 struct pci_nic *nic = pci_get_drvdata(pdev);
2408 struct net_device *ndev;
2409 int port;
2410
2411 for (port = 0; port < nic->port_num; port++) {
2412 ndev = nic->priv[port]->ndev;
2413 unregister_netdev(ndev);
2414 free_netdev(ndev);
2415 }
2416
2417 /*bdx_hw_reset_direct(nic->regs); */
2418#ifdef BDX_MSI
2419 if (nic->irq_type == IRQ_MSI)
2420 pci_disable_msi(pdev);
2421#endif
2422
2423 iounmap(nic->regs);
2424 pci_release_regions(pdev);
2425 pci_disable_device(pdev);
2426 pci_set_drvdata(pdev, NULL);
2427 vfree(nic);
2428
2429 RET();
2430}
2431
2432static struct pci_driver bdx_pci_driver = {
2433 .name = BDX_DRV_NAME,
2434 .id_table = bdx_pci_tbl,
2435 .probe = bdx_probe,
2436 .remove = __devexit_p(bdx_remove),
2437};
2438
2439/*
2440 * print_driver_id - print parameters of the driver build
2441 */
2442static void __init print_driver_id(void)
2443{
2444 pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION);
2445 pr_info("Options: hw_csum %s\n", BDX_MSI_STRING);
2446}
2447
2448static int __init bdx_module_init(void)
2449{
2450 ENTER;
2451 init_txd_sizes();
2452 print_driver_id();
2453 RET(pci_register_driver(&bdx_pci_driver));
2454}
2455
2456module_init(bdx_module_init);
2457
2458static void __exit bdx_module_exit(void)
2459{
2460 ENTER;
2461 pci_unregister_driver(&bdx_pci_driver);
2462 RET();
2463}
2464
2465module_exit(bdx_module_exit);
2466
2467MODULE_LICENSE("GPL");
2468MODULE_AUTHOR(DRIVER_AUTHOR);
2469MODULE_DESCRIPTION(BDX_DRV_DESC);
2470MODULE_FIRMWARE("tehuti/bdx.bin");
diff --git a/drivers/net/ethernet/tehuti/tehuti.h b/drivers/net/ethernet/tehuti/tehuti.h
new file mode 100644
index 000000000000..709ebd6e28b4
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tehuti.h
@@ -0,0 +1,561 @@
1/*
2 * Tehuti Networks(R) Network Driver
3 * Copyright (C) 2007 Tehuti Networks Ltd. All rights reserved
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#ifndef _TEHUTI_H
12#define _TEHUTI_H
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18#include <linux/pci.h>
19#include <linux/delay.h>
20#include <linux/ethtool.h>
21#include <linux/mii.h>
22#include <linux/crc32.h>
23#include <linux/uaccess.h>
24#include <linux/in.h>
25#include <linux/ip.h>
26#include <linux/tcp.h>
27#include <linux/sched.h>
28#include <linux/tty.h>
29#include <linux/if_vlan.h>
30#include <linux/interrupt.h>
31#include <linux/vmalloc.h>
32#include <linux/firmware.h>
33#include <asm/byteorder.h>
34#include <linux/dma-mapping.h>
35#include <linux/slab.h>
36
37/* Compile Time Switches */
38/* start */
39#define BDX_TSO
40#define BDX_LLTX
41#define BDX_DELAY_WPTR
42/* #define BDX_MSI */
43/* end */
44
45#if !defined CONFIG_PCI_MSI
46# undef BDX_MSI
47#endif
48
49#define BDX_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
50 NETIF_MSG_PROBE | \
51 NETIF_MSG_LINK)
52
53/* ioctl ops */
54#define BDX_OP_READ 1
55#define BDX_OP_WRITE 2
56
57/* RX copy break size */
58#define BDX_COPYBREAK 257
59
60#define DRIVER_AUTHOR "Tehuti Networks(R)"
61#define BDX_DRV_DESC "Tehuti Networks(R) Network Driver"
62#define BDX_DRV_NAME "tehuti"
63#define BDX_NIC_NAME "Tehuti 10 Giga TOE SmartNIC"
64#define BDX_NIC2PORT_NAME "Tehuti 2-Port 10 Giga TOE SmartNIC"
65#define BDX_DRV_VERSION "7.29.3"
66
67#ifdef BDX_MSI
68# define BDX_MSI_STRING "msi "
69#else
70# define BDX_MSI_STRING ""
71#endif
72
73/* netdev tx queue len for Luxor. The kernel default is 1000;
74 * use "ifconfig eth1 txqueuelen 3000" to change it at runtime */
75#define BDX_NDEV_TXQ_LEN 3000
76
77#define FIFO_SIZE 4096
78#define FIFO_EXTRA_SPACE 1024
79
80#if BITS_PER_LONG == 64
81# define H32_64(x) (u32) ((u64)(x) >> 32)
82# define L32_64(x) (u32) ((u64)(x) & 0xffffffff)
83#elif BITS_PER_LONG == 32
84# define H32_64(x) 0
85# define L32_64(x) ((u32) (x))
86#else /* BITS_PER_LONG == ?? */
87# error BITS_PER_LONG is undefined. Must be 64 or 32
88#endif /* BITS_PER_LONG */
89
90#ifdef __BIG_ENDIAN
91# define CPU_CHIP_SWAP32(x) swab32(x)
92# define CPU_CHIP_SWAP16(x) swab16(x)
93#else
94# define CPU_CHIP_SWAP32(x) (x)
95# define CPU_CHIP_SWAP16(x) (x)
96#endif
97
98#define READ_REG(pp, reg) readl(pp->pBdxRegs + reg)
99#define WRITE_REG(pp, reg, val) writel(val, pp->pBdxRegs + reg)
100
101#ifndef NET_IP_ALIGN
102# define NET_IP_ALIGN 2
103#endif
104
105#ifndef NETDEV_TX_OK
106# define NETDEV_TX_OK 0
107#endif
108
109#define LUXOR_MAX_PORT 2
110#define BDX_MAX_RX_DONE 150
111#define BDX_TXF_DESC_SZ 16
112#define BDX_MAX_TX_LEVEL (priv->txd_fifo0.m.memsz - 16)
113#define BDX_MIN_TX_LEVEL 256
114#define BDX_NO_UPD_PACKETS 40
115
116struct pci_nic {
117 int port_num;
118 void __iomem *regs;
119 int irq_type;
120 struct bdx_priv *priv[LUXOR_MAX_PORT];
121};
122
123enum { IRQ_INTX, IRQ_MSI, IRQ_MSIX };
124
125#define PCK_TH_MULT 128
126#define INT_COAL_MULT 2
127
128#define BITS_MASK(nbits) ((1<<nbits)-1)
129#define GET_BITS_SHIFT(x, nbits, nshift) (((x)>>nshift)&BITS_MASK(nbits))
130#define BITS_SHIFT_MASK(nbits, nshift) (BITS_MASK(nbits)<<nshift)
131#define BITS_SHIFT_VAL(x, nbits, nshift) (((x)&BITS_MASK(nbits))<<nshift)
132#define BITS_SHIFT_CLEAR(x, nbits, nshift) \
133 ((x)&(~BITS_SHIFT_MASK(nbits, nshift)))
134
135#define GET_INT_COAL(x) GET_BITS_SHIFT(x, 15, 0)
136#define GET_INT_COAL_RC(x) GET_BITS_SHIFT(x, 1, 15)
137#define GET_RXF_TH(x) GET_BITS_SHIFT(x, 4, 16)
138#define GET_PCK_TH(x) GET_BITS_SHIFT(x, 4, 20)
139
140#define INT_REG_VAL(coal, coal_rc, rxf_th, pck_th) \
141 ((coal)|((coal_rc)<<15)|((rxf_th)<<16)|((pck_th)<<20))
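
A standalone decode (a sketch) of the probe-time default
rdintcm = INT_REG_VAL(0x20, 1, 4, 12), using the same field layout; the
printed values match what bdx_get_coalesce() reports given
INT_COAL_MULT = 2, PCK_TH_MULT = 128 and a 24-byte rxf_desc:

	#include <stdio.h>

	#define BITS_MASK(nbits)	((1 << (nbits)) - 1)
	#define GET_BITS_SHIFT(x, nbits, nshift) (((x) >> (nshift)) & BITS_MASK(nbits))
	#define INT_REG_VAL(coal, coal_rc, rxf_th, pck_th) \
		((coal) | ((coal_rc) << 15) | ((rxf_th) << 16) | ((pck_th) << 20))

	int main(void)
	{
		unsigned int rdintcm = INT_REG_VAL(0x20, 1, 4, 12);

		/* rx_coalesce_usecs = coal * INT_COAL_MULT = 32 * 2 */
		printf("usecs  = %u\n", GET_BITS_SHIFT(rdintcm, 15, 0) * 2);
		/* rx_max_coalesced_frames = pck_th * PCK_TH_MULT / 24 = 12 * 128 / 24 */
		printf("frames = %u\n", GET_BITS_SHIFT(rdintcm, 4, 20) * 128 / 24);
		return 0;	/* prints: usecs  = 64, frames = 64 */
	}
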
142
143struct fifo {
144 dma_addr_t da; /* physical address of fifo (used by HW) */
145 char *va; /* virtual address of fifo (used by SW) */
146 u32 rptr, wptr; /* cached values of RPTR and WPTR registers,
147 they're 32 bits on both 32 and 64 archs */
148 u16 reg_CFG0, reg_CFG1;
149 u16 reg_RPTR, reg_WPTR;
150 u16 memsz; /* memory size allocated for fifo */
151 u16 size_mask;
152 u16 pktsz; /* skb packet size to allocate */
153 u16 rcvno; /* number of buffers that come from this RXF */
154};
155
156struct txf_fifo {
157 struct fifo m; /* minimal set of variables used by all fifos */
158};
159
160struct txd_fifo {
161 struct fifo m; /* minimal set of variables used by all fifos */
162};
163
164struct rxf_fifo {
165 struct fifo m; /* minimal set of variables used by all fifos */
166};
167
168struct rxd_fifo {
169 struct fifo m; /* minimal set of variables used by all fifos */
170};
171
172struct rx_map {
173 u64 dma;
174 struct sk_buff *skb;
175};
176
177struct rxdb {
178 int *stack;
179 struct rx_map *elems;
180 int nelem;
181 int top;
182};
183
184union bdx_dma_addr {
185 dma_addr_t dma;
186 struct sk_buff *skb;
187};
188
189/* Entry in the db.
190 * dma fragment entries carry len > 0 and addr.dma;
191 * the closing skb entry of a packet carries len < 0 and addr.skb */
192struct tx_map {
193 union bdx_dma_addr addr;
194 int len;
195};
196
197/* tx database - implemented as circular fifo buffer*/
198struct txdb {
199 struct tx_map *start; /* points to the first element */
200 struct tx_map *end; /* points just AFTER the last element */
201 struct tx_map *rptr; /* points to the next element to read */
202 struct tx_map *wptr; /* points to the next element to write */
203 int size; /* number of elements in the db */
204};
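
The increment helpers bdx_tx_db_inc_rptr()/bdx_tx_db_inc_wptr() used by the
Tx paths above are defined earlier in tehuti.c and are not part of this
hunk; a minimal sketch of the circular advance they perform over this
structure:

	/* sketch: advance a txdb cursor one element, wrapping at 'end',
	 * which points just past the last element (see comments above) */
	static inline void txdb_inc(struct txdb *db, struct tx_map **pptr)
	{
		if (++(*pptr) == db->end)
			*pptr = db->start;
	}
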
205
206/* Internal stats structure */
207struct bdx_stats {
208 u64 InUCast; /* 0x7200 */
209 u64 InMCast; /* 0x7210 */
210 u64 InBCast; /* 0x7220 */
211 u64 InPkts; /* 0x7230 */
212 u64 InErrors; /* 0x7240 */
213 u64 InDropped; /* 0x7250 */
214 u64 FrameTooLong; /* 0x7260 */
215 u64 FrameSequenceErrors; /* 0x7270 */
216 u64 InVLAN; /* 0x7280 */
217 u64 InDroppedDFE; /* 0x7290 */
218 u64 InDroppedIntFull; /* 0x72A0 */
219 u64 InFrameAlignErrors; /* 0x72B0 */
220
221 /* 0x72C0-0x72E0 RSRV */
222
223 u64 OutUCast; /* 0x72F0 */
224 u64 OutMCast; /* 0x7300 */
225 u64 OutBCast; /* 0x7310 */
226 u64 OutPkts; /* 0x7320 */
227
228 /* 0x7330-0x7360 RSRV */
229
230 u64 OutVLAN; /* 0x7370 */
231 u64 InUCastOctects; /* 0x7380 */
232 u64 OutUCastOctects; /* 0x7390 */
233
234 /* 0x73A0-0x73B0 RSRV */
235
236 u64 InBCastOctects; /* 0x73C0 */
237 u64 OutBCastOctects; /* 0x73D0 */
238 u64 InOctects; /* 0x73E0 */
239 u64 OutOctects; /* 0x73F0 */
240};
241
242struct bdx_priv {
243 void __iomem *pBdxRegs;
244 struct net_device *ndev;
245
246 struct napi_struct napi;
247
248 /* RX FIFOs: 1 for data (full) descs, and 2 for free descs */
249 struct rxd_fifo rxd_fifo0;
250 struct rxf_fifo rxf_fifo0;
251 struct rxdb *rxdb; /* rx dbs to store skb pointers */
252 int napi_stop;
253
254 /* Tx FIFOs: 1 for data desc, 1 for empty (acks) desc */
255 struct txd_fifo txd_fifo0;
256 struct txf_fifo txf_fifo0;
257
258 struct txdb txdb;
259 int tx_level;
260#ifdef BDX_DELAY_WPTR
261 int tx_update_mark;
262 int tx_noupd;
263#endif
264 spinlock_t tx_lock; /* NETIF_F_LLTX mode */
265
266 /* rarely used */
267 u8 port;
268 u32 msg_enable;
269 int stats_flag;
270 struct bdx_stats hw_stats;
271 struct pci_dev *pdev;
272
273 struct pci_nic *nic;
274
275 u8 txd_size;
276 u8 txf_size;
277 u8 rxd_size;
278 u8 rxf_size;
279 u32 rdintcm;
280 u32 tdintcm;
281};
282
283/* RX FREE descriptor - 64bit*/
284struct rxf_desc {
285 u32 info; /* Buffer Count + Info - described below */
286 u32 va_lo; /* VAdr[31:0] */
287 u32 va_hi; /* VAdr[63:32] */
288 u32 pa_lo; /* PAdr[31:0] */
289 u32 pa_hi; /* PAdr[63:32] */
290 u32 len; /* Buffer Length */
291};
292
293#define GET_RXD_BC(x) GET_BITS_SHIFT((x), 5, 0)
294#define GET_RXD_RXFQ(x) GET_BITS_SHIFT((x), 2, 8)
295#define GET_RXD_TO(x) GET_BITS_SHIFT((x), 1, 15)
296#define GET_RXD_TYPE(x) GET_BITS_SHIFT((x), 4, 16)
297#define GET_RXD_ERR(x) GET_BITS_SHIFT((x), 6, 21)
298#define GET_RXD_RXP(x) GET_BITS_SHIFT((x), 1, 27)
299#define GET_RXD_PKT_ID(x) GET_BITS_SHIFT((x), 3, 28)
300#define GET_RXD_VTAG(x) GET_BITS_SHIFT((x), 1, 31)
301#define GET_RXD_VLAN_ID(x) GET_BITS_SHIFT((x), 12, 0)
302#define GET_RXD_VLAN_TCI(x) GET_BITS_SHIFT((x), 16, 0)
303#define GET_RXD_CFI(x) GET_BITS_SHIFT((x), 1, 12)
304#define GET_RXD_PRIO(x) GET_BITS_SHIFT((x), 3, 13)
305
306struct rxd_desc {
307 u32 rxd_val1;
308 u16 len;
309 u16 rxd_vlan;
310 u32 va_lo;
311 u32 va_hi;
312};
313
314/* PBL describes each virtual buffer to be
315 * transmitted from the host. */
316struct pbl {
317 u32 pa_lo;
318 u32 pa_hi;
319 u32 len;
320};
321
322/* First word of a TXD descriptor: the fixed 0x30000 sets type = 3 for a
323 * regular Tx packet; checksum = 7 requests ip+udp+tcp hw checksums */
324#define TXD_W1_VAL(bc, checksum, vtag, lgsnd, vlan_id) \
325 ((bc) | ((checksum)<<5) | ((vtag)<<8) | \
326 ((lgsnd)<<9) | (0x30000) | ((vlan_id)<<20))
327
328struct txd_desc {
329 u32 txd_val1;
330 u16 mss;
331 u16 length;
332 u32 va_lo;
333 u32 va_hi;
334 struct pbl pbl[0]; /* Fragments */
335} __packed;
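
A standalone sketch of how bdx_tx_transmit() assembles txd_val1 with the
TXD_W1_VAL macro above; the field values here are illustrative, not taken
from the driver:

	#include <stdio.h>
	#include <stdint.h>

	#define TXD_W1_VAL(bc, checksum, vtag, lgsnd, vlan_id) \
		((bc) | ((checksum) << 5) | ((vtag) << 8) | \
		 ((lgsnd) << 9) | (0x30000) | ((vlan_id) << 20))

	int main(void)
	{
		/* illustrative: 3 qwords, full hw csum (7), VLAN id 5, no LSO */
		uint32_t w1 = TXD_W1_VAL(3, 7, 1, 0, 5);

		printf("txd_val1 = 0x%08x\n", w1);	/* 0x005301e3 */
		return 0;
	}
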
336
337/* Register region size */
338#define BDX_REGS_SIZE 0x1000
339
340/* Registers from 0x0000-0x00fc were remapped to 0x4000-0x40fc */
341#define regTXD_CFG1_0 0x4000
342#define regRXF_CFG1_0 0x4010
343#define regRXD_CFG1_0 0x4020
344#define regTXF_CFG1_0 0x4030
345#define regTXD_CFG0_0 0x4040
346#define regRXF_CFG0_0 0x4050
347#define regRXD_CFG0_0 0x4060
348#define regTXF_CFG0_0 0x4070
349#define regTXD_WPTR_0 0x4080
350#define regRXF_WPTR_0 0x4090
351#define regRXD_WPTR_0 0x40A0
352#define regTXF_WPTR_0 0x40B0
353#define regTXD_RPTR_0 0x40C0
354#define regRXF_RPTR_0 0x40D0
355#define regRXD_RPTR_0 0x40E0
356#define regTXF_RPTR_0 0x40F0
357#define regTXF_RPTR_3 0x40FC
358
359/* hardware versioning */
360#define FW_VER 0x5010
361#define SROM_VER 0x5020
362#define FPGA_VER 0x5030
363#define FPGA_SEED 0x5040
364
365/* Registers from 0x0100-0x0150 were remapped to 0x5100-0x5150 */
366#define regISR regISR0
367#define regISR0 0x5100
368
369#define regIMR regIMR0
370#define regIMR0 0x5110
371
372#define regRDINTCM0 0x5120
373#define regRDINTCM2 0x5128
374
375#define regTDINTCM0 0x5130
376
377#define regISR_MSK0 0x5140
378
379#define regINIT_SEMAPHORE 0x5170
380#define regINIT_STATUS 0x5180
381
382#define regMAC_LNK_STAT 0x0200
383#define MAC_LINK_STAT 0x4 /* Link state */
384
385#define regGMAC_RXF_A 0x1240
386
387#define regUNC_MAC0_A 0x1250
388#define regUNC_MAC1_A 0x1260
389#define regUNC_MAC2_A 0x1270
390
391#define regVLAN_0 0x1800
392
393#define regMAX_FRAME_A 0x12C0
394
395#define regRX_MAC_MCST0 0x1A80
396#define regRX_MAC_MCST1 0x1A84
397#define MAC_MCST_NUM 15
398#define regRX_MCST_HASH0 0x1A00
399#define MAC_MCST_HASH_NUM 8
400
401#define regVPC 0x2300
402#define regVIC 0x2320
403#define regVGLB 0x2340
404
405#define regCLKPLL 0x5000
406
407/* for 10G only */
408#define regREVISION 0x6000
409#define regSCRATCH 0x6004
410#define regCTRLST 0x6008
411#define regMAC_ADDR_0 0x600C
412#define regMAC_ADDR_1 0x6010
413#define regFRM_LENGTH 0x6014
414#define regPAUSE_QUANT 0x6018
415#define regRX_FIFO_SECTION 0x601C
416#define regTX_FIFO_SECTION 0x6020
417#define regRX_FULLNESS 0x6024
418#define regTX_FULLNESS 0x6028
419#define regHASHTABLE 0x602C
420#define regMDIO_ST 0x6030
421#define regMDIO_CTL 0x6034
422#define regMDIO_DATA 0x6038
423#define regMDIO_ADDR 0x603C
424
425#define regRST_PORT 0x7000
426#define regDIS_PORT 0x7010
427#define regRST_QU 0x7020
428#define regDIS_QU 0x7030
429
430#define regCTRLST_TX_ENA 0x0001
431#define regCTRLST_RX_ENA 0x0002
432#define regCTRLST_PRM_ENA 0x0010
433#define regCTRLST_PAD_ENA 0x0020
434
435#define regCTRLST_BASE (regCTRLST_PAD_ENA|regCTRLST_PRM_ENA)
436
437#define regRX_FLT 0x1400
438
439/* TXD TXF RXF RXD CONFIG 0x0000 --- 0x007c*/
440#define TX_RX_CFG1_BASE 0xffffffff /*0-31 */
441#define TX_RX_CFG0_BASE 0xfffff000 /*31:12 */
442#define TX_RX_CFG0_RSVD 0x0ffc /*11:2 */
443#define TX_RX_CFG0_SIZE 0x0003 /*1:0 */
444
445/* TXD TXF RXF RXD WRITE 0x0080 --- 0x00BC */
446#define TXF_WPTR_WR_PTR 0x7ff8 /*14:3 */
447
448/* TXD TXF RXF RXD READ 0x00C0 --- 0x00FC */
449#define TXF_RPTR_RD_PTR 0x7ff8 /*14:3 */
450
451#define TXF_WPTR_MASK 0x7ff0 /* last 4 bits are dropped
452 * size is rounded to 16 */
453
454/* regISR 0x0100 */
455/* regIMR 0x0110 */
456#define IMR_INPROG 0x80000000 /*31 */
457#define IR_LNKCHG1 0x10000000 /*28 */
458#define IR_LNKCHG0 0x08000000 /*27 */
459#define IR_GPIO 0x04000000 /*26 */
460#define IR_RFRSH 0x02000000 /*25 */
461#define IR_RSVD 0x01000000 /*24 */
462#define IR_SWI 0x00800000 /*23 */
463#define IR_RX_FREE_3 0x00400000 /*22 */
464#define IR_RX_FREE_2 0x00200000 /*21 */
465#define IR_RX_FREE_1 0x00100000 /*20 */
466#define IR_RX_FREE_0 0x00080000 /*19 */
467#define IR_TX_FREE_3 0x00040000 /*18 */
468#define IR_TX_FREE_2 0x00020000 /*17 */
469#define IR_TX_FREE_1 0x00010000 /*16 */
470#define IR_TX_FREE_0 0x00008000 /*15 */
471#define IR_RX_DESC_3 0x00004000 /*14 */
472#define IR_RX_DESC_2 0x00002000 /*13 */
473#define IR_RX_DESC_1 0x00001000 /*12 */
474#define IR_RX_DESC_0 0x00000800 /*11 */
475#define IR_PSE 0x00000400 /*10 */
476#define IR_TMR3 0x00000200 /*9 */
477#define IR_TMR2 0x00000100 /*8 */
478#define IR_TMR1 0x00000080 /*7 */
479#define IR_TMR0 0x00000040 /*6 */
480#define IR_VNT 0x00000020 /*5 */
481#define IR_RxFL 0x00000010 /*4 */
482#define IR_SDPERR 0x00000008 /*3 */
483#define IR_TR 0x00000004 /*2 */
484#define IR_PCIE_LINK 0x00000002 /*1 */
485#define IR_PCIE_TOUT 0x00000001 /*0 */
486
487#define IR_EXTRA (IR_RX_FREE_0 | IR_LNKCHG0 | IR_PSE | \
488 IR_TMR0 | IR_PCIE_LINK | IR_PCIE_TOUT)
489#define IR_RUN (IR_EXTRA | IR_RX_DESC_0 | IR_TX_FREE_0)
490#define IR_ALL 0xfdfffff7
491
492#define IR_LNKCHG0_ofst 27
493
494#define GMAC_RX_FILTER_OSEN 0x1000 /* shared OS enable */
495#define GMAC_RX_FILTER_TXFC 0x0400 /* Tx flow control */
496#define GMAC_RX_FILTER_RSV0 0x0200 /* reserved */
497#define GMAC_RX_FILTER_FDA 0x0100 /* filter out direct address */
498#define GMAC_RX_FILTER_AOF 0x0080 /* accept over run */
499#define GMAC_RX_FILTER_ACF 0x0040 /* accept control frames */
500#define GMAC_RX_FILTER_ARUNT 0x0020 /* accept under run */
501#define GMAC_RX_FILTER_ACRC 0x0010 /* accept crc error */
502#define GMAC_RX_FILTER_AM 0x0008 /* accept multicast */
503#define GMAC_RX_FILTER_AB 0x0004 /* accept broadcast */
504#define GMAC_RX_FILTER_PRM 0x0001 /* [0:1] promiscuous mode */
505
506#define MAX_FRAME_AB_VAL 0x3fff /* 13:0 */
507
508#define CLKPLL_PLLLKD 0x0200 /*9 */
509#define CLKPLL_RSTEND 0x0100 /*8 */
510#define CLKPLL_SFTRST 0x0001 /*0 */
511
512#define CLKPLL_LKD (CLKPLL_PLLLKD|CLKPLL_RSTEND)
513
514/*
515 * PCI-E Device Control Register (Offset 0x88)
516 * Source: Luxor Data Sheet, 7.1.3.3.3
517 */
518#define PCI_DEV_CTRL_REG 0x88
519#define GET_DEV_CTRL_MAXPL(x) GET_BITS_SHIFT(x, 3, 5)
520#define GET_DEV_CTRL_MRRS(x) GET_BITS_SHIFT(x, 3, 12)
521
522/*
523 * PCI-E Link Status Register (Offset 0x92)
524 * Source: Luxor Data Sheet, 7.1.3.3.7
525 */
526#define PCI_LINK_STATUS_REG 0x92
527#define GET_LINK_STATUS_LANES(x) GET_BITS_SHIFT(x, 6, 4)
528
529/* Debugging Macros */
530
531#define DBG2(fmt, args...) \
532 pr_err("%s:%-5d: " fmt, __func__, __LINE__, ## args)
533
534#define BDX_ASSERT(x) BUG_ON(x)
535
536#ifdef DEBUG
537
538#define ENTER \
539do { \
540 pr_err("%s:%-5d: ENTER\n", __func__, __LINE__); \
541} while (0)
542
543#define RET(args...) \
544do { \
545 pr_err("%s:%-5d: RETURN\n", __func__, __LINE__); \
546 return args; \
547} while (0)
548
549#define DBG(fmt, args...) \
550 pr_err("%s:%-5d: " fmt, __func__, __LINE__, ## args)
551#else
552#define ENTER do { } while (0)
553#define RET(args...) return args
554#define DBG(fmt, args...) \
555do { \
556 if (0) \
557 pr_err(fmt, ##args); \
558} while (0)
559#endif
560
561#endif /* _TEHUTI_H */