author    Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d    /drivers/net/s2io.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/net/s2io.c')
-rw-r--r--    drivers/net/s2io.c    4950
1 file changed, 4950 insertions, 0 deletions
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
new file mode 100644
index 000000000000..9c224eba057d
--- /dev/null
+++ b/drivers/net/s2io.c
@@ -0,0 +1,4950 @@
1/************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
 18 * 			  questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
 21 * Francois Romieu	: For pointing out all the code parts that were
 22 * 			  deprecated and also for style-related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
27 * The module loadable parameters that are supported by the driver and a brief
 28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
30 * in the driver.
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
 33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 * in PCI Configuration space.
37 ************************************************************************/
38
39#include <linux/config.h>
40#include <linux/module.h>
41#include <linux/types.h>
42#include <linux/errno.h>
43#include <linux/ioport.h>
44#include <linux/pci.h>
45#include <linux/kernel.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/skbuff.h>
49#include <linux/init.h>
50#include <linux/delay.h>
51#include <linux/stddef.h>
52#include <linux/ioctl.h>
53#include <linux/timex.h>
54#include <linux/sched.h>
55#include <linux/ethtool.h>
56#include <linux/version.h>
57#include <linux/workqueue.h>
58
59#include <asm/io.h>
60#include <asm/system.h>
61#include <asm/uaccess.h>
62
63/* local include */
64#include "s2io.h"
65#include "s2io-regs.h"
66
67/* S2io Driver name & version. */
68static char s2io_driver_name[] = "s2io";
69static char s2io_driver_version[] = "Version 1.7.7.1";
70
71/*
 72 * Cards with the following subsystem_ids have a link state indication
 73 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 74 * The macro below identifies these cards given the subsystem_id.
75 */
76#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
77 (((subid >= 0x600B) && (subid <= 0x600D)) || \
78 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
79
80#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
81 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
82#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
83#define PANIC 1
84#define LOW 2
85static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
86{
87 int level = 0;
88 if ((sp->pkt_cnt[ring] - rxb_size) > 16) {
89 level = LOW;
90 if ((sp->pkt_cnt[ring] - rxb_size) < MAX_RXDS_PER_BLOCK) {
91 level = PANIC;
92 }
93 }
94
95 return level;
96}
97
98/* Ethtool related variables and Macros. */
99static char s2io_gstrings[][ETH_GSTRING_LEN] = {
100 "Register test\t(offline)",
101 "Eeprom test\t(offline)",
102 "Link test\t(online)",
103 "RLDRAM test\t(offline)",
104 "BIST Test\t(offline)"
105};
106
107static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
108 {"tmac_frms"},
109 {"tmac_data_octets"},
110 {"tmac_drop_frms"},
111 {"tmac_mcst_frms"},
112 {"tmac_bcst_frms"},
113 {"tmac_pause_ctrl_frms"},
114 {"tmac_any_err_frms"},
115 {"tmac_vld_ip_octets"},
116 {"tmac_vld_ip"},
117 {"tmac_drop_ip"},
118 {"tmac_icmp"},
119 {"tmac_rst_tcp"},
120 {"tmac_tcp"},
121 {"tmac_udp"},
122 {"rmac_vld_frms"},
123 {"rmac_data_octets"},
124 {"rmac_fcs_err_frms"},
125 {"rmac_drop_frms"},
126 {"rmac_vld_mcst_frms"},
127 {"rmac_vld_bcst_frms"},
128 {"rmac_in_rng_len_err_frms"},
129 {"rmac_long_frms"},
130 {"rmac_pause_ctrl_frms"},
131 {"rmac_discarded_frms"},
132 {"rmac_usized_frms"},
133 {"rmac_osized_frms"},
134 {"rmac_frag_frms"},
135 {"rmac_jabber_frms"},
136 {"rmac_ip"},
137 {"rmac_ip_octets"},
138 {"rmac_hdr_err_ip"},
139 {"rmac_drop_ip"},
140 {"rmac_icmp"},
141 {"rmac_tcp"},
142 {"rmac_udp"},
143 {"rmac_err_drp_udp"},
144 {"rmac_pause_cnt"},
145 {"rmac_accepted_ip"},
146 {"rmac_err_tcp"},
147};
148
149#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
150#define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
151
152#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
153#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
154
155
156/*
157 * Constants to be programmed into the Xena's registers, to configure
158 * the XAUI.
159 */
160
161#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
162#define END_SIGN 0x0
163
164static u64 default_mdio_cfg[] = {
165 /* Reset PMA PLL */
166 0xC001010000000000ULL, 0xC0010100000000E0ULL,
167 0xC0010100008000E4ULL,
168 /* Remove Reset from PMA PLL */
169 0xC001010000000000ULL, 0xC0010100000000E0ULL,
170 0xC0010100000000E4ULL,
171 END_SIGN
172};
173
174static u64 default_dtx_cfg[] = {
175 0x8000051500000000ULL, 0x80000515000000E0ULL,
176 0x80000515D93500E4ULL, 0x8001051500000000ULL,
177 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
178 0x8002051500000000ULL, 0x80020515000000E0ULL,
179 0x80020515F21000E4ULL,
180 /* Set PADLOOPBACKN */
181 0x8002051500000000ULL, 0x80020515000000E0ULL,
182 0x80020515B20000E4ULL, 0x8003051500000000ULL,
183 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
184 0x8004051500000000ULL, 0x80040515000000E0ULL,
185 0x80040515B20000E4ULL, 0x8005051500000000ULL,
186 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
187 SWITCH_SIGN,
188 /* Remove PADLOOPBACKN */
189 0x8002051500000000ULL, 0x80020515000000E0ULL,
190 0x80020515F20000E4ULL, 0x8003051500000000ULL,
191 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
192 0x8004051500000000ULL, 0x80040515000000E0ULL,
193 0x80040515F20000E4ULL, 0x8005051500000000ULL,
194 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
195 END_SIGN
196};
197
198
199/*
200 * Constants for Fixing the MacAddress problem seen mostly on
201 * Alpha machines.
202 */
203static u64 fix_mac[] = {
204 0x0060000000000000ULL, 0x0060600000000000ULL,
205 0x0040600000000000ULL, 0x0000600000000000ULL,
206 0x0020600000000000ULL, 0x0060600000000000ULL,
207 0x0020600000000000ULL, 0x0060600000000000ULL,
208 0x0020600000000000ULL, 0x0060600000000000ULL,
209 0x0020600000000000ULL, 0x0060600000000000ULL,
210 0x0020600000000000ULL, 0x0060600000000000ULL,
211 0x0020600000000000ULL, 0x0060600000000000ULL,
212 0x0020600000000000ULL, 0x0060600000000000ULL,
213 0x0020600000000000ULL, 0x0060600000000000ULL,
214 0x0020600000000000ULL, 0x0060600000000000ULL,
215 0x0020600000000000ULL, 0x0060600000000000ULL,
216 0x0020600000000000ULL, 0x0000600000000000ULL,
217 0x0040600000000000ULL, 0x0060600000000000ULL,
218 END_SIGN
219};
220
221/* Module Loadable parameters. */
222static unsigned int tx_fifo_num = 1;
223static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
224 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
225static unsigned int rx_ring_num = 1;
226static unsigned int rx_ring_sz[MAX_RX_RINGS] =
227 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
228static unsigned int Stats_refresh_time = 4;
229static unsigned int rmac_pause_time = 65535;
230static unsigned int mc_pause_threshold_q0q3 = 187;
231static unsigned int mc_pause_threshold_q4q7 = 187;
232static unsigned int shared_splits;
233static unsigned int tmac_util_period = 5;
234static unsigned int rmac_util_period = 5;
235#ifndef CONFIG_S2IO_NAPI
236static unsigned int indicate_max_pkts;
237#endif
238
239/*
240 * S2IO device table.
241 * This table lists all the devices that this driver supports.
242 */
243static struct pci_device_id s2io_tbl[] __devinitdata = {
244 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
245 PCI_ANY_ID, PCI_ANY_ID},
246 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
247 PCI_ANY_ID, PCI_ANY_ID},
248 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
249 PCI_ANY_ID, PCI_ANY_ID},
250 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
251 PCI_ANY_ID, PCI_ANY_ID},
252 {0,}
253};
254
255MODULE_DEVICE_TABLE(pci, s2io_tbl);
256
257static struct pci_driver s2io_driver = {
258 .name = "S2IO",
259 .id_table = s2io_tbl,
260 .probe = s2io_init_nic,
261 .remove = __devexit_p(s2io_rem_nic),
262};
263
264/* A simplifier macro used both by init and free shared_mem Fns(). */
265#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
266
267/**
268 * init_shared_mem - Allocation and Initialization of Memory
269 * @nic: Device private variable.
270 * Description: The function allocates all the memory areas shared
271 * between the NIC and the driver. This includes Tx descriptors,
272 * Rx descriptors and the statistics block.
273 */
274
275static int init_shared_mem(struct s2io_nic *nic)
276{
277 u32 size;
278 void *tmp_v_addr, *tmp_v_addr_next;
279 dma_addr_t tmp_p_addr, tmp_p_addr_next;
280 RxD_block_t *pre_rxd_blk = NULL;
281 int i, j, blk_cnt;
282 int lst_size, lst_per_page;
283 struct net_device *dev = nic->dev;
284#ifdef CONFIG_2BUFF_MODE
285 unsigned long tmp;
286 buffAdd_t *ba;
287#endif
288
289 mac_info_t *mac_control;
290 struct config_param *config;
291
292 mac_control = &nic->mac_control;
293 config = &nic->config;
294
295
 296	/* Allocation and initialization of TXDLs in FIFOs */
297 size = 0;
298 for (i = 0; i < config->tx_fifo_num; i++) {
299 size += config->tx_cfg[i].fifo_len;
300 }
301 if (size > MAX_AVAILABLE_TXDS) {
302 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
303 dev->name);
304 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
305 DBG_PRINT(ERR_DBG, "that can be used\n");
306 return FAILURE;
307 }
308
309 lst_size = (sizeof(TxD_t) * config->max_txds);
310 lst_per_page = PAGE_SIZE / lst_size;
311
312 for (i = 0; i < config->tx_fifo_num; i++) {
313 int fifo_len = config->tx_cfg[i].fifo_len;
314 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
315 nic->list_info[i] = kmalloc(list_holder_size, GFP_KERNEL);
316 if (!nic->list_info[i]) {
317 DBG_PRINT(ERR_DBG,
318 "Malloc failed for list_info\n");
319 return -ENOMEM;
320 }
321 memset(nic->list_info[i], 0, list_holder_size);
322 }
323 for (i = 0; i < config->tx_fifo_num; i++) {
324 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
325 lst_per_page);
326 mac_control->tx_curr_put_info[i].offset = 0;
327 mac_control->tx_curr_put_info[i].fifo_len =
328 config->tx_cfg[i].fifo_len - 1;
329 mac_control->tx_curr_get_info[i].offset = 0;
330 mac_control->tx_curr_get_info[i].fifo_len =
331 config->tx_cfg[i].fifo_len - 1;
332 for (j = 0; j < page_num; j++) {
333 int k = 0;
334 dma_addr_t tmp_p;
335 void *tmp_v;
336 tmp_v = pci_alloc_consistent(nic->pdev,
337 PAGE_SIZE, &tmp_p);
338 if (!tmp_v) {
339 DBG_PRINT(ERR_DBG,
340 "pci_alloc_consistent ");
341 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
342 return -ENOMEM;
343 }
344 while (k < lst_per_page) {
345 int l = (j * lst_per_page) + k;
346 if (l == config->tx_cfg[i].fifo_len)
347 goto end_txd_alloc;
348 nic->list_info[i][l].list_virt_addr =
349 tmp_v + (k * lst_size);
350 nic->list_info[i][l].list_phy_addr =
351 tmp_p + (k * lst_size);
352 k++;
353 }
354 }
355 }
356 end_txd_alloc:
357
358 /* Allocation and initialization of RXDs in Rings */
359 size = 0;
360 for (i = 0; i < config->rx_ring_num; i++) {
361 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
362 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
363 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
364 i);
365 DBG_PRINT(ERR_DBG, "RxDs per Block");
366 return FAILURE;
367 }
368 size += config->rx_cfg[i].num_rxd;
369 nic->block_count[i] =
370 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
371 nic->pkt_cnt[i] =
372 config->rx_cfg[i].num_rxd - nic->block_count[i];
373 }
374
375 for (i = 0; i < config->rx_ring_num; i++) {
376 mac_control->rx_curr_get_info[i].block_index = 0;
377 mac_control->rx_curr_get_info[i].offset = 0;
378 mac_control->rx_curr_get_info[i].ring_len =
379 config->rx_cfg[i].num_rxd - 1;
380 mac_control->rx_curr_put_info[i].block_index = 0;
381 mac_control->rx_curr_put_info[i].offset = 0;
382 mac_control->rx_curr_put_info[i].ring_len =
383 config->rx_cfg[i].num_rxd - 1;
384 blk_cnt =
385 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
386 /* Allocating all the Rx blocks */
387 for (j = 0; j < blk_cnt; j++) {
388#ifndef CONFIG_2BUFF_MODE
389 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
390#else
391 size = SIZE_OF_BLOCK;
392#endif
393 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
394 &tmp_p_addr);
395 if (tmp_v_addr == NULL) {
396 /*
397 * In case of failure, free_shared_mem()
398 * is called, which should free any
399 * memory that was alloced till the
400 * failure happened.
401 */
402 nic->rx_blocks[i][j].block_virt_addr =
403 tmp_v_addr;
404 return -ENOMEM;
405 }
406 memset(tmp_v_addr, 0, size);
407 nic->rx_blocks[i][j].block_virt_addr = tmp_v_addr;
408 nic->rx_blocks[i][j].block_dma_addr = tmp_p_addr;
409 }
410 /* Interlinking all Rx Blocks */
411 for (j = 0; j < blk_cnt; j++) {
412 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
413 tmp_v_addr_next =
414 nic->rx_blocks[i][(j + 1) %
415 blk_cnt].block_virt_addr;
416 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
417 tmp_p_addr_next =
418 nic->rx_blocks[i][(j + 1) %
419 blk_cnt].block_dma_addr;
420
421 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
422 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
423 * marker.
424 */
425#ifndef CONFIG_2BUFF_MODE
426 pre_rxd_blk->reserved_2_pNext_RxD_block =
427 (unsigned long) tmp_v_addr_next;
428#endif
429 pre_rxd_blk->pNext_RxD_Blk_physical =
430 (u64) tmp_p_addr_next;
431 }
432 }
433
434#ifdef CONFIG_2BUFF_MODE
435 /*
 436	 * Allocation of storage for buffer addresses in 2BUFF mode,
 437	 * and of the buffers as well.
438 */
439 for (i = 0; i < config->rx_ring_num; i++) {
440 blk_cnt =
441 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
442 nic->ba[i] = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
443 GFP_KERNEL);
444 if (!nic->ba[i])
445 return -ENOMEM;
446 for (j = 0; j < blk_cnt; j++) {
447 int k = 0;
448 nic->ba[i][j] = kmalloc((sizeof(buffAdd_t) *
449 (MAX_RXDS_PER_BLOCK + 1)),
450 GFP_KERNEL);
451 if (!nic->ba[i][j])
452 return -ENOMEM;
453 while (k != MAX_RXDS_PER_BLOCK) {
454 ba = &nic->ba[i][j][k];
455
456 ba->ba_0_org = kmalloc
457 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
458 if (!ba->ba_0_org)
459 return -ENOMEM;
460 tmp = (unsigned long) ba->ba_0_org;
461 tmp += ALIGN_SIZE;
462 tmp &= ~((unsigned long) ALIGN_SIZE);
463 ba->ba_0 = (void *) tmp;
464
465 ba->ba_1_org = kmalloc
466 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
467 if (!ba->ba_1_org)
468 return -ENOMEM;
469 tmp = (unsigned long) ba->ba_1_org;
470 tmp += ALIGN_SIZE;
471 tmp &= ~((unsigned long) ALIGN_SIZE);
472 ba->ba_1 = (void *) tmp;
473 k++;
474 }
475 }
476 }
477#endif
478
479 /* Allocation and initialization of Statistics block */
480 size = sizeof(StatInfo_t);
481 mac_control->stats_mem = pci_alloc_consistent
482 (nic->pdev, size, &mac_control->stats_mem_phy);
483
484 if (!mac_control->stats_mem) {
485 /*
486 * In case of failure, free_shared_mem() is called, which
487 * should free any memory that was alloced till the
488 * failure happened.
489 */
490 return -ENOMEM;
491 }
492 mac_control->stats_mem_sz = size;
493
494 tmp_v_addr = mac_control->stats_mem;
495 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
496 memset(tmp_v_addr, 0, size);
497
498 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
499 (unsigned long long) tmp_p_addr);
500
501 return SUCCESS;
502}
503
504/**
505 * free_shared_mem - Free the allocated Memory
506 * @nic: Device private variable.
 507 * Description: This function frees all the memory locations allocated by
 508 * the init_shared_mem() function and returns them to the kernel.
509 */
510
511static void free_shared_mem(struct s2io_nic *nic)
512{
513 int i, j, blk_cnt, size;
514 void *tmp_v_addr;
515 dma_addr_t tmp_p_addr;
516 mac_info_t *mac_control;
517 struct config_param *config;
518 int lst_size, lst_per_page;
519
520
521 if (!nic)
522 return;
523
524 mac_control = &nic->mac_control;
525 config = &nic->config;
526
527 lst_size = (sizeof(TxD_t) * config->max_txds);
528 lst_per_page = PAGE_SIZE / lst_size;
529
530 for (i = 0; i < config->tx_fifo_num; i++) {
531 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
532 lst_per_page);
533 for (j = 0; j < page_num; j++) {
534 int mem_blks = (j * lst_per_page);
535 if (!nic->list_info[i][mem_blks].list_virt_addr)
536 break;
537 pci_free_consistent(nic->pdev, PAGE_SIZE,
538 nic->list_info[i][mem_blks].
539 list_virt_addr,
540 nic->list_info[i][mem_blks].
541 list_phy_addr);
542 }
543 kfree(nic->list_info[i]);
544 }
545
546#ifndef CONFIG_2BUFF_MODE
547 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
548#else
549 size = SIZE_OF_BLOCK;
550#endif
551 for (i = 0; i < config->rx_ring_num; i++) {
552 blk_cnt = nic->block_count[i];
553 for (j = 0; j < blk_cnt; j++) {
554 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
555 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
556 if (tmp_v_addr == NULL)
557 break;
558 pci_free_consistent(nic->pdev, size,
559 tmp_v_addr, tmp_p_addr);
560 }
561 }
562
563#ifdef CONFIG_2BUFF_MODE
564 /* Freeing buffer storage addresses in 2BUFF mode. */
565 for (i = 0; i < config->rx_ring_num; i++) {
566 blk_cnt =
567 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
568 if (!nic->ba[i])
569 goto end_free;
570 for (j = 0; j < blk_cnt; j++) {
571 int k = 0;
572 if (!nic->ba[i][j]) {
573 kfree(nic->ba[i]);
574 goto end_free;
575 }
576 while (k != MAX_RXDS_PER_BLOCK) {
577 buffAdd_t *ba = &nic->ba[i][j][k];
578 if (!ba || !ba->ba_0_org || !ba->ba_1_org)
579 {
580 kfree(nic->ba[i]);
581 kfree(nic->ba[i][j]);
582 if(ba->ba_0_org)
583 kfree(ba->ba_0_org);
584 if(ba->ba_1_org)
585 kfree(ba->ba_1_org);
586 goto end_free;
587 }
588 kfree(ba->ba_0_org);
589 kfree(ba->ba_1_org);
590 k++;
591 }
592 kfree(nic->ba[i][j]);
593 }
594 kfree(nic->ba[i]);
595 }
596end_free:
597#endif
598
599 if (mac_control->stats_mem) {
600 pci_free_consistent(nic->pdev,
601 mac_control->stats_mem_sz,
602 mac_control->stats_mem,
603 mac_control->stats_mem_phy);
604 }
605}
606
607/**
608 * init_nic - Initialization of hardware
 609 * @nic: device private variable
610 * Description: The function sequentially configures every block
611 * of the H/W from their reset values.
612 * Return Value: SUCCESS on success and
613 * '-1' on failure (endian settings incorrect).
614 */
615
616static int init_nic(struct s2io_nic *nic)
617{
618 XENA_dev_config_t __iomem *bar0 = nic->bar0;
619 struct net_device *dev = nic->dev;
620 register u64 val64 = 0;
621 void __iomem *add;
622 u32 time;
623 int i, j;
624 mac_info_t *mac_control;
625 struct config_param *config;
626 int mdio_cnt = 0, dtx_cnt = 0;
627 unsigned long long mem_share;
628
629 mac_control = &nic->mac_control;
630 config = &nic->config;
631
632 /* Initialize swapper control register */
633 if (s2io_set_swapper(nic)) {
634 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
635 return -1;
636 }
637
638 /* Remove XGXS from reset state */
639 val64 = 0;
640 writeq(val64, &bar0->sw_reset);
641 val64 = readq(&bar0->sw_reset);
642 msleep(500);
643
644 /* Enable Receiving broadcasts */
645 add = &bar0->mac_cfg;
646 val64 = readq(&bar0->mac_cfg);
647 val64 |= MAC_RMAC_BCAST_ENABLE;
648 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
649 writel((u32) val64, add);
650 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
651 writel((u32) (val64 >> 32), (add + 4));
652
653 /* Read registers in all blocks */
654 val64 = readq(&bar0->mac_int_mask);
655 val64 = readq(&bar0->mc_int_mask);
656 val64 = readq(&bar0->xgxs_int_mask);
657
658 /* Set MTU */
659 val64 = dev->mtu;
660 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
661
662 /*
663 * Configuring the XAUI Interface of Xena.
664 * ***************************************
 665	 * To configure the Xena's XAUI, one has to write a series
 666	 * of 64-bit values into two registers in a particular
 667	 * sequence. Hence a macro 'SWITCH_SIGN' has been defined,
 668	 * which is placed in the arrays of configuration values
 669	 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
 670	 * to switch writing from one register to another. We continue
 671	 * writing these values until we encounter the 'END_SIGN' macro.
 672	 * For example, after making a series of 21 writes into the
 673	 * dtx_control register, the 'SWITCH_SIGN' appears and hence we
674 * start writing into mdio_control until we encounter END_SIGN.
675 */
676 while (1) {
677 dtx_cfg:
678 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
679 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
680 dtx_cnt++;
681 goto mdio_cfg;
682 }
683 SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
684 &bar0->dtx_control, UF);
685 val64 = readq(&bar0->dtx_control);
686 dtx_cnt++;
687 }
688 mdio_cfg:
689 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
690 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
691 mdio_cnt++;
692 goto dtx_cfg;
693 }
694 SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
695 &bar0->mdio_control, UF);
696 val64 = readq(&bar0->mdio_control);
697 mdio_cnt++;
698 }
699 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
700 (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
701 break;
702 } else {
703 goto dtx_cfg;
704 }
705 }
706
707 /* Tx DMA Initialization */
708 val64 = 0;
709 writeq(val64, &bar0->tx_fifo_partition_0);
710 writeq(val64, &bar0->tx_fifo_partition_1);
711 writeq(val64, &bar0->tx_fifo_partition_2);
712 writeq(val64, &bar0->tx_fifo_partition_3);
713
714
715 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
716 val64 |=
717 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
718 13) | vBIT(config->tx_cfg[i].fifo_priority,
719 ((i * 32) + 5), 3);
720
721 if (i == (config->tx_fifo_num - 1)) {
722 if (i % 2 == 0)
723 i++;
724 }
725
726 switch (i) {
727 case 1:
728 writeq(val64, &bar0->tx_fifo_partition_0);
729 val64 = 0;
730 break;
731 case 3:
732 writeq(val64, &bar0->tx_fifo_partition_1);
733 val64 = 0;
734 break;
735 case 5:
736 writeq(val64, &bar0->tx_fifo_partition_2);
737 val64 = 0;
738 break;
739 case 7:
740 writeq(val64, &bar0->tx_fifo_partition_3);
741 break;
742 }
743 }
744
745 /* Enable Tx FIFO partition 0. */
746 val64 = readq(&bar0->tx_fifo_partition_0);
747 val64 |= BIT(0); /* To enable the FIFO partition. */
748 writeq(val64, &bar0->tx_fifo_partition_0);
749
750 val64 = readq(&bar0->tx_fifo_partition_0);
751 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
752 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
753
754 /*
755 * Initialization of Tx_PA_CONFIG register to ignore packet
756 * integrity checking.
757 */
758 val64 = readq(&bar0->tx_pa_cfg);
759 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
760 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
761 writeq(val64, &bar0->tx_pa_cfg);
762
 763	/* Rx DMA initialization. */
764 val64 = 0;
765 for (i = 0; i < config->rx_ring_num; i++) {
766 val64 |=
767 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
768 3);
769 }
770 writeq(val64, &bar0->rx_queue_priority);
771
772 /*
773 * Allocating equal share of memory to all the
774 * configured Rings.
775 */
776 val64 = 0;
777 for (i = 0; i < config->rx_ring_num; i++) {
778 switch (i) {
779 case 0:
780 mem_share = (64 / config->rx_ring_num +
781 64 % config->rx_ring_num);
782 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
783 continue;
784 case 1:
785 mem_share = (64 / config->rx_ring_num);
786 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
787 continue;
788 case 2:
789 mem_share = (64 / config->rx_ring_num);
790 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
791 continue;
792 case 3:
793 mem_share = (64 / config->rx_ring_num);
794 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
795 continue;
796 case 4:
797 mem_share = (64 / config->rx_ring_num);
798 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
799 continue;
800 case 5:
801 mem_share = (64 / config->rx_ring_num);
802 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
803 continue;
804 case 6:
805 mem_share = (64 / config->rx_ring_num);
806 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
807 continue;
808 case 7:
809 mem_share = (64 / config->rx_ring_num);
810 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
811 continue;
812 }
813 }
814 writeq(val64, &bar0->rx_queue_cfg);
815
816 /*
817 * Initializing the Tx round robin registers to 0.
818 * Filling Tx and Rx round robin registers as per the
819 * number of FIFOs and Rings is still TODO.
820 */
821 writeq(0, &bar0->tx_w_round_robin_0);
822 writeq(0, &bar0->tx_w_round_robin_1);
823 writeq(0, &bar0->tx_w_round_robin_2);
824 writeq(0, &bar0->tx_w_round_robin_3);
825 writeq(0, &bar0->tx_w_round_robin_4);
826
827 /*
828 * TODO
 829	 * Disable Rx steering. Hard-coding all packets to be steered to
830 * Queue 0 for now.
831 */
832 val64 = 0x8080808080808080ULL;
833 writeq(val64, &bar0->rts_qos_steering);
834
835 /* UDP Fix */
836 val64 = 0;
837 for (i = 1; i < 8; i++)
838 writeq(val64, &bar0->rts_frm_len_n[i]);
839
840 /* Set rts_frm_len register for fifo 0 */
841 writeq(MAC_RTS_FRM_LEN_SET(dev->mtu + 22),
842 &bar0->rts_frm_len_n[0]);
843
844 /* Enable statistics */
845 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
846 val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
847 STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
848 writeq(val64, &bar0->stat_cfg);
849
850 /*
851 * Initializing the sampling rate for the device to calculate the
852 * bandwidth utilization.
853 */
854 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
855 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
856 writeq(val64, &bar0->mac_link_util);
857
858
859 /*
860 * Initializing the Transmit and Receive Traffic Interrupt
861 * Scheme.
862 */
863 /* TTI Initialization. Default Tx timer gets us about
864 * 250 interrupts per sec. Continuous interrupts are enabled
865 * by default.
866 */
867 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
868 TTI_DATA1_MEM_TX_URNG_A(0xA) |
869 TTI_DATA1_MEM_TX_URNG_B(0x10) |
870 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN |
871 TTI_DATA1_MEM_TX_TIMER_CI_EN;
872 writeq(val64, &bar0->tti_data1_mem);
873
874 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
875 TTI_DATA2_MEM_TX_UFC_B(0x20) |
876 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
877 writeq(val64, &bar0->tti_data2_mem);
878
879 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
880 writeq(val64, &bar0->tti_command_mem);
881
882 /*
883 * Once the operation completes, the Strobe bit of the command
 884	 * register will be reset. We poll for this particular condition.
 885	 * We wait for a maximum of 500ms for the operation to complete;
 886	 * if it's not complete by then we return an error.
887 */
888 time = 0;
889 while (TRUE) {
890 val64 = readq(&bar0->tti_command_mem);
891 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
892 break;
893 }
894 if (time > 10) {
895 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
896 dev->name);
897 return -1;
898 }
899 msleep(50);
900 time++;
901 }
902
903 /* RTI Initialization */
904 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
905 RTI_DATA1_MEM_RX_URNG_A(0xA) |
906 RTI_DATA1_MEM_RX_URNG_B(0x10) |
907 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
908
909 writeq(val64, &bar0->rti_data1_mem);
910
911 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
912 RTI_DATA2_MEM_RX_UFC_B(0x2) |
913 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
914 writeq(val64, &bar0->rti_data2_mem);
915
916 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
917 writeq(val64, &bar0->rti_command_mem);
918
919 /*
920 * Once the operation completes, the Strobe bit of the command
 921	 * register will be reset. We poll for this particular condition.
 922	 * We wait for a maximum of 500ms for the operation to complete;
 923	 * if it's not complete by then we return an error.
924 */
925 time = 0;
926 while (TRUE) {
927 val64 = readq(&bar0->rti_command_mem);
928 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
929 break;
930 }
931 if (time > 10) {
932 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
933 dev->name);
934 return -1;
935 }
936 time++;
937 msleep(50);
938 }
939
940 /*
 941	 * Initializing proper values as pause thresholds for all
 942	 * the 8 queues on the Rx side.
943 */
944 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
945 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
946
947 /* Disable RMAC PAD STRIPPING */
948 add = &bar0->mac_cfg;
949 val64 = readq(&bar0->mac_cfg);
950 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
951 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
952 writel((u32) (val64), add);
953 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
954 writel((u32) (val64 >> 32), (add + 4));
955 val64 = readq(&bar0->mac_cfg);
956
957 /*
958 * Set the time value to be inserted in the pause frame
959 * generated by xena.
960 */
961 val64 = readq(&bar0->rmac_pause_cfg);
962 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
963 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
964 writeq(val64, &bar0->rmac_pause_cfg);
965
966 /*
 967	 * Set the threshold limit for generating the pause frame.
 968	 * If the amount of data in any queue exceeds the ratio
 969	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
 970	 * a pause frame is generated.
971 */
972 val64 = 0;
973 for (i = 0; i < 4; i++) {
974 val64 |=
975 (((u64) 0xFF00 | nic->mac_control.
976 mc_pause_threshold_q0q3)
977 << (i * 2 * 8));
978 }
979 writeq(val64, &bar0->mc_pause_thresh_q0q3);
980
981 val64 = 0;
982 for (i = 0; i < 4; i++) {
983 val64 |=
984 (((u64) 0xFF00 | nic->mac_control.
985 mc_pause_threshold_q4q7)
986 << (i * 2 * 8));
987 }
988 writeq(val64, &bar0->mc_pause_thresh_q4q7);
989
990 /*
 991	 * TxDMA will stop issuing read requests if the number of read splits
 992	 * has exceeded the limit pointed to by shared_splits.
993 */
994 val64 = readq(&bar0->pic_control);
995 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
996 writeq(val64, &bar0->pic_control);
997
998 return SUCCESS;
999}
1000
1001/**
1002 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1003 * @nic: device private variable,
1004 * @mask: A mask indicating which Intr block must be modified and,
1005 * @flag: A flag indicating whether to enable or disable the Intrs.
1006 * Description: This function will either disable or enable the interrupts
1007 * depending on the flag argument. The mask argument can be used to
1008 * enable/disable any Intr block.
1009 * Return Value: NONE.
1010 */
1011
1012static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1013{
1014 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1015 register u64 val64 = 0, temp64 = 0;
1016
1017 /* Top level interrupt classification */
1018 /* PIC Interrupts */
1019 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1020 /* Enable PIC Intrs in the general intr mask register */
1021 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1022 if (flag == ENABLE_INTRS) {
1023 temp64 = readq(&bar0->general_int_mask);
1024 temp64 &= ~((u64) val64);
1025 writeq(temp64, &bar0->general_int_mask);
1026 /*
1027 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1028 * interrupts for now.
1029 * TODO
1030 */
1031 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1032 /*
1033 * No MSI Support is available presently, so TTI and
1034 * RTI interrupts are also disabled.
1035 */
1036 } else if (flag == DISABLE_INTRS) {
1037 /*
1038 * Disable PIC Intrs in the general
1039 * intr mask register
1040 */
1041 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1042 temp64 = readq(&bar0->general_int_mask);
1043 val64 |= temp64;
1044 writeq(val64, &bar0->general_int_mask);
1045 }
1046 }
1047
1048 /* DMA Interrupts */
1049 /* Enabling/Disabling Tx DMA interrupts */
1050 if (mask & TX_DMA_INTR) {
1051 /* Enable TxDMA Intrs in the general intr mask register */
1052 val64 = TXDMA_INT_M;
1053 if (flag == ENABLE_INTRS) {
1054 temp64 = readq(&bar0->general_int_mask);
1055 temp64 &= ~((u64) val64);
1056 writeq(temp64, &bar0->general_int_mask);
1057 /*
1058 * Keep all interrupts other than PFC interrupt
1059 * and PCC interrupt disabled in DMA level.
1060 */
1061 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1062 TXDMA_PCC_INT_M);
1063 writeq(val64, &bar0->txdma_int_mask);
1064 /*
1065 * Enable only the MISC error 1 interrupt in PFC block
1066 */
1067 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1068 writeq(val64, &bar0->pfc_err_mask);
1069 /*
1070 * Enable only the FB_ECC error interrupt in PCC block
1071 */
1072 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1073 writeq(val64, &bar0->pcc_err_mask);
1074 } else if (flag == DISABLE_INTRS) {
1075 /*
1076 * Disable TxDMA Intrs in the general intr mask
1077 * register
1078 */
1079 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1080 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1081 temp64 = readq(&bar0->general_int_mask);
1082 val64 |= temp64;
1083 writeq(val64, &bar0->general_int_mask);
1084 }
1085 }
1086
1087 /* Enabling/Disabling Rx DMA interrupts */
1088 if (mask & RX_DMA_INTR) {
1089 /* Enable RxDMA Intrs in the general intr mask register */
1090 val64 = RXDMA_INT_M;
1091 if (flag == ENABLE_INTRS) {
1092 temp64 = readq(&bar0->general_int_mask);
1093 temp64 &= ~((u64) val64);
1094 writeq(temp64, &bar0->general_int_mask);
1095 /*
1096 * All RxDMA block interrupts are disabled for now
1097 * TODO
1098 */
1099 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1100 } else if (flag == DISABLE_INTRS) {
1101 /*
1102 * Disable RxDMA Intrs in the general intr mask
1103 * register
1104 */
1105 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1106 temp64 = readq(&bar0->general_int_mask);
1107 val64 |= temp64;
1108 writeq(val64, &bar0->general_int_mask);
1109 }
1110 }
1111
1112 /* MAC Interrupts */
1113 /* Enabling/Disabling MAC interrupts */
1114 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1115 val64 = TXMAC_INT_M | RXMAC_INT_M;
1116 if (flag == ENABLE_INTRS) {
1117 temp64 = readq(&bar0->general_int_mask);
1118 temp64 &= ~((u64) val64);
1119 writeq(temp64, &bar0->general_int_mask);
1120 /*
1121 * All MAC block error interrupts are disabled for now
1122 * except the link status change interrupt.
1123 * TODO
1124 */
1125 val64 = MAC_INT_STATUS_RMAC_INT;
1126 temp64 = readq(&bar0->mac_int_mask);
1127 temp64 &= ~((u64) val64);
1128 writeq(temp64, &bar0->mac_int_mask);
1129
1130 val64 = readq(&bar0->mac_rmac_err_mask);
1131 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1132 writeq(val64, &bar0->mac_rmac_err_mask);
1133 } else if (flag == DISABLE_INTRS) {
1134 /*
1135 * Disable MAC Intrs in the general intr mask register
1136 */
1137 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1138 writeq(DISABLE_ALL_INTRS,
1139 &bar0->mac_rmac_err_mask);
1140
1141 temp64 = readq(&bar0->general_int_mask);
1142 val64 |= temp64;
1143 writeq(val64, &bar0->general_int_mask);
1144 }
1145 }
1146
1147 /* XGXS Interrupts */
1148 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1149 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1150 if (flag == ENABLE_INTRS) {
1151 temp64 = readq(&bar0->general_int_mask);
1152 temp64 &= ~((u64) val64);
1153 writeq(temp64, &bar0->general_int_mask);
1154 /*
1155 * All XGXS block error interrupts are disabled for now
1156 * TODO
1157 */
1158 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1159 } else if (flag == DISABLE_INTRS) {
1160 /*
1161 * Disable MC Intrs in the general intr mask register
1162			 * Disable XGXS Intrs in the general intr mask register
1163 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1164 temp64 = readq(&bar0->general_int_mask);
1165 val64 |= temp64;
1166 writeq(val64, &bar0->general_int_mask);
1167 }
1168 }
1169
1170 /* Memory Controller(MC) interrupts */
1171 if (mask & MC_INTR) {
1172 val64 = MC_INT_M;
1173 if (flag == ENABLE_INTRS) {
1174 temp64 = readq(&bar0->general_int_mask);
1175 temp64 &= ~((u64) val64);
1176 writeq(temp64, &bar0->general_int_mask);
1177 /*
1178 * All MC block error interrupts are disabled for now
1179 * TODO
1180 */
1181 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1182 } else if (flag == DISABLE_INTRS) {
1183 /*
1184 * Disable MC Intrs in the general intr mask register
1185 */
1186 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1187 temp64 = readq(&bar0->general_int_mask);
1188 val64 |= temp64;
1189 writeq(val64, &bar0->general_int_mask);
1190 }
1191 }
1192
1193
1194 /* Tx traffic interrupts */
1195 if (mask & TX_TRAFFIC_INTR) {
1196 val64 = TXTRAFFIC_INT_M;
1197 if (flag == ENABLE_INTRS) {
1198 temp64 = readq(&bar0->general_int_mask);
1199 temp64 &= ~((u64) val64);
1200 writeq(temp64, &bar0->general_int_mask);
1201 /*
1202			 * Enable all the Tx side interrupts;
1203			 * writing 0 enables all 64 TX interrupt levels.
1204 */
1205 writeq(0x0, &bar0->tx_traffic_mask);
1206 } else if (flag == DISABLE_INTRS) {
1207 /*
1208 * Disable Tx Traffic Intrs in the general intr mask
1209 * register.
1210 */
1211 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1212 temp64 = readq(&bar0->general_int_mask);
1213 val64 |= temp64;
1214 writeq(val64, &bar0->general_int_mask);
1215 }
1216 }
1217
1218 /* Rx traffic interrupts */
1219 if (mask & RX_TRAFFIC_INTR) {
1220 val64 = RXTRAFFIC_INT_M;
1221 if (flag == ENABLE_INTRS) {
1222 temp64 = readq(&bar0->general_int_mask);
1223 temp64 &= ~((u64) val64);
1224 writeq(temp64, &bar0->general_int_mask);
1225 /* writing 0 Enables all 8 RX interrupt levels */
1226 writeq(0x0, &bar0->rx_traffic_mask);
1227 } else if (flag == DISABLE_INTRS) {
1228 /*
1229 * Disable Rx Traffic Intrs in the general intr mask
1230 * register.
1231 */
1232 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1233 temp64 = readq(&bar0->general_int_mask);
1234 val64 |= temp64;
1235 writeq(val64, &bar0->general_int_mask);
1236 }
1237 }
1238}
1239
1240/**
1241 * verify_xena_quiescence - Checks whether the H/W is ready
1242 * @val64 : Value read from adapter status register.
1243 * @flag : indicates if the adapter enable bit was ever written once
1244 * before.
1245 * Description: Returns whether the H/W is ready to go or not. The
1246 * comparison differs depending on whether the adapter enable bit
1247 * was ever written, and the calling function passes the flag
1248 * argument to indicate this.
1249 * Return: 1 if Xena is quiescent
1250 *         0 if Xena is not quiescent
1251 */
1252
1253static int verify_xena_quiescence(u64 val64, int flag)
1254{
1255 int ret = 0;
1256 u64 tmp64 = ~((u64) val64);
1257
1258 if (!
1259 (tmp64 &
1260 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1261 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1262 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1263 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1264 ADAPTER_STATUS_P_PLL_LOCK))) {
1265 if (flag == FALSE) {
1266 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1267 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1268 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1269
1270 ret = 1;
1271
1272 }
1273 } else {
1274 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1275 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1276 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1277 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1278 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1279
1280 ret = 1;
1281
1282 }
1283 }
1284 }
1285
1286 return ret;
1287}
1288
1289/**
1290 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1291 * @sp: Pointer to device specific structure
1292 * Description :
1293 * New procedure to clear mac address reading problems on Alpha platforms
1294 *
1295 */
1296
1297static void fix_mac_address(nic_t * sp)
1298{
1299 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1300 u64 val64;
1301 int i = 0;
1302
1303 while (fix_mac[i] != END_SIGN) {
1304 writeq(fix_mac[i++], &bar0->gpio_control);
1305 val64 = readq(&bar0->gpio_control);
1306 }
1307}
1308
1309/**
1310 * start_nic - Turns the device on
1311 * @nic : device private variable.
1312 * Description:
1313 * This function actually turns the device on. Before this function is
1314 * called, all registers are configured from their reset states
1315 * and shared memory is allocated but the NIC is still quiescent. On
1316 * calling this function, the device interrupts are cleared and the NIC is
1317 * literally switched on by writing into the adapter control register.
1318 * Return Value:
1319 * SUCCESS on success and -1 on failure.
1320 */
1321
1322static int start_nic(struct s2io_nic *nic)
1323{
1324 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1325 struct net_device *dev = nic->dev;
1326 register u64 val64 = 0;
1327 u16 interruptible, i;
1328 u16 subid;
1329 mac_info_t *mac_control;
1330 struct config_param *config;
1331
1332 mac_control = &nic->mac_control;
1333 config = &nic->config;
1334
1335 /* PRC Initialization and configuration */
1336 for (i = 0; i < config->rx_ring_num; i++) {
1337 writeq((u64) nic->rx_blocks[i][0].block_dma_addr,
1338 &bar0->prc_rxd0_n[i]);
1339
1340 val64 = readq(&bar0->prc_ctrl_n[i]);
1341#ifndef CONFIG_2BUFF_MODE
1342 val64 |= PRC_CTRL_RC_ENABLED;
1343#else
1344 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1345#endif
1346 writeq(val64, &bar0->prc_ctrl_n[i]);
1347 }
1348
1349#ifdef CONFIG_2BUFF_MODE
1350 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1351 val64 = readq(&bar0->rx_pa_cfg);
1352 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1353 writeq(val64, &bar0->rx_pa_cfg);
1354#endif
1355
1356 /*
1357 * Enabling MC-RLDRAM. After enabling the device, we timeout
1358 * for around 100ms, which is approximately the time required
1359 * for the device to be ready for operation.
1360 */
1361 val64 = readq(&bar0->mc_rldram_mrs);
1362 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1363 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1364 val64 = readq(&bar0->mc_rldram_mrs);
1365
1366 msleep(100); /* Delay by around 100 ms. */
1367
1368 /* Enabling ECC Protection. */
1369 val64 = readq(&bar0->adapter_control);
1370 val64 &= ~ADAPTER_ECC_EN;
1371 writeq(val64, &bar0->adapter_control);
1372
1373 /*
1374 * Clearing any possible Link state change interrupts that
1375 * could have popped up just before Enabling the card.
1376 */
1377 val64 = readq(&bar0->mac_rmac_err_reg);
1378 if (val64)
1379 writeq(val64, &bar0->mac_rmac_err_reg);
1380
1381 /*
1382 * Verify if the device is ready to be enabled, if so enable
1383 * it.
1384 */
1385 val64 = readq(&bar0->adapter_status);
1386 if (!verify_xena_quiescence(val64, nic->device_enabled_once)) {
1387 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1388 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1389 (unsigned long long) val64);
1390 return FAILURE;
1391 }
1392
1393 /* Enable select interrupts */
1394 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1395 RX_MAC_INTR;
1396 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1397
1398 /*
1399 * With some switches, link might be already up at this point.
1400 * Because of this weird behavior, when we enable laser,
1401 * we may not get link. We need to handle this. We cannot
1402 * figure out which switch is misbehaving. So we are forced to
1403 * make a global change.
1404 */
1405
1406 /* Enabling Laser. */
1407 val64 = readq(&bar0->adapter_control);
1408 val64 |= ADAPTER_EOI_TX_ON;
1409 writeq(val64, &bar0->adapter_control);
1410
1411 /* SXE-002: Initialize link and activity LED */
1412 subid = nic->pdev->subsystem_device;
1413 if ((subid & 0xFF) >= 0x07) {
1414 val64 = readq(&bar0->gpio_control);
1415 val64 |= 0x0000800000000000ULL;
1416 writeq(val64, &bar0->gpio_control);
1417 val64 = 0x0411040400000000ULL;
1418 writeq(val64, (void __iomem *) bar0 + 0x2700);
1419 }
1420
1421 /*
1422 * Don't see link state interrupts on certain switches, so
1423 * directly scheduling a link state task from here.
1424 */
1425 schedule_work(&nic->set_link_task);
1426
1427 /*
1428 * Here we are performing soft reset on XGXS to
1429 * force link down. Since link is already up, we will get
1430 * link state change interrupt after this reset
1431 */
1432 SPECIAL_REG_WRITE(0x80010515001E0000ULL, &bar0->dtx_control, UF);
1433 val64 = readq(&bar0->dtx_control);
1434 udelay(50);
1435 SPECIAL_REG_WRITE(0x80010515001E00E0ULL, &bar0->dtx_control, UF);
1436 val64 = readq(&bar0->dtx_control);
1437 udelay(50);
1438 SPECIAL_REG_WRITE(0x80070515001F00E4ULL, &bar0->dtx_control, UF);
1439 val64 = readq(&bar0->dtx_control);
1440 udelay(50);
1441
1442 return SUCCESS;
1443}
1444
1445/**
1446 * free_tx_buffers - Free all queued Tx buffers
1447 * @nic : device private variable.
1448 * Description:
1449 * Free all queued Tx buffers.
1450 * Return Value: void
1451*/
1452
1453static void free_tx_buffers(struct s2io_nic *nic)
1454{
1455 struct net_device *dev = nic->dev;
1456 struct sk_buff *skb;
1457 TxD_t *txdp;
1458 int i, j;
1459 mac_info_t *mac_control;
1460 struct config_param *config;
1461 int cnt = 0;
1462
1463 mac_control = &nic->mac_control;
1464 config = &nic->config;
1465
1466 for (i = 0; i < config->tx_fifo_num; i++) {
1467 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1468 txdp = (TxD_t *) nic->list_info[i][j].
1469 list_virt_addr;
1470 skb =
1471 (struct sk_buff *) ((unsigned long) txdp->
1472 Host_Control);
1473 if (skb == NULL) {
1474 memset(txdp, 0, sizeof(TxD_t));
1475 continue;
1476 }
1477 dev_kfree_skb(skb);
1478 memset(txdp, 0, sizeof(TxD_t));
1479 cnt++;
1480 }
1481 DBG_PRINT(INTR_DBG,
1482 "%s:forcibly freeing %d skbs on FIFO%d\n",
1483 dev->name, cnt, i);
1484 mac_control->tx_curr_get_info[i].offset = 0;
1485 mac_control->tx_curr_put_info[i].offset = 0;
1486 }
1487}
1488
1489/**
1490 * stop_nic - To stop the nic
1491 * @nic : device private variable.
1492 * Description:
1493 * This function does exactly the opposite of what the start_nic()
1494 * function does. This function is called to stop the device.
1495 * Return Value:
1496 * void.
1497 */
1498
1499static void stop_nic(struct s2io_nic *nic)
1500{
1501 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1502 register u64 val64 = 0;
1503 u16 interruptible, i;
1504 mac_info_t *mac_control;
1505 struct config_param *config;
1506
1507 mac_control = &nic->mac_control;
1508 config = &nic->config;
1509
1510 /* Disable all interrupts */
1511 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1512 RX_MAC_INTR;
1513 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1514
1515 /* Disable PRCs */
1516 for (i = 0; i < config->rx_ring_num; i++) {
1517 val64 = readq(&bar0->prc_ctrl_n[i]);
1518 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1519 writeq(val64, &bar0->prc_ctrl_n[i]);
1520 }
1521}
1522
1523/**
1524 * fill_rx_buffers - Allocates the Rx side skbs
1525 * @nic: device private variable
1526 * @ring_no: ring number
1527 * Description:
1528 * The function allocates Rx side skbs and puts the physical
1529 * address of these buffers into the RxD buffer pointers, so that the NIC
1530 * can DMA the received frame into these locations.
1531 * The NIC supports 3 receive modes, viz
1532 * 1. single buffer,
1533 * 2. three buffer and
1534 * 3. Five buffer modes.
1535 * Each mode defines how many fragments the received frame will be split
1536 * up into by the NIC. In three buffer mode the frame is split into
1537 * L3 header, L4 header and L4 payload; in five buffer mode the L4
1538 * payload itself is split into 3 fragments. As of now only single buffer mode is
1539 * supported.
1540 * Return Value:
1541 * SUCCESS on success or an appropriate -ve value on failure.
1542 */
1543
1544static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1545{
1546 struct net_device *dev = nic->dev;
1547 struct sk_buff *skb;
1548 RxD_t *rxdp;
1549 int off, off1, size, block_no, block_no1;
1550 int offset, offset1;
1551 u32 alloc_tab = 0;
1552 u32 alloc_cnt = nic->pkt_cnt[ring_no] -
1553 atomic_read(&nic->rx_bufs_left[ring_no]);
1554 mac_info_t *mac_control;
1555 struct config_param *config;
1556#ifdef CONFIG_2BUFF_MODE
1557 RxD_t *rxdpnext;
1558 int nextblk;
1559 unsigned long tmp;
1560 buffAdd_t *ba;
1561 dma_addr_t rxdpphys;
1562#endif
1563#ifndef CONFIG_S2IO_NAPI
1564 unsigned long flags;
1565#endif
1566
1567 mac_control = &nic->mac_control;
1568 config = &nic->config;
1569
1570 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1571 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1572
1573 while (alloc_tab < alloc_cnt) {
1574 block_no = mac_control->rx_curr_put_info[ring_no].
1575 block_index;
1576 block_no1 = mac_control->rx_curr_get_info[ring_no].
1577 block_index;
1578 off = mac_control->rx_curr_put_info[ring_no].offset;
1579 off1 = mac_control->rx_curr_get_info[ring_no].offset;
1580#ifndef CONFIG_2BUFF_MODE
1581 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1582 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1583#else
1584 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1585 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1586#endif
1587
1588 rxdp = nic->rx_blocks[ring_no][block_no].
1589 block_virt_addr + off;
1590 if ((offset == offset1) && (rxdp->Host_Control)) {
1591 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1592 DBG_PRINT(INTR_DBG, " info equated\n");
1593 goto end;
1594 }
1595#ifndef CONFIG_2BUFF_MODE
1596 if (rxdp->Control_1 == END_OF_BLOCK) {
1597 mac_control->rx_curr_put_info[ring_no].
1598 block_index++;
1599 mac_control->rx_curr_put_info[ring_no].
1600 block_index %= nic->block_count[ring_no];
1601 block_no = mac_control->rx_curr_put_info
1602 [ring_no].block_index;
1603 off++;
1604 off %= (MAX_RXDS_PER_BLOCK + 1);
1605 mac_control->rx_curr_put_info[ring_no].offset =
1606 off;
1607 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1608 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1609 dev->name, rxdp);
1610 }
1611#ifndef CONFIG_S2IO_NAPI
1612 spin_lock_irqsave(&nic->put_lock, flags);
1613 nic->put_pos[ring_no] =
1614 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1615 spin_unlock_irqrestore(&nic->put_lock, flags);
1616#endif
1617#else
1618 if (rxdp->Host_Control == END_OF_BLOCK) {
1619 mac_control->rx_curr_put_info[ring_no].
1620 block_index++;
1621 mac_control->rx_curr_put_info[ring_no].
1622 block_index %= nic->block_count[ring_no];
1623 block_no = mac_control->rx_curr_put_info
1624 [ring_no].block_index;
1625 off = 0;
1626 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1627 dev->name, block_no,
1628 (unsigned long long) rxdp->Control_1);
1629 mac_control->rx_curr_put_info[ring_no].offset =
1630 off;
1631 rxdp = nic->rx_blocks[ring_no][block_no].
1632 block_virt_addr;
1633 }
1634#ifndef CONFIG_S2IO_NAPI
1635 spin_lock_irqsave(&nic->put_lock, flags);
1636 nic->put_pos[ring_no] = (block_no *
1637 (MAX_RXDS_PER_BLOCK + 1)) + off;
1638 spin_unlock_irqrestore(&nic->put_lock, flags);
1639#endif
1640#endif
1641
1642#ifndef CONFIG_2BUFF_MODE
1643 if (rxdp->Control_1 & RXD_OWN_XENA)
1644#else
1645 if (rxdp->Control_2 & BIT(0))
1646#endif
1647 {
1648 mac_control->rx_curr_put_info[ring_no].
1649 offset = off;
1650 goto end;
1651 }
1652#ifdef CONFIG_2BUFF_MODE
1653 /*
1654 * RxDs Spanning cache lines will be replenished only
1655 * if the succeeding RxD is also owned by Host. It
1656 * will always be the ((8*i)+3) and ((8*i)+6)
1657 * descriptors for the 48 byte descriptor. The offending
1658 * decsriptor is of-course the 3rd descriptor.
1659		 * descriptor is of course the 3rd descriptor.
1660 rxdpphys = nic->rx_blocks[ring_no][block_no].
1661 block_dma_addr + (off * sizeof(RxD_t));
1662 if (((u64) (rxdpphys)) % 128 > 80) {
1663 rxdpnext = nic->rx_blocks[ring_no][block_no].
1664 block_virt_addr + (off + 1);
1665 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1666 nextblk = (block_no + 1) %
1667 (nic->block_count[ring_no]);
1668 rxdpnext = nic->rx_blocks[ring_no]
1669 [nextblk].block_virt_addr;
1670 }
1671 if (rxdpnext->Control_2 & BIT(0))
1672 goto end;
1673 }
1674#endif
1675
1676#ifndef CONFIG_2BUFF_MODE
1677 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1678#else
1679 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1680#endif
1681 if (!skb) {
1682 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1683 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1684 return -ENOMEM;
1685 }
1686#ifndef CONFIG_2BUFF_MODE
1687 skb_reserve(skb, NET_IP_ALIGN);
1688 memset(rxdp, 0, sizeof(RxD_t));
1689 rxdp->Buffer0_ptr = pci_map_single
1690 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1691 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1692 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1693 rxdp->Host_Control = (unsigned long) (skb);
1694 rxdp->Control_1 |= RXD_OWN_XENA;
1695 off++;
1696 off %= (MAX_RXDS_PER_BLOCK + 1);
1697 mac_control->rx_curr_put_info[ring_no].offset = off;
1698#else
1699 ba = &nic->ba[ring_no][block_no][off];
1700 skb_reserve(skb, BUF0_LEN);
1701 tmp = (unsigned long) skb->data;
1702 tmp += ALIGN_SIZE;
1703 tmp &= ~ALIGN_SIZE;
1704 skb->data = (void *) tmp;
1705 skb->tail = (void *) tmp;
1706
1707 memset(rxdp, 0, sizeof(RxD_t));
1708 rxdp->Buffer2_ptr = pci_map_single
1709 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1710 PCI_DMA_FROMDEVICE);
1711 rxdp->Buffer0_ptr =
1712 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1713 PCI_DMA_FROMDEVICE);
1714 rxdp->Buffer1_ptr =
1715 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1716 PCI_DMA_FROMDEVICE);
1717
1718 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1719 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1720 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1721 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1722 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1723 rxdp->Control_1 |= RXD_OWN_XENA;
1724 off++;
1725 mac_control->rx_curr_put_info[ring_no].offset = off;
1726#endif
1727 atomic_inc(&nic->rx_bufs_left[ring_no]);
1728 alloc_tab++;
1729 }
1730
1731 end:
1732 return SUCCESS;
1733}
1734
1735/**
1736 * free_rx_buffers - Frees all Rx buffers
1737 * @sp: device private variable.
1738 * Description:
1739 * This function will free all Rx buffers allocated by host.
1740 * Return Value:
1741 * NONE.
1742 */
1743
1744static void free_rx_buffers(struct s2io_nic *sp)
1745{
1746 struct net_device *dev = sp->dev;
1747 int i, j, blk = 0, off, buf_cnt = 0;
1748 RxD_t *rxdp;
1749 struct sk_buff *skb;
1750 mac_info_t *mac_control;
1751 struct config_param *config;
1752#ifdef CONFIG_2BUFF_MODE
1753 buffAdd_t *ba;
1754#endif
1755
1756 mac_control = &sp->mac_control;
1757 config = &sp->config;
1758
1759 for (i = 0; i < config->rx_ring_num; i++) {
1760 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
1761 off = j % (MAX_RXDS_PER_BLOCK + 1);
1762 rxdp = sp->rx_blocks[i][blk].block_virt_addr + off;
1763
1764#ifndef CONFIG_2BUFF_MODE
1765 if (rxdp->Control_1 == END_OF_BLOCK) {
1766 rxdp =
1767 (RxD_t *) ((unsigned long) rxdp->
1768 Control_2);
1769 j++;
1770 blk++;
1771 }
1772#else
1773 if (rxdp->Host_Control == END_OF_BLOCK) {
1774 blk++;
1775 continue;
1776 }
1777#endif
1778
1779 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
1780 memset(rxdp, 0, sizeof(RxD_t));
1781 continue;
1782 }
1783
1784 skb =
1785 (struct sk_buff *) ((unsigned long) rxdp->
1786 Host_Control);
1787 if (skb) {
1788#ifndef CONFIG_2BUFF_MODE
1789 pci_unmap_single(sp->pdev, (dma_addr_t)
1790 rxdp->Buffer0_ptr,
1791 dev->mtu +
1792 HEADER_ETHERNET_II_802_3_SIZE
1793 + HEADER_802_2_SIZE +
1794 HEADER_SNAP_SIZE,
1795 PCI_DMA_FROMDEVICE);
1796#else
1797 ba = &sp->ba[i][blk][off];
1798 pci_unmap_single(sp->pdev, (dma_addr_t)
1799 rxdp->Buffer0_ptr,
1800 BUF0_LEN,
1801 PCI_DMA_FROMDEVICE);
1802 pci_unmap_single(sp->pdev, (dma_addr_t)
1803 rxdp->Buffer1_ptr,
1804 BUF1_LEN,
1805 PCI_DMA_FROMDEVICE);
1806 pci_unmap_single(sp->pdev, (dma_addr_t)
1807 rxdp->Buffer2_ptr,
1808 dev->mtu + BUF0_LEN + 4,
1809 PCI_DMA_FROMDEVICE);
1810#endif
1811 dev_kfree_skb(skb);
1812 atomic_dec(&sp->rx_bufs_left[i]);
1813 buf_cnt++;
1814 }
1815 memset(rxdp, 0, sizeof(RxD_t));
1816 }
1817 mac_control->rx_curr_put_info[i].block_index = 0;
1818 mac_control->rx_curr_get_info[i].block_index = 0;
1819 mac_control->rx_curr_put_info[i].offset = 0;
1820 mac_control->rx_curr_get_info[i].offset = 0;
1821 atomic_set(&sp->rx_bufs_left[i], 0);
1822 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
1823 dev->name, buf_cnt, i);
1824 }
1825}
1826
1827/**
1828 * s2io_poll - Rx interrupt handler for NAPI support
1829 * @dev : pointer to the device structure.
1830 * @budget : The number of packets that were budgeted to be processed
1831 * during one pass through the 'Poll" function.
1832 * Description:
1833 * Comes into the picture only if NAPI support has been incorporated. It does
1834 * the same thing that rx_intr_handler does, but not in an interrupt context,
1835 * and it will process only a given number of packets.
1836 * Return value:
1837 * 0 on success and 1 if there are No Rx packets to be processed.
1838 */
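/*
 * A worked example of the budget/quota accounting done below (numbers are
 * illustrative only): if the core passes in *budget = 64 while dev->quota
 * is 32, at most 32 packets are processed in this pass; the number actually
 * handled (pkt_cnt) is then subtracted from both dev->quota and *budget
 * before returning 0 (all done) or 1 (more packets still pending).
 */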
1839
1840#ifdef CONFIG_S2IO_NAPI
1841static int s2io_poll(struct net_device *dev, int *budget)
1842{
1843 nic_t *nic = dev->priv;
1844 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1845 int pkts_to_process = *budget, pkt_cnt = 0;
1846 register u64 val64 = 0;
1847 rx_curr_get_info_t get_info, put_info;
1848 int i, get_block, put_block, get_offset, put_offset, ring_bufs;
1849#ifndef CONFIG_2BUFF_MODE
1850 u16 val16, cksum;
1851#endif
1852 struct sk_buff *skb;
1853 RxD_t *rxdp;
1854 mac_info_t *mac_control;
1855 struct config_param *config;
1856#ifdef CONFIG_2BUFF_MODE
1857 buffAdd_t *ba;
1858#endif
1859
1860 mac_control = &nic->mac_control;
1861 config = &nic->config;
1862
1863 if (pkts_to_process > dev->quota)
1864 pkts_to_process = dev->quota;
1865
1866 val64 = readq(&bar0->rx_traffic_int);
1867 writeq(val64, &bar0->rx_traffic_int);
1868
1869 for (i = 0; i < config->rx_ring_num; i++) {
1870 get_info = mac_control->rx_curr_get_info[i];
1871 get_block = get_info.block_index;
1872 put_info = mac_control->rx_curr_put_info[i];
1873 put_block = put_info.block_index;
1874 ring_bufs = config->rx_cfg[i].num_rxd;
1875 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
1876 get_info.offset;
1877#ifndef CONFIG_2BUFF_MODE
1878 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1879 get_info.offset;
1880 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1881 put_info.offset;
1882 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1883 (((get_offset + 1) % ring_bufs) != put_offset)) {
1884 if (--pkts_to_process < 0) {
1885 goto no_rx;
1886 }
1887 if (rxdp->Control_1 == END_OF_BLOCK) {
1888 rxdp =
1889 (RxD_t *) ((unsigned long) rxdp->
1890 Control_2);
1891 get_info.offset++;
1892 get_info.offset %=
1893 (MAX_RXDS_PER_BLOCK + 1);
1894 get_block++;
1895 get_block %= nic->block_count[i];
1896 mac_control->rx_curr_get_info[i].
1897 offset = get_info.offset;
1898 mac_control->rx_curr_get_info[i].
1899 block_index = get_block;
1900 continue;
1901 }
1902 get_offset =
1903 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1904 get_info.offset;
1905 skb =
1906 (struct sk_buff *) ((unsigned long) rxdp->
1907 Host_Control);
1908 if (skb == NULL) {
1909 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1910 dev->name);
1911 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1912 goto no_rx;
1913 }
1914 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
1915 val16 = (u16) (val64 >> 48);
1916 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
1917 pci_unmap_single(nic->pdev, (dma_addr_t)
1918 rxdp->Buffer0_ptr,
1919 dev->mtu +
1920 HEADER_ETHERNET_II_802_3_SIZE +
1921 HEADER_802_2_SIZE +
1922 HEADER_SNAP_SIZE,
1923 PCI_DMA_FROMDEVICE);
1924 rx_osm_handler(nic, val16, rxdp, i);
1925 pkt_cnt++;
1926 get_info.offset++;
1927 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
1928 rxdp =
1929 nic->rx_blocks[i][get_block].block_virt_addr +
1930 get_info.offset;
1931 mac_control->rx_curr_get_info[i].offset =
1932 get_info.offset;
1933 }
1934#else
1935 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1936 get_info.offset;
1937 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1938 put_info.offset;
1939 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1940 !(rxdp->Control_2 & BIT(0))) &&
1941 (((get_offset + 1) % ring_bufs) != put_offset)) {
1942 if (--pkts_to_process < 0) {
1943 goto no_rx;
1944 }
1945 skb = (struct sk_buff *) ((unsigned long)
1946 rxdp->Host_Control);
1947 if (skb == NULL) {
1948 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1949 dev->name);
1950 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1951 goto no_rx;
1952 }
1953
1954 pci_unmap_single(nic->pdev, (dma_addr_t)
1955 rxdp->Buffer0_ptr,
1956 BUF0_LEN, PCI_DMA_FROMDEVICE);
1957 pci_unmap_single(nic->pdev, (dma_addr_t)
1958 rxdp->Buffer1_ptr,
1959 BUF1_LEN, PCI_DMA_FROMDEVICE);
1960 pci_unmap_single(nic->pdev, (dma_addr_t)
1961 rxdp->Buffer2_ptr,
1962 dev->mtu + BUF0_LEN + 4,
1963 PCI_DMA_FROMDEVICE);
1964 ba = &nic->ba[i][get_block][get_info.offset];
1965
1966 rx_osm_handler(nic, rxdp, i, ba);
1967
1968 get_info.offset++;
1969 mac_control->rx_curr_get_info[i].offset =
1970 get_info.offset;
1971 rxdp =
1972 nic->rx_blocks[i][get_block].block_virt_addr +
1973 get_info.offset;
1974
1975 if (get_info.offset &&
1976 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
1977 get_info.offset = 0;
1978 mac_control->rx_curr_get_info[i].
1979 offset = get_info.offset;
1980 get_block++;
1981 get_block %= nic->block_count[i];
1982 mac_control->rx_curr_get_info[i].
1983 block_index = get_block;
1984 rxdp =
1985 nic->rx_blocks[i][get_block].
1986 block_virt_addr;
1987 }
1988 get_offset =
1989 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1990 get_info.offset;
1991 pkt_cnt++;
1992 }
1993#endif
1994 }
1995 if (!pkt_cnt)
1996 pkt_cnt = 1;
1997
1998 dev->quota -= pkt_cnt;
1999 *budget -= pkt_cnt;
2000 netif_rx_complete(dev);
2001
2002 for (i = 0; i < config->rx_ring_num; i++) {
2003 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2004 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2005 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2006 break;
2007 }
2008 }
2009	/* Re-enable the Rx interrupts. */
2010 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2011 return 0;
2012
2013 no_rx:
2014 dev->quota -= pkt_cnt;
2015 *budget -= pkt_cnt;
2016
2017 for (i = 0; i < config->rx_ring_num; i++) {
2018 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2019 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2020 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2021 break;
2022 }
2023 }
2024 return 1;
2025}
2026#else
2027/**
2028 * rx_intr_handler - Rx interrupt handler
2029 * @nic: device private variable.
2030 * Description:
2031 * If the interrupt is because of a received frame or if the
2032 * receive ring contains fresh, as yet un-processed frames, this function is
2033 * called. It picks out the RxD at which the last Rx processing had
2034 * stopped and sends the skb to the OSM's Rx handler and then increments
2035 * the offset.
2036 * Return Value:
2037 * NONE.
2038 */
2039
2040static void rx_intr_handler(struct s2io_nic *nic)
2041{
2042 struct net_device *dev = (struct net_device *) nic->dev;
2043	XENA_dev_config_t __iomem *bar0 = nic->bar0;
2044 rx_curr_get_info_t get_info, put_info;
2045 RxD_t *rxdp;
2046 struct sk_buff *skb;
2047#ifndef CONFIG_2BUFF_MODE
2048 u16 val16, cksum;
2049#endif
2050 register u64 val64 = 0;
2051 int get_block, get_offset, put_block, put_offset, ring_bufs;
2052 int i, pkt_cnt = 0;
2053 mac_info_t *mac_control;
2054 struct config_param *config;
2055#ifdef CONFIG_2BUFF_MODE
2056 buffAdd_t *ba;
2057#endif
2058
2059 mac_control = &nic->mac_control;
2060 config = &nic->config;
2061
2062 /*
2063 * rx_traffic_int reg is an R1 register, hence we read and write back
2064	 * the same value in the register to clear it.
2065 */
2066 val64 = readq(&bar0->rx_traffic_int);
2067 writeq(val64, &bar0->rx_traffic_int);
2068
2069 for (i = 0; i < config->rx_ring_num; i++) {
2070 get_info = mac_control->rx_curr_get_info[i];
2071 get_block = get_info.block_index;
2072 put_info = mac_control->rx_curr_put_info[i];
2073 put_block = put_info.block_index;
2074 ring_bufs = config->rx_cfg[i].num_rxd;
2075 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
2076 get_info.offset;
2077#ifndef CONFIG_2BUFF_MODE
2078 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2079 get_info.offset;
2080 spin_lock(&nic->put_lock);
2081 put_offset = nic->put_pos[i];
2082 spin_unlock(&nic->put_lock);
2083 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
2084 (((get_offset + 1) % ring_bufs) != put_offset)) {
2085 if (rxdp->Control_1 == END_OF_BLOCK) {
2086 rxdp = (RxD_t *) ((unsigned long)
2087 rxdp->Control_2);
2088 get_info.offset++;
2089 get_info.offset %=
2090 (MAX_RXDS_PER_BLOCK + 1);
2091 get_block++;
2092 get_block %= nic->block_count[i];
2093 mac_control->rx_curr_get_info[i].
2094 offset = get_info.offset;
2095 mac_control->rx_curr_get_info[i].
2096 block_index = get_block;
2097 continue;
2098 }
2099 get_offset =
2100 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2101 get_info.offset;
2102 skb = (struct sk_buff *) ((unsigned long)
2103 rxdp->Host_Control);
2104 if (skb == NULL) {
2105 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2106 dev->name);
2107 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2108 return;
2109 }
2110 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
2111 val16 = (u16) (val64 >> 48);
2112 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
2113 pci_unmap_single(nic->pdev, (dma_addr_t)
2114 rxdp->Buffer0_ptr,
2115 dev->mtu +
2116 HEADER_ETHERNET_II_802_3_SIZE +
2117 HEADER_802_2_SIZE +
2118 HEADER_SNAP_SIZE,
2119 PCI_DMA_FROMDEVICE);
2120 rx_osm_handler(nic, val16, rxdp, i);
2121 get_info.offset++;
2122 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
2123 rxdp =
2124 nic->rx_blocks[i][get_block].block_virt_addr +
2125 get_info.offset;
2126 mac_control->rx_curr_get_info[i].offset =
2127 get_info.offset;
2128 pkt_cnt++;
2129 if ((indicate_max_pkts)
2130 && (pkt_cnt > indicate_max_pkts))
2131 break;
2132 }
2133#else
2134 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2135 get_info.offset;
2136 spin_lock(&nic->put_lock);
2137 put_offset = nic->put_pos[i];
2138 spin_unlock(&nic->put_lock);
2139 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
2140 !(rxdp->Control_2 & BIT(0))) &&
2141 (((get_offset + 1) % ring_bufs) != put_offset)) {
2142 skb = (struct sk_buff *) ((unsigned long)
2143 rxdp->Host_Control);
2144 if (skb == NULL) {
2145 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2146 dev->name);
2147 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2148 return;
2149 }
2150
2151 pci_unmap_single(nic->pdev, (dma_addr_t)
2152 rxdp->Buffer0_ptr,
2153 BUF0_LEN, PCI_DMA_FROMDEVICE);
2154 pci_unmap_single(nic->pdev, (dma_addr_t)
2155 rxdp->Buffer1_ptr,
2156 BUF1_LEN, PCI_DMA_FROMDEVICE);
2157 pci_unmap_single(nic->pdev, (dma_addr_t)
2158 rxdp->Buffer2_ptr,
2159 dev->mtu + BUF0_LEN + 4,
2160 PCI_DMA_FROMDEVICE);
2161 ba = &nic->ba[i][get_block][get_info.offset];
2162
2163 rx_osm_handler(nic, rxdp, i, ba);
2164
2165 get_info.offset++;
2166 mac_control->rx_curr_get_info[i].offset =
2167 get_info.offset;
2168 rxdp =
2169 nic->rx_blocks[i][get_block].block_virt_addr +
2170 get_info.offset;
2171
2172 if (get_info.offset &&
2173 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2174 get_info.offset = 0;
2175 mac_control->rx_curr_get_info[i].
2176 offset = get_info.offset;
2177 get_block++;
2178 get_block %= nic->block_count[i];
2179 mac_control->rx_curr_get_info[i].
2180 block_index = get_block;
2181 rxdp =
2182 nic->rx_blocks[i][get_block].
2183 block_virt_addr;
2184 }
2185 get_offset =
2186 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2187 get_info.offset;
2188 pkt_cnt++;
2189 if ((indicate_max_pkts)
2190 && (pkt_cnt > indicate_max_pkts))
2191 break;
2192 }
2193#endif
2194 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2195 break;
2196 }
2197}
2198#endif
2199/**
2200 * tx_intr_handler - Transmit interrupt handler
2201 * @nic : device private variable
2202 * Description:
2203 * If an interrupt was raised to indicate DMA complete of the
2204 * Tx packet, this function is called. It identifies the last TxD
2205 * whose buffer was freed and frees all skbs whose data have already been
2206 * DMA'ed into the NIC's internal memory.
2207 * Return Value:
2208 * NONE
2209 */
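/*
 * A sketch of the descriptor layout walked below: each entry in a Tx FIFO
 * is a list of TxDs, where the first TxD maps the linear part of the skb
 * (mapped with pci_map_single in s2io_xmit) and every following TxD maps
 * one page fragment (mapped with pci_map_page), so the teardown here
 * unmaps them in the same order before freeing the skb.
 */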
2210
2211static void tx_intr_handler(struct s2io_nic *nic)
2212{
2213 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2214 struct net_device *dev = (struct net_device *) nic->dev;
2215 tx_curr_get_info_t get_info, put_info;
2216 struct sk_buff *skb;
2217 TxD_t *txdlp;
2218 register u64 val64 = 0;
2219 int i;
2220 u16 j, frg_cnt;
2221 mac_info_t *mac_control;
2222 struct config_param *config;
2223
2224 mac_control = &nic->mac_control;
2225 config = &nic->config;
2226
2227 /*
2228 * tx_traffic_int reg is an R1 register, hence we read and write
2229	 * back the same value in the register to clear it.
2230 */
2231 val64 = readq(&bar0->tx_traffic_int);
2232 writeq(val64, &bar0->tx_traffic_int);
2233
2234 for (i = 0; i < config->tx_fifo_num; i++) {
2235 get_info = mac_control->tx_curr_get_info[i];
2236 put_info = mac_control->tx_curr_put_info[i];
2237 txdlp = (TxD_t *) nic->list_info[i][get_info.offset].
2238 list_virt_addr;
2239 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2240 (get_info.offset != put_info.offset) &&
2241 (txdlp->Host_Control)) {
2242 /* Check for TxD errors */
2243 if (txdlp->Control_1 & TXD_T_CODE) {
2244 unsigned long long err;
2245 err = txdlp->Control_1 & TXD_T_CODE;
2246 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2247 err);
2248 }
2249
2250 skb = (struct sk_buff *) ((unsigned long)
2251 txdlp->Host_Control);
2252 if (skb == NULL) {
2253 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2254 dev->name);
2255 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2256 return;
2257 }
2258 nic->tx_pkt_count++;
2259
2260 frg_cnt = skb_shinfo(skb)->nr_frags;
2261
2262 /* For unfragmented skb */
2263 pci_unmap_single(nic->pdev, (dma_addr_t)
2264 txdlp->Buffer_Pointer,
2265 skb->len - skb->data_len,
2266 PCI_DMA_TODEVICE);
2267 if (frg_cnt) {
2268 TxD_t *temp = txdlp;
2269 txdlp++;
2270 for (j = 0; j < frg_cnt; j++, txdlp++) {
2271 skb_frag_t *frag =
2272 &skb_shinfo(skb)->frags[j];
2273 pci_unmap_page(nic->pdev,
2274 (dma_addr_t)
2275 txdlp->
2276 Buffer_Pointer,
2277 frag->size,
2278 PCI_DMA_TODEVICE);
2279 }
2280 txdlp = temp;
2281 }
2282 memset(txdlp, 0,
2283 (sizeof(TxD_t) * config->max_txds));
2284
2285 /* Updating the statistics block */
2286 nic->stats.tx_packets++;
2287 nic->stats.tx_bytes += skb->len;
2288 dev_kfree_skb_irq(skb);
2289
2290 get_info.offset++;
2291 get_info.offset %= get_info.fifo_len + 1;
2292 txdlp = (TxD_t *) nic->list_info[i]
2293 [get_info.offset].list_virt_addr;
2294 mac_control->tx_curr_get_info[i].offset =
2295 get_info.offset;
2296 }
2297 }
2298
2299 spin_lock(&nic->tx_lock);
2300 if (netif_queue_stopped(dev))
2301 netif_wake_queue(dev);
2302 spin_unlock(&nic->tx_lock);
2303}
2304
2305/**
2306 * alarm_intr_handler - Alarm Interrupt handler
2307 * @nic: device private variable
2308 * Description: If the interrupt was neither because of an Rx packet nor a Tx
2309 * completion, this function is called. If the interrupt was to indicate
2310 * a loss of link, the OSM link status handler is invoked; for any other
2311 * alarm interrupt the block that raised the interrupt is displayed
2312 * and a H/W reset is issued.
2313 * Return Value:
2314 * NONE
2315*/
2316
2317static void alarm_intr_handler(struct s2io_nic *nic)
2318{
2319 struct net_device *dev = (struct net_device *) nic->dev;
2320 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2321 register u64 val64 = 0, err_reg = 0;
2322
2323 /* Handling link status change error Intr */
2324 err_reg = readq(&bar0->mac_rmac_err_reg);
2325 writeq(err_reg, &bar0->mac_rmac_err_reg);
2326 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2327 schedule_work(&nic->set_link_task);
2328 }
2329
2330 /* In case of a serious error, the device will be Reset. */
2331 val64 = readq(&bar0->serr_source);
2332 if (val64 & SERR_SOURCE_ANY) {
2333 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2334 DBG_PRINT(ERR_DBG, "serious error!!\n");
2335 netif_stop_queue(dev);
2336 schedule_work(&nic->rst_timer_task);
2337 }
2338
2339 /*
2340	 * Also, as mentioned in the latest Errata sheets, if a PCC_FB_ECC
2341	 * Error occurs, the adapter will be recycled by disabling the
2342 * adapter enable bit and enabling it again after the device
2343 * becomes Quiescent.
2344 */
2345 val64 = readq(&bar0->pcc_err_reg);
2346 writeq(val64, &bar0->pcc_err_reg);
2347 if (val64 & PCC_FB_ECC_DB_ERR) {
2348 u64 ac = readq(&bar0->adapter_control);
2349 ac &= ~(ADAPTER_CNTL_EN);
2350 writeq(ac, &bar0->adapter_control);
2351 ac = readq(&bar0->adapter_control);
2352 schedule_work(&nic->set_link_task);
2353 }
2354
2355	/* Other types of interrupts are not being handled now, TODO */
2356}
2357
2358/**
2359 * wait_for_cmd_complete - waits for a command to complete.
2360 * @sp : private member of the device structure, which is a pointer to the
2361 * s2io_nic structure.
2362 * Description: Function that waits for a command written into the RMAC
2363 * ADDR/DATA registers to be completed and returns either success or
2364 * error depending on whether the command completed or not.
2365 * Return value:
2366 * SUCCESS on success and FAILURE on failure.
2367 */
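/*
 * A rough bound on the wait implemented below: the strobe bit is polled
 * about every 50ms for up to roughly ten iterations, so a command gets on
 * the order of half a second to finish before FAILURE is returned.
 */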
2368
2369static int wait_for_cmd_complete(nic_t * sp)
2370{
2371 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2372 int ret = FAILURE, cnt = 0;
2373 u64 val64;
2374
2375 while (TRUE) {
2376 val64 = readq(&bar0->rmac_addr_cmd_mem);
2377 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2378 ret = SUCCESS;
2379 break;
2380 }
2381 msleep(50);
2382 if (cnt++ > 10)
2383 break;
2384 }
2385
2386 return ret;
2387}
2388
2389/**
2390 * s2io_reset - Resets the card.
2391 * @sp : private member of the device structure.
2392 * Description: Function to Reset the card. This function then also
2393 * restores the previously saved PCI configuration space registers as
2394 * the card reset also resets the configuration space.
2395 * Return value:
2396 * void.
2397 */
2398
2399static void s2io_reset(nic_t * sp)
2400{
2401 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2402 u64 val64;
2403 u16 subid;
2404
2405 val64 = SW_RESET_ALL;
2406 writeq(val64, &bar0->sw_reset);
2407
2408 /*
2409 * At this stage, if the PCI write is indeed completed, the
2410 * card is reset and so is the PCI Config space of the device.
2411 * So a read cannot be issued at this stage on any of the
2412 * registers to ensure the write into "sw_reset" register
2413 * has gone through.
2414 * Question: Is there any system call that will explicitly force
2415 * all the write commands still pending on the bus to be pushed
2416 * through?
2417	 * As of now I am just giving a 250ms delay and hoping that the
2418 * PCI write to sw_reset register is done by this time.
2419 */
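	/*
	 * (The usual way to flush a posted PCI write is a read-back from the
	 * device, for example a config space read; that cannot be relied on
	 * here because the card and its config space are in reset, hence the
	 * fixed delay below.)
	 */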
2420 msleep(250);
2421
2422	/* Restore the PCI state saved during initialization. */
2423 pci_restore_state(sp->pdev);
2424 s2io_init_pci(sp);
2425
2426 msleep(250);
2427
2428 /* SXE-002: Configure link and activity LED to turn it off */
2429 subid = sp->pdev->subsystem_device;
2430 if ((subid & 0xFF) >= 0x07) {
2431 val64 = readq(&bar0->gpio_control);
2432 val64 |= 0x0000800000000000ULL;
2433 writeq(val64, &bar0->gpio_control);
2434 val64 = 0x0411040400000000ULL;
2435 writeq(val64, (void __iomem *) bar0 + 0x2700);
2436 }
2437
2438 sp->device_enabled_once = FALSE;
2439}
2440
2441/**
2442 * s2io_set_swapper - to set the swapper control on the card
2443 * @sp : private member of the device structure,
2444 * pointer to the s2io_nic structure.
2445 * Description: Function to set the swapper control on the card
2446 * correctly depending on the 'endianness' of the system.
2447 * Return value:
2448 * SUCCESS on success and FAILURE on failure.
2449 */
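/*
 * For reference, the endian check below expects the PIF feed-back register
 * to read back the known pattern 0x0123456789ABCDEF once the swapper is
 * programmed correctly; a fully byte-swapped read of that pattern would
 * appear as 0xEFCDAB8967452301, which is what the FE/SE combinations
 * written into swapper_ctrl are there to correct.
 */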
2450
2451static int s2io_set_swapper(nic_t * sp)
2452{
2453 struct net_device *dev = sp->dev;
2454 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2455 u64 val64, valt, valr;
2456
2457 /*
2458 * Set proper endian settings and verify the same by reading
2459 * the PIF Feed-back register.
2460 */
2461
2462 val64 = readq(&bar0->pif_rd_swapper_fb);
2463 if (val64 != 0x0123456789ABCDEFULL) {
2464 int i = 0;
2465 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2466 0x8100008181000081ULL, /* FE=1, SE=0 */
2467 0x4200004242000042ULL, /* FE=0, SE=1 */
2468 0}; /* FE=0, SE=0 */
2469
2470 while(i<4) {
2471 writeq(value[i], &bar0->swapper_ctrl);
2472 val64 = readq(&bar0->pif_rd_swapper_fb);
2473 if (val64 == 0x0123456789ABCDEFULL)
2474 break;
2475 i++;
2476 }
2477 if (i == 4) {
2478 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2479 dev->name);
2480 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2481 (unsigned long long) val64);
2482 return FAILURE;
2483 }
2484 valr = value[i];
2485 } else {
2486 valr = readq(&bar0->swapper_ctrl);
2487 }
2488
2489 valt = 0x0123456789ABCDEFULL;
2490 writeq(valt, &bar0->xmsi_address);
2491 val64 = readq(&bar0->xmsi_address);
2492
2493 if(val64 != valt) {
2494 int i = 0;
2495 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2496 0x0081810000818100ULL, /* FE=1, SE=0 */
2497 0x0042420000424200ULL, /* FE=0, SE=1 */
2498 0}; /* FE=0, SE=0 */
2499
2500 while(i<4) {
2501 writeq((value[i] | valr), &bar0->swapper_ctrl);
2502 writeq(valt, &bar0->xmsi_address);
2503 val64 = readq(&bar0->xmsi_address);
2504 if(val64 == valt)
2505 break;
2506 i++;
2507 }
2508 if(i == 4) {
2509 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2510 DBG_PRINT(ERR_DBG, "reads:0x%llx\n",val64);
2511 return FAILURE;
2512 }
2513 }
2514 val64 = readq(&bar0->swapper_ctrl);
2515 val64 &= 0xFFFF000000000000ULL;
2516
2517#ifdef __BIG_ENDIAN
2518 /*
2519	 * The device is by default set to a big endian format, so a
2520 * big endian driver need not set anything.
2521 */
2522 val64 |= (SWAPPER_CTRL_TXP_FE |
2523 SWAPPER_CTRL_TXP_SE |
2524 SWAPPER_CTRL_TXD_R_FE |
2525 SWAPPER_CTRL_TXD_W_FE |
2526 SWAPPER_CTRL_TXF_R_FE |
2527 SWAPPER_CTRL_RXD_R_FE |
2528 SWAPPER_CTRL_RXD_W_FE |
2529 SWAPPER_CTRL_RXF_W_FE |
2530 SWAPPER_CTRL_XMSI_FE |
2531 SWAPPER_CTRL_XMSI_SE |
2532 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2533 writeq(val64, &bar0->swapper_ctrl);
2534#else
2535 /*
2536 * Initially we enable all bits to make it accessible by the
2537 * driver, then we selectively enable only those bits that
2538 * we want to set.
2539 */
2540 val64 |= (SWAPPER_CTRL_TXP_FE |
2541 SWAPPER_CTRL_TXP_SE |
2542 SWAPPER_CTRL_TXD_R_FE |
2543 SWAPPER_CTRL_TXD_R_SE |
2544 SWAPPER_CTRL_TXD_W_FE |
2545 SWAPPER_CTRL_TXD_W_SE |
2546 SWAPPER_CTRL_TXF_R_FE |
2547 SWAPPER_CTRL_RXD_R_FE |
2548 SWAPPER_CTRL_RXD_R_SE |
2549 SWAPPER_CTRL_RXD_W_FE |
2550 SWAPPER_CTRL_RXD_W_SE |
2551 SWAPPER_CTRL_RXF_W_FE |
2552 SWAPPER_CTRL_XMSI_FE |
2553 SWAPPER_CTRL_XMSI_SE |
2554 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2555 writeq(val64, &bar0->swapper_ctrl);
2556#endif
2557 val64 = readq(&bar0->swapper_ctrl);
2558
2559 /*
2560 * Verifying if endian settings are accurate by reading a
2561 * feedback register.
2562 */
2563 val64 = readq(&bar0->pif_rd_swapper_fb);
2564 if (val64 != 0x0123456789ABCDEFULL) {
2565 /* Endian settings are incorrect, calls for another dekko. */
2566 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2567 dev->name);
2568 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2569 (unsigned long long) val64);
2570 return FAILURE;
2571 }
2572
2573 return SUCCESS;
2574}
2575
2576/* ********************************************************* *
2577 * Functions defined below concern the OS part of the driver *
2578 * ********************************************************* */
2579
2580/**
2581 * s2io_open - open entry point of the driver
2582 * @dev : pointer to the device structure.
2583 * Description:
2584 * This function is the open entry point of the driver. It mainly calls a
2585 * function to allocate Rx buffers and inserts them into the buffer
2586 * descriptors and then enables the Rx part of the NIC.
2587 * Return value:
2588 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2589 * file on failure.
2590 */
2591
2592static int s2io_open(struct net_device *dev)
2593{
2594 nic_t *sp = dev->priv;
2595 int err = 0;
2596
2597 /*
2598 * Make sure you have link off by default every time
2599	 * the NIC is initialized
2600 */
2601 netif_carrier_off(dev);
2602 sp->last_link_state = LINK_DOWN;
2603
2604 /* Initialize H/W and enable interrupts */
2605 if (s2io_card_up(sp)) {
2606 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2607 dev->name);
2608 return -ENODEV;
2609 }
2610
2611 /* After proper initialization of H/W, register ISR */
2612 err = request_irq((int) sp->irq, s2io_isr, SA_SHIRQ,
2613 sp->name, dev);
2614 if (err) {
2615 s2io_reset(sp);
2616 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2617 dev->name);
2618 return err;
2619 }
2620
2621 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2622 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2623 s2io_reset(sp);
2624 return -ENODEV;
2625 }
2626
2627 netif_start_queue(dev);
2628 return 0;
2629}
2630
2631/**
2632 * s2io_close -close entry point of the driver
2633 * @dev : device pointer.
2634 * Description:
2635 * This is the stop entry point of the driver. It needs to undo exactly
2636 * whatever was done by the open entry point, thus it's usually referred to
2637 * as the close function. Among other things, this function mainly stops the
2638 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2639 * Return value:
2640 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2641 * file on failure.
2642 */
2643
2644static int s2io_close(struct net_device *dev)
2645{
2646 nic_t *sp = dev->priv;
2647
2648 flush_scheduled_work();
2649 netif_stop_queue(dev);
2650 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2651 s2io_card_down(sp);
2652
2653 free_irq(dev->irq, dev);
2654 sp->device_close_flag = TRUE; /* Device is shut down. */
2655 return 0;
2656}
2657
2658/**
2659 * s2io_xmit - Tx entry point of the driver
2660 * @skb : the socket buffer containing the Tx data.
2661 * @dev : device pointer.
2662 * Description :
2663 * This function is the Tx entry point of the driver. S2IO NIC supports
2664 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2665 * NOTE: when the device can't queue the pkt, just the trans_start variable
2666 * will not be updated.
2667 * Return value:
2668 * 0 on success & 1 on failure.
2669 */
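/*
 * The "ring full" test used below treats the TxD list as a circular queue
 * of queue_len entries: the list is considered full when
 * ((put_off + 1) % queue_len) == get_off, i.e. one slot is always kept
 * free to tell a full ring from an empty one.  For example, with
 * queue_len = 8, put_off = 6 and get_off = 7 the queue gets stopped.
 */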
2670
2671static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2672{
2673 nic_t *sp = dev->priv;
2674 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2675 register u64 val64;
2676 TxD_t *txdp;
2677 TxFIFO_element_t __iomem *tx_fifo;
2678 unsigned long flags;
2679#ifdef NETIF_F_TSO
2680 int mss;
2681#endif
2682 mac_info_t *mac_control;
2683 struct config_param *config;
2684 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2685
2686 mac_control = &sp->mac_control;
2687 config = &sp->config;
2688
2689 DBG_PRINT(TX_DBG, "%s: In S2IO Tx routine\n", dev->name);
2690 spin_lock_irqsave(&sp->tx_lock, flags);
2691
2692 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2693 DBG_PRINT(ERR_DBG, "%s: Card going down for reset\n",
2694 dev->name);
2695 spin_unlock_irqrestore(&sp->tx_lock, flags);
2696 return 1;
2697 }
2698
2699 queue = 0;
2700 put_off = (u16) mac_control->tx_curr_put_info[queue].offset;
2701 get_off = (u16) mac_control->tx_curr_get_info[queue].offset;
2702 txdp = (TxD_t *) sp->list_info[queue][put_off].list_virt_addr;
2703
2704 queue_len = mac_control->tx_curr_put_info[queue].fifo_len + 1;
2705 /* Avoid "put" pointer going beyond "get" pointer */
2706 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2707 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2708 netif_stop_queue(dev);
2709 dev_kfree_skb(skb);
2710 spin_unlock_irqrestore(&sp->tx_lock, flags);
2711 return 0;
2712 }
2713#ifdef NETIF_F_TSO
2714 mss = skb_shinfo(skb)->tso_size;
2715 if (mss) {
2716 txdp->Control_1 |= TXD_TCP_LSO_EN;
2717 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2718 }
2719#endif
2720
2721 frg_cnt = skb_shinfo(skb)->nr_frags;
2722 frg_len = skb->len - skb->data_len;
2723
2724 txdp->Host_Control = (unsigned long) skb;
2725 txdp->Buffer_Pointer = pci_map_single
2726 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2727 if (skb->ip_summed == CHECKSUM_HW) {
2728 txdp->Control_2 |=
2729 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2730 TXD_TX_CKO_UDP_EN);
2731 }
2732
2733 txdp->Control_2 |= config->tx_intr_type;
2734
2735 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2736 TXD_GATHER_CODE_FIRST);
2737 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2738
2739 /* For fragmented SKB. */
2740 for (i = 0; i < frg_cnt; i++) {
2741 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2742 txdp++;
2743 txdp->Buffer_Pointer = (u64) pci_map_page
2744 (sp->pdev, frag->page, frag->page_offset,
2745 frag->size, PCI_DMA_TODEVICE);
2746 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2747 }
2748 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
2749
2750 tx_fifo = mac_control->tx_FIFO_start[queue];
2751 val64 = sp->list_info[queue][put_off].list_phy_addr;
2752 writeq(val64, &tx_fifo->TxDL_Pointer);
2753
2754 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2755 TX_FIFO_LAST_LIST);
2756#ifdef NETIF_F_TSO
2757 if (mss)
2758 val64 |= TX_FIFO_SPECIAL_FUNC;
2759#endif
2760 writeq(val64, &tx_fifo->List_Control);
2761
2762 /* Perform a PCI read to flush previous writes */
2763 val64 = readq(&bar0->general_int_status);
2764
2765 put_off++;
2766 put_off %= mac_control->tx_curr_put_info[queue].fifo_len + 1;
2767 mac_control->tx_curr_put_info[queue].offset = put_off;
2768
2769 /* Avoid "put" pointer going beyond "get" pointer */
2770 if (((put_off + 1) % queue_len) == get_off) {
2771 DBG_PRINT(TX_DBG,
2772 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2773 put_off, get_off);
2774 netif_stop_queue(dev);
2775 }
2776
2777 dev->trans_start = jiffies;
2778 spin_unlock_irqrestore(&sp->tx_lock, flags);
2779
2780 return 0;
2781}
2782
2783/**
2784 * s2io_isr - ISR handler of the device .
2785 * @irq: the irq of the device.
2786 * @dev_id: a void pointer to the dev structure of the NIC.
2787 * @pt_regs: pointer to the registers pushed on the stack.
2788 * Description: This function is the ISR handler of the device. It
2789 * identifies the reason for the interrupt and calls the relevant
2790 * service routines. As a contingency measure, this ISR allocates the
2791 * recv buffers, if their number is below the panic value which is
2792 * presently set to 25% of the original number of rcv buffers allocated.
2793 * Return value:
2794 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2795 * IRQ_NONE: will be returned if interrupt is not from our device
2796 */
2797static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2798{
2799 struct net_device *dev = (struct net_device *) dev_id;
2800 nic_t *sp = dev->priv;
2801 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2802#ifndef CONFIG_S2IO_NAPI
2803 int i, ret;
2804#endif
2805 u64 reason = 0;
2806 mac_info_t *mac_control;
2807 struct config_param *config;
2808
2809 mac_control = &sp->mac_control;
2810 config = &sp->config;
2811
2812 /*
2813 * Identify the cause for interrupt and call the appropriate
2814	 * interrupt handler. Causes for the interrupt could be:
2815 * 1. Rx of packet.
2816 * 2. Tx complete.
2817 * 3. Link down.
2818 * 4. Error in any functional blocks of the NIC.
2819 */
2820 reason = readq(&bar0->general_int_status);
2821
2822 if (!reason) {
2823 /* The interrupt was not raised by Xena. */
2824 return IRQ_NONE;
2825 }
2826
2827 /* If Intr is because of Tx Traffic */
2828 if (reason & GEN_INTR_TXTRAFFIC) {
2829 tx_intr_handler(sp);
2830 }
2831
2832 /* If Intr is because of an error */
2833 if (reason & (GEN_ERROR_INTR))
2834 alarm_intr_handler(sp);
2835
2836#ifdef CONFIG_S2IO_NAPI
2837 if (reason & GEN_INTR_RXTRAFFIC) {
2838 if (netif_rx_schedule_prep(dev)) {
2839 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2840 DISABLE_INTRS);
2841 __netif_rx_schedule(dev);
2842 }
2843 }
2844#else
2845 /* If Intr is because of Rx Traffic */
2846 if (reason & GEN_INTR_RXTRAFFIC) {
2847 rx_intr_handler(sp);
2848 }
2849#endif
2850
2851 /*
2852 * If the Rx buffer count is below the panic threshold then
2853 * reallocate the buffers from the interrupt handler itself,
2854 * else schedule a tasklet to reallocate the buffers.
2855 */
2856#ifndef CONFIG_S2IO_NAPI
2857 for (i = 0; i < config->rx_ring_num; i++) {
2858 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2859 int level = rx_buffer_level(sp, rxb_size, i);
2860
2861 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2862 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2863 DBG_PRINT(INTR_DBG, "PANIC levels\n");
2864 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
2865 DBG_PRINT(ERR_DBG, "%s:Out of memory",
2866 dev->name);
2867 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2868 clear_bit(0, (&sp->tasklet_status));
2869 return IRQ_HANDLED;
2870 }
2871 clear_bit(0, (&sp->tasklet_status));
2872 } else if (level == LOW) {
2873 tasklet_schedule(&sp->task);
2874 }
2875 }
2876#endif
2877
2878 return IRQ_HANDLED;
2879}
2880
2881/**
2882 * s2io_get_stats - Updates the device statistics structure.
2883 * @dev : pointer to the device structure.
2884 * Description:
2885 * This function updates the device statistics structure in the s2io_nic
2886 * structure and returns a pointer to the same.
2887 * Return value:
2888 * pointer to the updated net_device_stats structure.
2889 */
2890
2891static struct net_device_stats *s2io_get_stats(struct net_device *dev)
2892{
2893 nic_t *sp = dev->priv;
2894 mac_info_t *mac_control;
2895 struct config_param *config;
2896
2897 mac_control = &sp->mac_control;
2898 config = &sp->config;
2899
2900 sp->stats.tx_errors = mac_control->stats_info->tmac_any_err_frms;
2901 sp->stats.rx_errors = mac_control->stats_info->rmac_drop_frms;
2902 sp->stats.multicast = mac_control->stats_info->rmac_vld_mcst_frms;
2903 sp->stats.rx_length_errors =
2904 mac_control->stats_info->rmac_long_frms;
2905
2906 return (&sp->stats);
2907}
2908
2909/**
2910 * s2io_set_multicast - entry point for multicast address enable/disable.
2911 * @dev : pointer to the device structure
2912 * Description:
2913 * This function is a driver entry point which gets called by the kernel
2914 * whenever multicast addresses must be enabled/disabled. This also gets
2915 * called to set/reset promiscuous mode. Depending on the device flags, we
2916 * determine whether multicast addresses must be enabled or promiscuous mode
2917 * is to be disabled, etc.
2918 * Return value:
2919 * void.
2920 */
2921
2922static void s2io_set_multicast(struct net_device *dev)
2923{
2924 int i, j, prev_cnt;
2925 struct dev_mc_list *mclist;
2926 nic_t *sp = dev->priv;
2927 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2928 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
2929 0xfeffffffffffULL;
2930 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
2931 void __iomem *add;
2932
2933 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
2934 /* Enable all Multicast addresses */
2935 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
2936 &bar0->rmac_addr_data0_mem);
2937 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
2938 &bar0->rmac_addr_data1_mem);
2939 val64 = RMAC_ADDR_CMD_MEM_WE |
2940 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2941 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
2942 writeq(val64, &bar0->rmac_addr_cmd_mem);
2943 /* Wait till command completes */
2944 wait_for_cmd_complete(sp);
2945
2946 sp->m_cast_flg = 1;
2947 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
2948 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
2949 /* Disable all Multicast addresses */
2950 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
2951 &bar0->rmac_addr_data0_mem);
2952 val64 = RMAC_ADDR_CMD_MEM_WE |
2953 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2954 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
2955 writeq(val64, &bar0->rmac_addr_cmd_mem);
2956 /* Wait till command completes */
2957 wait_for_cmd_complete(sp);
2958
2959 sp->m_cast_flg = 0;
2960 sp->all_multi_pos = 0;
2961 }
2962
2963 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
2964 /* Put the NIC into promiscuous mode */
2965 add = &bar0->mac_cfg;
2966 val64 = readq(&bar0->mac_cfg);
2967 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
2968
2969 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2970 writel((u32) val64, add);
2971 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2972 writel((u32) (val64 >> 32), (add + 4));
2973
2974 val64 = readq(&bar0->mac_cfg);
2975 sp->promisc_flg = 1;
2976 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
2977 dev->name);
2978 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
2979 /* Remove the NIC from promiscuous mode */
2980 add = &bar0->mac_cfg;
2981 val64 = readq(&bar0->mac_cfg);
2982 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
2983
2984 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2985 writel((u32) val64, add);
2986 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2987 writel((u32) (val64 >> 32), (add + 4));
2988
2989 val64 = readq(&bar0->mac_cfg);
2990 sp->promisc_flg = 0;
2991 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
2992 dev->name);
2993 }
2994
2995 /* Update individual M_CAST address list */
2996 if ((!sp->m_cast_flg) && dev->mc_count) {
2997 if (dev->mc_count >
2998 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
2999 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3000 dev->name);
3001 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3002 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3003 return;
3004 }
3005
3006 prev_cnt = sp->mc_addr_count;
3007 sp->mc_addr_count = dev->mc_count;
3008
3009 /* Clear out the previous list of Mc in the H/W. */
3010 for (i = 0; i < prev_cnt; i++) {
3011 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3012 &bar0->rmac_addr_data0_mem);
3013 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3014 &bar0->rmac_addr_data1_mem);
3015 val64 = RMAC_ADDR_CMD_MEM_WE |
3016 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3017 RMAC_ADDR_CMD_MEM_OFFSET
3018 (MAC_MC_ADDR_START_OFFSET + i);
3019 writeq(val64, &bar0->rmac_addr_cmd_mem);
3020
3021			/* Wait till command completes */
3022 if (wait_for_cmd_complete(sp)) {
3023 DBG_PRINT(ERR_DBG, "%s: Adding ",
3024 dev->name);
3025 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3026 return;
3027 }
3028 }
3029
3030 /* Create the new Rx filter list and update the same in H/W. */
3031 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3032 i++, mclist = mclist->next) {
3033 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3034 ETH_ALEN);
3035 for (j = 0; j < ETH_ALEN; j++) {
3036 mac_addr |= mclist->dmi_addr[j];
3037 mac_addr <<= 8;
3038 }
3039 mac_addr >>= 8;
3040 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3041 &bar0->rmac_addr_data0_mem);
3042 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3043 &bar0->rmac_addr_data1_mem);
3044
3045 val64 = RMAC_ADDR_CMD_MEM_WE |
3046 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3047 RMAC_ADDR_CMD_MEM_OFFSET
3048 (i + MAC_MC_ADDR_START_OFFSET);
3049 writeq(val64, &bar0->rmac_addr_cmd_mem);
3050
3051			/* Wait till command completes */
3052 if (wait_for_cmd_complete(sp)) {
3053 DBG_PRINT(ERR_DBG, "%s: Adding ",
3054 dev->name);
3055 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3056 return;
3057 }
3058 }
3059 }
3060}
3061
3062/**
3063 * s2io_set_mac_addr - Programs the Xframe mac address
3064 * @dev : pointer to the device structure.
3065 * @addr: a uchar pointer to the new mac address which is to be set.
3066 * Description : This procedure will program the Xframe to receive
3067 * frames with the new MAC address
3068 * Return value: SUCCESS on success and an appropriate (-)ve integer
3069 * as defined in errno.h file on failure.
3070 */
3071
3072int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3073{
3074 nic_t *sp = dev->priv;
3075 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3076 register u64 val64, mac_addr = 0;
3077 int i;
3078
3079 /*
3080 * Set the new MAC address as the new unicast filter and reflect this
3081 * change on the device address registered with the OS. It will be
3082 * at offset 0.
3083 */
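	/*
	 * For example (address chosen only for illustration), packing
	 * 00:0c:29:aa:bb:cc with the loop below gives
	 * mac_addr = 0x00000c29aabbcc, which is then written into the
	 * RMAC address memory at offset 0.
	 */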
3084 for (i = 0; i < ETH_ALEN; i++) {
3085 mac_addr <<= 8;
3086 mac_addr |= addr[i];
3087 }
3088
3089 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3090 &bar0->rmac_addr_data0_mem);
3091
3092 val64 =
3093 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3094 RMAC_ADDR_CMD_MEM_OFFSET(0);
3095 writeq(val64, &bar0->rmac_addr_cmd_mem);
3096 /* Wait till command completes */
3097 if (wait_for_cmd_complete(sp)) {
3098 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3099 return FAILURE;
3100 }
3101
3102 return SUCCESS;
3103}
3104
3105/**
3106 * s2io_ethtool_sset - Sets different link parameters.
3107 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3108 * @info: pointer to the structure with parameters given by ethtool to set
3109 * link information.
3110 * Description:
3111 * The function sets different link parameters provided by the user onto
3112 * the NIC.
3113 * Return value:
3114 * 0 on success.
3115*/
3116
3117static int s2io_ethtool_sset(struct net_device *dev,
3118 struct ethtool_cmd *info)
3119{
3120 nic_t *sp = dev->priv;
3121 if ((info->autoneg == AUTONEG_ENABLE) ||
3122 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3123 return -EINVAL;
3124 else {
3125 s2io_close(sp->dev);
3126 s2io_open(sp->dev);
3127 }
3128
3129 return 0;
3130}
3131
3132/**
3133 * s2io_ethtool_gset - Return link specific information.
3134 * @sp : private member of the device structure, pointer to the
3135 * s2io_nic structure.
3136 * @info : pointer to the structure with parameters given by ethtool
3137 * to return link information.
3138 * Description:
3139 * Returns link specific information like speed, duplex etc. to ethtool.
3140 * Return value :
3141 * return 0 on success.
3142 */
3143
3144static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3145{
3146 nic_t *sp = dev->priv;
3147 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3148	info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
3149 info->port = PORT_FIBRE;
3150 /* info->transceiver?? TODO */
3151
3152 if (netif_carrier_ok(sp->dev)) {
3153 info->speed = 10000;
3154 info->duplex = DUPLEX_FULL;
3155 } else {
3156 info->speed = -1;
3157 info->duplex = -1;
3158 }
3159
3160 info->autoneg = AUTONEG_DISABLE;
3161 return 0;
3162}
3163
3164/**
3165 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3166 * @sp : private member of the device structure, which is a pointer to the
3167 * s2io_nic structure.
3168 * @info : pointer to the structure with parameters given by ethtool to
3169 * return driver information.
3170 * Description:
3171 * Returns driver specific information like name, version etc. to ethtool.
3172 * Return value:
3173 * void
3174 */
3175
3176static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3177 struct ethtool_drvinfo *info)
3178{
3179 nic_t *sp = dev->priv;
3180
3181 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3182 strncpy(info->version, s2io_driver_version,
3183 sizeof(s2io_driver_version));
3184 strncpy(info->fw_version, "", 32);
3185 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3186 info->regdump_len = XENA_REG_SPACE;
3187 info->eedump_len = XENA_EEPROM_SPACE;
3188 info->testinfo_len = S2IO_TEST_LEN;
3189 info->n_stats = S2IO_STAT_LEN;
3190}
3191
3192/**
3193 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
3194 * @sp: private member of the device structure, which is a pointer to the
3195 * s2io_nic structure.
3196 * @regs : pointer to the structure with parameters given by ethtool for
3197 * dumping the registers.
3198 * @reg_space: The input argument into which all the registers are dumped.
3199 * Description:
3200 * Dumps the entire register space of the Xframe NIC into the user given
3201 * buffer area.
3202 * Return value :
3203 * void .
3204*/
3205
3206static void s2io_ethtool_gregs(struct net_device *dev,
3207 struct ethtool_regs *regs, void *space)
3208{
3209 int i;
3210 u64 reg;
3211 u8 *reg_space = (u8 *) space;
3212 nic_t *sp = dev->priv;
3213
3214 regs->len = XENA_REG_SPACE;
3215 regs->version = sp->pdev->subsystem_device;
3216
3217 for (i = 0; i < regs->len; i += 8) {
3218 reg = readq(sp->bar0 + i);
3219 memcpy((reg_space + i), &reg, 8);
3220 }
3221}
3222
3223/**
3224 * s2io_phy_id - timer function that alternates adapter LED.
3225 * @data : address of the private member of the device structure, which
3226 * is a pointer to the s2io_nic structure, provided as an u32.
3227 * Description: This is actually the timer function that toggles the
3228 * adapter LED bit of the adapter control register on every
3229 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3230 * once every second.
3231*/
3232static void s2io_phy_id(unsigned long data)
3233{
3234 nic_t *sp = (nic_t *) data;
3235 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3236 u64 val64 = 0;
3237 u16 subid;
3238
3239 subid = sp->pdev->subsystem_device;
3240 if ((subid & 0xFF) >= 0x07) {
3241 val64 = readq(&bar0->gpio_control);
3242 val64 ^= GPIO_CTRL_GPIO_0;
3243 writeq(val64, &bar0->gpio_control);
3244 } else {
3245 val64 = readq(&bar0->adapter_control);
3246 val64 ^= ADAPTER_LED_ON;
3247 writeq(val64, &bar0->adapter_control);
3248 }
3249
3250 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3251}
3252
3253/**
3254 * s2io_ethtool_idnic - To physically identify the nic on the system.
3255 * @sp : private member of the device structure, which is a pointer to the
3256 * s2io_nic structure.
3257 * @id : pointer to the structure with identification parameters given by
3258 * ethtool.
3259 * Description: Used to physically identify the NIC on the system.
3260 * The Link LED will blink for a time specified by the user for
3261 * identification.
3262 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3263 * identification is possible only if its link is up.
3264 * Return value:
3265 * int , returns 0 on success
3266 */
3267
3268static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3269{
3270 u64 val64 = 0, last_gpio_ctrl_val;
3271 nic_t *sp = dev->priv;
3272 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3273 u16 subid;
3274
3275 subid = sp->pdev->subsystem_device;
3276 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3277 if ((subid & 0xFF) < 0x07) {
3278 val64 = readq(&bar0->adapter_control);
3279 if (!(val64 & ADAPTER_CNTL_EN)) {
3280 printk(KERN_ERR
3281 "Adapter Link down, cannot blink LED\n");
3282 return -EFAULT;
3283 }
3284 }
3285 if (sp->id_timer.function == NULL) {
3286 init_timer(&sp->id_timer);
3287 sp->id_timer.function = s2io_phy_id;
3288 sp->id_timer.data = (unsigned long) sp;
3289 }
3290 mod_timer(&sp->id_timer, jiffies);
3291 if (data)
3292 msleep(data * 1000);
3293 else
3294 msleep(0xFFFFFFFF);
3295 del_timer_sync(&sp->id_timer);
3296
3297 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3298 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3299 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3300 }
3301
3302 return 0;
3303}
3304
3305/**
3306 * s2io_ethtool_getpause_data - Pause frame generation and reception.
3307 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3308 * @ep : pointer to the structure with pause parameters given by ethtool.
3309 * Description:
3310 * Returns the Pause frame generation and reception capability of the NIC.
3311 * Return value:
3312 * void
3313 */
3314static void s2io_ethtool_getpause_data(struct net_device *dev,
3315 struct ethtool_pauseparam *ep)
3316{
3317 u64 val64;
3318 nic_t *sp = dev->priv;
3319 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3320
3321 val64 = readq(&bar0->rmac_pause_cfg);
3322 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3323 ep->tx_pause = TRUE;
3324 if (val64 & RMAC_PAUSE_RX_ENABLE)
3325 ep->rx_pause = TRUE;
3326 ep->autoneg = FALSE;
3327}
3328
3329/**
3330 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3331 * @sp : private member of the device structure, which is a pointer to the
3332 * s2io_nic structure.
3333 * @ep : pointer to the structure with pause parameters given by ethtool.
3334 * Description:
3335 * It can be used to set or reset Pause frame generation or reception
3336 * support of the NIC.
3337 * Return value:
3338 * int, returns 0 on Success
3339 */
3340
3341static int s2io_ethtool_setpause_data(struct net_device *dev,
3342 struct ethtool_pauseparam *ep)
3343{
3344 u64 val64;
3345 nic_t *sp = dev->priv;
3346 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3347
3348 val64 = readq(&bar0->rmac_pause_cfg);
3349 if (ep->tx_pause)
3350 val64 |= RMAC_PAUSE_GEN_ENABLE;
3351 else
3352 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3353 if (ep->rx_pause)
3354 val64 |= RMAC_PAUSE_RX_ENABLE;
3355 else
3356 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3357 writeq(val64, &bar0->rmac_pause_cfg);
3358 return 0;
3359}
3360
3361/**
3362 * read_eeprom - reads 4 bytes of data from user given offset.
3363 * @sp : private member of the device structure, which is a pointer to the
3364 * s2io_nic structure.
3365 * @off : offset from which the data is to be read
3366 * @data : It's an output parameter where the data read at the given
3367 * offset is stored.
3368 * Description:
3369 * Will read 4 bytes of data from the user given offset and return the
3370 * read data.
3371 * NOTE: Will only allow reading the part of the EEPROM visible through the
3372 * I2C bus.
3373 * Return value:
3374 * -1 on failure and 0 on success.
3375 */
3376
3377#define S2IO_DEV_ID 5
3378static int read_eeprom(nic_t * sp, int off, u32 * data)
3379{
3380 int ret = -1;
3381 u32 exit_cnt = 0;
3382 u64 val64;
3383 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3384
3385 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3386 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3387 I2C_CONTROL_CNTL_START;
3388 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3389
3390 while (exit_cnt < 5) {
3391 val64 = readq(&bar0->i2c_control);
3392 if (I2C_CONTROL_CNTL_END(val64)) {
3393 *data = I2C_CONTROL_GET_DATA(val64);
3394 ret = 0;
3395 break;
3396 }
3397 msleep(50);
3398 exit_cnt++;
3399 }
3400
3401 return ret;
3402}
3403
3404/**
3405 * write_eeprom - actually writes the relevant part of the data value.
3406 * @sp : private member of the device structure, which is a pointer to the
3407 * s2io_nic structure.
3408 * @off : offset at which the data must be written
3409 * @data : The data that is to be written
3410 * @cnt : Number of bytes of the data that are actually to be written into
3411 * the Eeprom. (max of 3)
3412 * Description:
3413 * Actually writes the relevant part of the data value into the Eeprom
3414 * through the I2C bus.
3415 * Return value:
3416 * 0 on success, -1 on failure.
3417 */
3418
3419static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3420{
3421 int exit_cnt = 0, ret = -1;
3422 u64 val64;
3423 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3424
3425 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3426 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3427 I2C_CONTROL_CNTL_START;
3428 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3429
3430 while (exit_cnt < 5) {
3431 val64 = readq(&bar0->i2c_control);
3432 if (I2C_CONTROL_CNTL_END(val64)) {
3433 if (!(val64 & I2C_CONTROL_NACK))
3434 ret = 0;
3435 break;
3436 }
3437 msleep(50);
3438 exit_cnt++;
3439 }
3440
3441 return ret;
3442}
3443
3444/**
3445 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3446 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3447 * @eeprom : pointer to the user level structure provided by ethtool,
3448 * containing all relevant information.
3449 * @data_buf : user defined value to be written into Eeprom.
3450 * Description: Reads the values stored in the Eeprom at given offset
3451 * for a given length. Stores these values in the input argument data
3452 * buffer 'data_buf' and returns these to the caller (ethtool).
3453 * Return value:
3454 * int 0 on success
3455 */
3456
3457static int s2io_ethtool_geeprom(struct net_device *dev,
3458 struct ethtool_eeprom *eeprom, u8 * data_buf)
3459{
3460 u32 data, i, valid;
3461 nic_t *sp = dev->priv;
3462
3463 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3464
3465 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3466 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3467
3468 for (i = 0; i < eeprom->len; i += 4) {
3469 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3470 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3471 return -EFAULT;
3472 }
3473 valid = INV(data);
3474 memcpy((data_buf + i), &valid, 4);
3475 }
3476 return 0;
3477}
3478
3479/**
3480 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3481 * @sp : private member of the device structure, which is a pointer to the
3482 * s2io_nic structure.
3483 * @eeprom : pointer to the user level structure provided by ethtool,
3484 * containing all relevant information.
3485 * @data_buf : user defined value to be written into Eeprom.
3486 * Description:
3487 * Tries to write the user provided value in the Eeprom, at the offset
3488 * given by the user.
3489 * Return value:
3490 * 0 on success, -EFAULT on failure.
3491 */
3492
3493static int s2io_ethtool_seeprom(struct net_device *dev,
3494 struct ethtool_eeprom *eeprom,
3495 u8 * data_buf)
3496{
3497 int len = eeprom->len, cnt = 0;
3498 u32 valid = 0, data;
3499 nic_t *sp = dev->priv;
3500
3501 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3502 DBG_PRINT(ERR_DBG,
3503 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3504 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3505 eeprom->magic);
3506 return -EFAULT;
3507 }
3508
3509 while (len) {
3510 data = (u32) data_buf[cnt] & 0x000000FF;
3511 if (data) {
3512 valid = (u32) (data << 24);
3513 } else
3514 valid = data;
3515
3516 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3517 DBG_PRINT(ERR_DBG,
3518 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3519 DBG_PRINT(ERR_DBG,
3520 "write into the specified offset\n");
3521 return -EFAULT;
3522 }
3523 cnt++;
3524 len--;
3525 }
3526
3527 return 0;
3528}
3529
3530/**
3531 * s2io_register_test - reads and writes into all clock domains.
3532 * @sp : private member of the device structure, which is a pointer to the
3533 * s2io_nic structure.
3534 * @data : variable that returns the result of each of the tests conducted
3535 * by the driver.
3536 * Description:
3537 * Read and write into all clock domains. The NIC has 3 clock domains;
3538 * verify that registers in all the three regions are accessible.
3539 * Return value:
3540 * 0 on success.
3541 */
3542
3543static int s2io_register_test(nic_t * sp, uint64_t * data)
3544{
3545 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3546 u64 val64 = 0;
3547 int fail = 0;
3548
3549 val64 = readq(&bar0->pcc_enable);
3550 if (val64 != 0xff00000000000000ULL) {
3551 fail = 1;
3552 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3553 }
3554
3555 val64 = readq(&bar0->rmac_pause_cfg);
3556 if (val64 != 0xc000ffff00000000ULL) {
3557 fail = 1;
3558 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3559 }
3560
3561 val64 = readq(&bar0->rx_queue_cfg);
3562 if (val64 != 0x0808080808080808ULL) {
3563 fail = 1;
3564 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3565 }
3566
3567 val64 = readq(&bar0->xgxs_efifo_cfg);
3568 if (val64 != 0x000000001923141EULL) {
3569 fail = 1;
3570 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
3571 }
3572
3573 val64 = 0x5A5A5A5A5A5A5A5AULL;
3574 writeq(val64, &bar0->xmsi_data);
3575 val64 = readq(&bar0->xmsi_data);
3576 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3577 fail = 1;
3578 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3579 }
3580
3581 val64 = 0xA5A5A5A5A5A5A5A5ULL;
3582 writeq(val64, &bar0->xmsi_data);
3583 val64 = readq(&bar0->xmsi_data);
3584 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3585 fail = 1;
3586 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3587 }
3588
3589 *data = fail;
3590 return 0;
3591}
3592
3593/**
3594 * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
3595 * @sp : private member of the device structure, which is a pointer to the
3596 * s2io_nic structure.
3597 * @data: variable that returns the result of each of the tests conducted by
3598 * the driver.
3599 * Description:
3600 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3601 * register.
3602 * Return value:
3603 * 0 on success.
3604 */
3605
3606static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3607{
3608 int fail = 0;
3609 u32 ret_data;
3610
3611 /* Test Write Error at offset 0 */
3612 if (!write_eeprom(sp, 0, 0, 3))
3613 fail = 1;
3614
3615 /* Test Write at offset 4f0 */
3616 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3617 fail = 1;
3618 if (read_eeprom(sp, 0x4F0, &ret_data))
3619 fail = 1;
3620
3621 if (ret_data != 0x01234567)
3622 fail = 1;
3623
3624 /* Reset the EEPROM data back to FFFF */
3625 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3626
3627 /* Test Write Request Error at offset 0x7c */
3628 if (!write_eeprom(sp, 0x07C, 0, 3))
3629 fail = 1;
3630
3631 /* Test Write Request at offset 0x7fc */
3632 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3633 fail = 1;
3634 if (read_eeprom(sp, 0x7FC, &ret_data))
3635 fail = 1;
3636
3637 if (ret_data != 0x01234567)
3638 fail = 1;
3639
3641 /* Reset the EEPROM data back to FFFF */
3641 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3642
3643 /* Test Write Error at offset 0x80 */
3644 if (!write_eeprom(sp, 0x080, 0, 3))
3645 fail = 1;
3646
3647 /* Test Write Error at offset 0xfc */
3648 if (!write_eeprom(sp, 0x0FC, 0, 3))
3649 fail = 1;
3650
3651 /* Test Write Error at offset 0x100 */
3652 if (!write_eeprom(sp, 0x100, 0, 3))
3653 fail = 1;
3654
3655 /* Test Write Error at offset 4ec */
3656 if (!write_eeprom(sp, 0x4EC, 0, 3))
3657 fail = 1;
3658
3659 *data = fail;
3660 return 0;
3661}
3662
3663/**
3664 * s2io_bist_test - invokes the MemBist test of the card.
3665 * @sp : private member of the device structure, which is a pointer to the
3666 * s2io_nic structure.
3667 * @data: variable that returns the result of each of the tests conducted by
3668 * the driver.
3669 * Description:
3670 * This invokes the MemBist test of the card. We give the test around
3671 * 2 seconds to complete. If it is still not complete
3672 * within this period, we consider that the test failed.
3673 * Return value:
3674 * 0 on success and -1 on failure.
3675 */
3676
3677static int s2io_bist_test(nic_t * sp, uint64_t * data)
3678{
3679 u8 bist = 0;
3680 int cnt = 0, ret = -1;
3681
3682 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3683 bist |= PCI_BIST_START;
3684 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
3685
3686 while (cnt < 20) {
3687 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3688 if (!(bist & PCI_BIST_START)) {
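/* per the PCI spec, a BIST completion code of 0 in bits 3:0 means the test passed */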
3689 *data = (bist & PCI_BIST_CODE_MASK);
3690 ret = 0;
3691 break;
3692 }
3693 msleep(100);
3694 cnt++;
3695 }
3696
3697 return ret;
3698}
3699
3700/**
3701 * s2io_link_test - verifies the link state of the NIC
3702 * @sp : private member of the device structure, which is a pointer to the
3703 * s2io_nic structure.
3704 * @data: variable that returns the result of each of the tests conducted by
3705 * the driver.
3706 * Description:
3707 * The function verifies the link state of the NIC and updates the input
3708 * argument 'data' appropriately.
3709 * Return value:
3710 * 0 on success.
3711 */
3712
3713static int s2io_link_test(nic_t * sp, uint64_t * data)
3714{
3715 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3716 u64 val64;
3717
3718 val64 = readq(&bar0->adapter_status);
3719 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3720 *data = 1;
3721
3722 return 0;
3723}
3724
3725/**
3726 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3727 * @sp : private member of the device structure, which is a pointer to the
3728 * s2io_nic structure.
3729 * @data : variable that returns the result of each of the tests
3730 * conducted by the driver.
3731 * Description:
3732 * This is one of the offline tests; it tests the read and write
3733 * access to the RldRam chip on the NIC.
3734 * Return value:
3735 * 0 on success.
3736 */
3737
3738static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3739{
3740 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3741 u64 val64;
3742 int cnt, iteration = 0, test_pass = 0;
3743
3744 val64 = readq(&bar0->adapter_control);
3745 val64 &= ~ADAPTER_ECC_EN;
3746 writeq(val64, &bar0->adapter_control);
3747
3748 val64 = readq(&bar0->mc_rldram_test_ctrl);
3749 val64 |= MC_RLDRAM_TEST_MODE;
3750 writeq(val64, &bar0->mc_rldram_test_ctrl);
3751
3752 val64 = readq(&bar0->mc_rldram_mrs);
3753 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3754 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3755
3756 val64 |= MC_RLDRAM_MRS_ENABLE;
3757 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3758
3759 while (iteration < 2) {
3760 val64 = 0x55555555aaaa0000ULL;
3761 if (iteration == 1) {
3762 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3763 }
3764 writeq(val64, &bar0->mc_rldram_test_d0);
3765
3766 val64 = 0xaaaa5a5555550000ULL;
3767 if (iteration == 1) {
3768 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3769 }
3770 writeq(val64, &bar0->mc_rldram_test_d1);
3771
3772 val64 = 0x55aaaaaaaa5a0000ULL;
3773 if (iteration == 1) {
3774 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3775 }
3776 writeq(val64, &bar0->mc_rldram_test_d2);
3777
3778 val64 = (u64) (0x0000003fffff0000ULL);
3779 writeq(val64, &bar0->mc_rldram_test_add);
3780
3781
3782 val64 = MC_RLDRAM_TEST_MODE;
3783 writeq(val64, &bar0->mc_rldram_test_ctrl);
3784
3785 val64 |=
3786 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3787 MC_RLDRAM_TEST_GO;
3788 writeq(val64, &bar0->mc_rldram_test_ctrl);
3789
3790 for (cnt = 0; cnt < 5; cnt++) {
3791 val64 = readq(&bar0->mc_rldram_test_ctrl);
3792 if (val64 & MC_RLDRAM_TEST_DONE)
3793 break;
3794 msleep(200);
3795 }
3796
3797 if (cnt == 5)
3798 break;
3799
3800 val64 = MC_RLDRAM_TEST_MODE;
3801 writeq(val64, &bar0->mc_rldram_test_ctrl);
3802
3803 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3804 writeq(val64, &bar0->mc_rldram_test_ctrl);
3805
3806 for (cnt = 0; cnt < 5; cnt++) {
3807 val64 = readq(&bar0->mc_rldram_test_ctrl);
3808 if (val64 & MC_RLDRAM_TEST_DONE)
3809 break;
3810 msleep(500);
3811 }
3812
3813 if (cnt == 5)
3814 break;
3815
3816 val64 = readq(&bar0->mc_rldram_test_ctrl);
3817 if (val64 & MC_RLDRAM_TEST_PASS)
3818 test_pass = 1;
3819
3820 iteration++;
3821 }
3822
3823 if (!test_pass)
3824 *data = 1;
3825 else
3826 *data = 0;
3827
3828 return 0;
3829}
3830
3831/**
3832 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
3833 * @sp : private member of the device structure, which is a pointer to the
3834 * s2io_nic structure.
3835 * @ethtest : pointer to an ethtool command specific structure that will be
3836 * returned to the user.
3837 * @data : variable that returns the result of each of the tests
3838 * conducted by the driver.
3839 * Description:
3840 * This function conducts 6 tests (4 offline and 2 online) to determine
3841 * the health of the card.
3842 * Return value:
3843 * void
3844 */
3845
3846static void s2io_ethtool_test(struct net_device *dev,
3847 struct ethtool_test *ethtest,
3848 uint64_t * data)
3849{
3850 nic_t *sp = dev->priv;
3851 int orig_state = netif_running(sp->dev);
3852
3853 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
3854 /* Offline Tests. */
3855 if (orig_state) {
3856 s2io_close(sp->dev);
3857 s2io_set_swapper(sp);
3858 } else
3859 s2io_set_swapper(sp);
3860
3861 if (s2io_register_test(sp, &data[0]))
3862 ethtest->flags |= ETH_TEST_FL_FAILED;
3863
3864 s2io_reset(sp);
3865 s2io_set_swapper(sp);
3866
3867 if (s2io_rldram_test(sp, &data[3]))
3868 ethtest->flags |= ETH_TEST_FL_FAILED;
3869
3870 s2io_reset(sp);
3871 s2io_set_swapper(sp);
3872
3873 if (s2io_eeprom_test(sp, &data[1]))
3874 ethtest->flags |= ETH_TEST_FL_FAILED;
3875
3876 if (s2io_bist_test(sp, &data[4]))
3877 ethtest->flags |= ETH_TEST_FL_FAILED;
3878
3879 if (orig_state)
3880 s2io_open(sp->dev);
3881
3882 data[2] = 0;
3883 } else {
3884 /* Online Tests. */
3885 if (!orig_state) {
3886 DBG_PRINT(ERR_DBG,
3887 "%s: is not up, cannot run test\n",
3888 dev->name);
3889 data[0] = -1;
3890 data[1] = -1;
3891 data[2] = -1;
3892 data[3] = -1;
3893 data[4] = -1;
3894 }
3895
3896 if (s2io_link_test(sp, &data[2]))
3897 ethtest->flags |= ETH_TEST_FL_FAILED;
3898
3899 data[0] = 0;
3900 data[1] = 0;
3901 data[3] = 0;
3902 data[4] = 0;
3903 }
3904}
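/*
 * A hedged userspace sketch of how the self-test above is reached: the
 * standard ETHTOOL_TEST ioctl (this is the path used by "ethtool -t").
 * The helper name, socket and interface handling are illustrative only;
 * the five result words correspond to data[0..4] filled in above.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int run_offline_selftest(int sockfd, const char *ifname, __u64 res[5])
{
	struct {
		struct ethtool_test test;
		__u64 data[5];	/* register, eeprom, link, rldram, bist */
	} req;
	struct ifreq ifr;

	memset(&req, 0, sizeof(req));
	req.test.cmd = ETHTOOL_TEST;
	req.test.flags = ETH_TEST_FL_OFFLINE;	/* also run the 4 offline tests */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *) &req;

	if (ioctl(sockfd, SIOCETHTOOL, &ifr) < 0)
		return -1;
	memcpy(res, req.data, sizeof(req.data));

	/* ETH_TEST_FL_FAILED set in req.test.flags means at least one test failed */
	return (req.test.flags & ETH_TEST_FL_FAILED) ? 1 : 0;
}
#endif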
3905
3906static void s2io_get_ethtool_stats(struct net_device *dev,
3907 struct ethtool_stats *estats,
3908 u64 * tmp_stats)
3909{
3910 int i = 0;
3911 nic_t *sp = dev->priv;
3912 StatInfo_t *stat_info = sp->mac_control.stats_info;
3913
3914 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
3915 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
3916 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
3917 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
3918 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
3919 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
3920 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
3921 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
3922 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
3923 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
3924 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
3925 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
3926 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
3927 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
3928 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
3929 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
3930 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
3931 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
3932 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
3933 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
3934 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
3935 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
3936 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
3937 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
3938 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
3939 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
3940 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
3941 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
3942 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
3943 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
3944 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
3945 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
3946 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
3947 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
3948 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
3949 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
3950 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
3951 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
3952 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
3953}
3954
3955static int s2io_ethtool_get_regs_len(struct net_device *dev)
3956{
3957 return (XENA_REG_SPACE);
3958}
3959
3960
3961static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
3962{
3963 nic_t *sp = dev->priv;
3964
3965 return (sp->rx_csum);
3966}
3967
3968static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3969{
3970 nic_t *sp = dev->priv;
3971
3972 if (data)
3973 sp->rx_csum = 1;
3974 else
3975 sp->rx_csum = 0;
3976
3977 return 0;
3978}
3979
3980static int s2io_get_eeprom_len(struct net_device *dev)
3981{
3982 return (XENA_EEPROM_SPACE);
3983}
3984
3985static int s2io_ethtool_self_test_count(struct net_device *dev)
3986{
3987 return (S2IO_TEST_LEN);
3988}
3989
3990static void s2io_ethtool_get_strings(struct net_device *dev,
3991 u32 stringset, u8 * data)
3992{
3993 switch (stringset) {
3994 case ETH_SS_TEST:
3995 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
3996 break;
3997 case ETH_SS_STATS:
3998 memcpy(data, &ethtool_stats_keys,
3999 sizeof(ethtool_stats_keys));
4000 }
4001}
4002
4003static int s2io_ethtool_get_stats_count(struct net_device *dev)
4004{
4005 return (S2IO_STAT_LEN);
4006}
4007
4008static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4009{
4010 if (data)
4011 dev->features |= NETIF_F_IP_CSUM;
4012 else
4013 dev->features &= ~NETIF_F_IP_CSUM;
4014
4015 return 0;
4016}
4017
4018
4019static struct ethtool_ops netdev_ethtool_ops = {
4020 .get_settings = s2io_ethtool_gset,
4021 .set_settings = s2io_ethtool_sset,
4022 .get_drvinfo = s2io_ethtool_gdrvinfo,
4023 .get_regs_len = s2io_ethtool_get_regs_len,
4024 .get_regs = s2io_ethtool_gregs,
4025 .get_link = ethtool_op_get_link,
4026 .get_eeprom_len = s2io_get_eeprom_len,
4027 .get_eeprom = s2io_ethtool_geeprom,
4028 .set_eeprom = s2io_ethtool_seeprom,
4029 .get_pauseparam = s2io_ethtool_getpause_data,
4030 .set_pauseparam = s2io_ethtool_setpause_data,
4031 .get_rx_csum = s2io_ethtool_get_rx_csum,
4032 .set_rx_csum = s2io_ethtool_set_rx_csum,
4033 .get_tx_csum = ethtool_op_get_tx_csum,
4034 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4035 .get_sg = ethtool_op_get_sg,
4036 .set_sg = ethtool_op_set_sg,
4037#ifdef NETIF_F_TSO
4038 .get_tso = ethtool_op_get_tso,
4039 .set_tso = ethtool_op_set_tso,
4040#endif
4041 .self_test_count = s2io_ethtool_self_test_count,
4042 .self_test = s2io_ethtool_test,
4043 .get_strings = s2io_ethtool_get_strings,
4044 .phys_id = s2io_ethtool_idnic,
4045 .get_stats_count = s2io_ethtool_get_stats_count,
4046 .get_ethtool_stats = s2io_get_ethtool_stats
4047};
4048
4049/**
4050 * s2io_ioctl - Entry point for the Ioctl
4051 * @dev : Device pointer.
4052 * @ifr : An IOCTL specific structure that can contain a pointer to
4053 * a proprietary structure used to pass information to the driver.
4054 * @cmd : This is used to distinguish between the different commands that
4055 * can be passed to the IOCTL functions.
4056 * Description:
4057 * This function has support for ethtool, adding multiple MAC addresses on
4058 * the NIC and some DBG commands for the util tool.
4059 * Return value:
4060 * Currently the IOCTL supports no operations, hence by default this
4061 * function returns the 'operation not supported' (-EOPNOTSUPP) value.
4062 */
4063
4064static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4065{
4066 return -EOPNOTSUPP;
4067}
4068
4069/**
4070 * s2io_change_mtu - entry point to change MTU size for the device.
4071 * @dev : device pointer.
4072 * @new_mtu : the new MTU size for the device.
4073 * Description: A driver entry point to change MTU size for the device.
4074 * Before changing the MTU the device must be stopped.
4075 * Return value:
4076 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4077 * file on failure.
4078 */
4079
4080static int s2io_change_mtu(struct net_device *dev, int new_mtu)
4081{
4082 nic_t *sp = dev->priv;
4083 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4084 register u64 val64;
4085
4086 if (netif_running(dev)) {
4087 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
4088 DBG_PRINT(ERR_DBG, "change its MTU\n");
4089 return -EBUSY;
4090 }
4091
4092 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4093 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4094 dev->name);
4095 return -EPERM;
4096 }
4097
4098 /* Set the new MTU into the PYLD register of the NIC */
4099 val64 = new_mtu;
4100 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4101
4102 dev->mtu = new_mtu;
4103
4104 return 0;
4105}
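/*
 * A hedged userspace sketch of the path that reaches s2io_change_mtu(): the
 * standard SIOCSIFMTU ioctl. As noted above, this driver requires the
 * interface to be brought down first. Socket and interface name handling
 * are illustrative only.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>

static int set_interface_mtu(int sockfd, const char *ifname, int mtu)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_mtu = mtu;	/* must be within MIN_MTU..S2IO_JUMBO_SIZE */

	return ioctl(sockfd, SIOCSIFMTU, &ifr);	/* 0 on success */
}
#endif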
4106
4107/**
4108 * s2io_tasklet - Bottom half of the ISR.
4109 * @dev_addr : address of the net device structure, passed as an unsigned long.
4110 * Description:
4111 * This is the tasklet or the bottom half of the ISR. This is
4112 * an extension of the ISR which is scheduled to run
4113 * in softirq context after the ISR returns. All low priority tasks of the ISR can
4114 * be pushed into the tasklet. For now the tasklet is used only to
4115 * replenish the Rx buffers in the Rx buffer descriptors.
4116 * Return value:
4117 * void.
4118 */
4119
4120static void s2io_tasklet(unsigned long dev_addr)
4121{
4122 struct net_device *dev = (struct net_device *) dev_addr;
4123 nic_t *sp = dev->priv;
4124 int i, ret;
4125 mac_info_t *mac_control;
4126 struct config_param *config;
4127
4128 mac_control = &sp->mac_control;
4129 config = &sp->config;
4130
4131 if (!TASKLET_IN_USE) {
4132 for (i = 0; i < config->rx_ring_num; i++) {
4133 ret = fill_rx_buffers(sp, i);
4134 if (ret == -ENOMEM) {
4135 DBG_PRINT(ERR_DBG, "%s: Out of ",
4136 dev->name);
4137 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4138 break;
4139 } else if (ret == -EFILL) {
4140 DBG_PRINT(ERR_DBG,
4141 "%s: Rx Ring %d is full\n",
4142 dev->name, i);
4143 break;
4144 }
4145 }
4146 clear_bit(0, (&sp->tasklet_status));
4147 }
4148}
4149
4150/**
4151 * s2io_set_link - Set the Link status
4152 * @data: long pointer to device private structure
4153 * Description: Sets the link status for the adapter
4154 */
4155
4156static void s2io_set_link(unsigned long data)
4157{
4158 nic_t *nic = (nic_t *) data;
4159 struct net_device *dev = nic->dev;
4160 XENA_dev_config_t __iomem *bar0 = nic->bar0;
4161 register u64 val64;
4162 u16 subid;
4163
4164 if (test_and_set_bit(0, &(nic->link_state))) {
4165 /* The card is being reset, no point doing anything */
4166 return;
4167 }
4168
4169 subid = nic->pdev->subsystem_device;
4170 /*
4171 * Allow a small delay for the NIC's self-initiated
4172 * cleanup to complete.
4173 */
4174 msleep(100);
4175
4176 val64 = readq(&bar0->adapter_status);
4177 if (verify_xena_quiescence(val64, nic->device_enabled_once)) {
4178 if (LINK_IS_UP(val64)) {
4179 val64 = readq(&bar0->adapter_control);
4180 val64 |= ADAPTER_CNTL_EN;
4181 writeq(val64, &bar0->adapter_control);
4182 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4183 val64 = readq(&bar0->gpio_control);
4184 val64 |= GPIO_CTRL_GPIO_0;
4185 writeq(val64, &bar0->gpio_control);
4186 val64 = readq(&bar0->gpio_control);
4187 } else {
4188 val64 |= ADAPTER_LED_ON;
4189 writeq(val64, &bar0->adapter_control);
4190 }
4191 val64 = readq(&bar0->adapter_status);
4192 if (!LINK_IS_UP(val64)) {
4193 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4194 DBG_PRINT(ERR_DBG, " Link down ");
4195 DBG_PRINT(ERR_DBG, "after ");
4196 DBG_PRINT(ERR_DBG, "enabling ");
4197 DBG_PRINT(ERR_DBG, "device\n");
4198 }
4199 if (nic->device_enabled_once == FALSE) {
4200 nic->device_enabled_once = TRUE;
4201 }
4202 s2io_link(nic, LINK_UP);
4203 } else {
4204 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4205 val64 = readq(&bar0->gpio_control);
4206 val64 &= ~GPIO_CTRL_GPIO_0;
4207 writeq(val64, &bar0->gpio_control);
4208 val64 = readq(&bar0->gpio_control);
4209 }
4210 s2io_link(nic, LINK_DOWN);
4211 }
4212 } else { /* NIC is not Quiescent. */
4213 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4214 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4215 netif_stop_queue(dev);
4216 }
4217 clear_bit(0, &(nic->link_state));
4218}
4219
4220static void s2io_card_down(nic_t * sp)
4221{
4222 int cnt = 0;
4223 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4224 unsigned long flags;
4225 register u64 val64 = 0;
4226
4227 /* If s2io_set_link task is executing, wait till it completes. */
4228 while (test_and_set_bit(0, &(sp->link_state)))
4229 msleep(50);
4230 atomic_set(&sp->card_state, CARD_DOWN);
4231
4232 /* disable Tx and Rx traffic on the NIC */
4233 stop_nic(sp);
4234
4235 /* Kill tasklet. */
4236 tasklet_kill(&sp->task);
4237
4238 /* Check if the device is Quiescent and then Reset the NIC */
4239 do {
4240 val64 = readq(&bar0->adapter_status);
4241 if (verify_xena_quiescence(val64, sp->device_enabled_once)) {
4242 break;
4243 }
4244
4245 msleep(50);
4246 cnt++;
4247 if (cnt == 10) {
4248 DBG_PRINT(ERR_DBG,
4249 "s2io_close:Device not Quiescent ");
4250 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4251 (unsigned long long) val64);
4252 break;
4253 }
4254 } while (1);
4255 spin_lock_irqsave(&sp->tx_lock, flags);
4256 s2io_reset(sp);
4257
4258 /* Free all unused Tx and Rx buffers */
4259 free_tx_buffers(sp);
4260 free_rx_buffers(sp);
4261
4262 spin_unlock_irqrestore(&sp->tx_lock, flags);
4263 clear_bit(0, &(sp->link_state));
4264}
4265
4266static int s2io_card_up(nic_t * sp)
4267{
4268 int i, ret;
4269 mac_info_t *mac_control;
4270 struct config_param *config;
4271 struct net_device *dev = (struct net_device *) sp->dev;
4272
4273 /* Initialize the H/W I/O registers */
4274 if (init_nic(sp) != 0) {
4275 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4276 dev->name);
4277 return -ENODEV;
4278 }
4279
4280 /*
4281 * Initializing the Rx buffers. Buffers are allocated and posted for
4282 * each of the configured Rx rings.
4283 */
4284 mac_control = &sp->mac_control;
4285 config = &sp->config;
4286
4287 for (i = 0; i < config->rx_ring_num; i++) {
4288 if ((ret = fill_rx_buffers(sp, i))) {
4289 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4290 dev->name);
4291 s2io_reset(sp);
4292 free_rx_buffers(sp);
4293 return -ENOMEM;
4294 }
4295 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4296 atomic_read(&sp->rx_bufs_left[i]));
4297 }
4298
4299 /* Setting its receive mode */
4300 s2io_set_multicast(dev);
4301
4302 /* Enable tasklet for the device */
4303 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4304
4305 /* Enable Rx Traffic and interrupts on the NIC */
4306 if (start_nic(sp)) {
4307 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4308 tasklet_kill(&sp->task);
4309 s2io_reset(sp);
4310 free_irq(dev->irq, dev);
4311 free_rx_buffers(sp);
4312 return -ENODEV;
4313 }
4314
4315 atomic_set(&sp->card_state, CARD_UP);
4316 return 0;
4317}
4318
4319/**
4320 * s2io_restart_nic - Resets the NIC.
4321 * @data : long pointer to the device private structure
4322 * Description:
4323 * This function is scheduled to be run by the s2io_tx_watchdog
4324 * function after 0.5 secs to reset the NIC. The idea is to reduce
4325 * the run time of the watch dog routine which is run holding a
4326 * spin lock.
4327 */
4328
4329static void s2io_restart_nic(unsigned long data)
4330{
4331 struct net_device *dev = (struct net_device *) data;
4332 nic_t *sp = dev->priv;
4333
4334 s2io_card_down(sp);
4335 if (s2io_card_up(sp)) {
4336 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4337 dev->name);
4338 }
4339 netif_wake_queue(dev);
4340 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4341 dev->name);
4342}
4343
4344/**
4345 * s2io_tx_watchdog - Watchdog for transmit side.
4346 * @dev : Pointer to net device structure
4347 * Description:
4348 * This function is triggered if the Tx Queue is stopped
4349 * for a pre-defined amount of time when the Interface is still up.
4350 * If the Interface is jammed in such a situation, the hardware is
4351 * reset (by s2io_card_down) and restarted again (by s2io_card_up) to
4352 * overcome any problem that might have been caused in the hardware.
4353 * Return value:
4354 * void
4355 */
4356
4357static void s2io_tx_watchdog(struct net_device *dev)
4358{
4359 nic_t *sp = dev->priv;
4360
4361 if (netif_carrier_ok(dev)) {
4362 schedule_work(&sp->rst_timer_task);
4363 }
4364}
4365
4366/**
4367 * rx_osm_handler - To perform some OS related operations on SKB.
4368 * @sp: private member of the device structure, pointer to s2io_nic structure.
4369 * @skb : the socket buffer pointer.
4370 * @len : length of the packet
4371 * @cksum : FCS checksum of the frame.
4372 * @ring_no : the ring from which this RxD was extracted.
4373 * Description:
4374 * This function is called by the Rx interrupt service routine to perform
4375 * some OS related operations on the SKB before passing it to the upper
4376 * layers. It mainly checks if the checksum is OK, if so adds it to the
4377 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4378 * to the upper layer. If the checksum is wrong, it increments the Rx
4379 * packet error count, frees the SKB and returns error.
4380 * Return value:
4381 * SUCCESS on success and -1 on failure.
4382 */
4383#ifndef CONFIG_2BUFF_MODE
4384static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no)
4385#else
4386static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
4387 buffAdd_t * ba)
4388#endif
4389{
4390 struct net_device *dev = (struct net_device *) sp->dev;
4391 struct sk_buff *skb =
4392 (struct sk_buff *) ((unsigned long) rxdp->Host_Control);
4393 u16 l3_csum, l4_csum;
4394#ifdef CONFIG_2BUFF_MODE
4395 int buf0_len, buf2_len;
4396 unsigned char *buff;
4397#endif
4398
4399 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4400 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && (sp->rx_csum)) {
4401 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4402 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4403 /*
4404 * NIC verifies if the Checksum of the received
4405 * frame is Ok or not and accordingly returns
4406 * a flag in the RxD.
4407 */
4408 skb->ip_summed = CHECKSUM_UNNECESSARY;
4409 } else {
4410 /*
4411 * Packet with erroneous checksum, let the
4412 * upper layers deal with it.
4413 */
4414 skb->ip_summed = CHECKSUM_NONE;
4415 }
4416 } else {
4417 skb->ip_summed = CHECKSUM_NONE;
4418 }
4419
4420 if (rxdp->Control_1 & RXD_T_CODE) {
4421 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4422 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4423 dev->name, err);
4424 }
4425#ifdef CONFIG_2BUFF_MODE
4426 buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4427 buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4428#endif
4429
4430 skb->dev = dev;
4431#ifndef CONFIG_2BUFF_MODE
4432 skb_put(skb, len);
4433 skb->protocol = eth_type_trans(skb, dev);
4434#else
4435 buff = skb_push(skb, buf0_len);
4436 memcpy(buff, ba->ba_0, buf0_len);
4437 skb_put(skb, buf2_len);
4438 skb->protocol = eth_type_trans(skb, dev);
4439#endif
4440
4441#ifdef CONFIG_S2IO_NAPI
4442 netif_receive_skb(skb);
4443#else
4444 netif_rx(skb);
4445#endif
4446
4447 dev->last_rx = jiffies;
4448 sp->rx_pkt_count++;
4449 sp->stats.rx_packets++;
4450#ifndef CONFIG_2BUFF_MODE
4451 sp->stats.rx_bytes += len;
4452#else
4453 sp->stats.rx_bytes += buf0_len + buf2_len;
4454#endif
4455
4456 atomic_dec(&sp->rx_bufs_left[ring_no]);
4457 rxdp->Host_Control = 0;
4458 return SUCCESS;
4459}
4460
4461/**
4462 * s2io_link - stops/starts the Tx queue.
4463 * @sp : private member of the device structure, which is a pointer to the
4464 * s2io_nic structure.
4465 * @link : indicates whether link is UP/DOWN.
4466 * Description:
4467 * This function stops/starts the Tx queue depending on whether the link
4468 * status of the NIC is down or up. This is called by the Alarm
4469 * interrupt handler whenever a link change interrupt comes up.
4470 * Return value:
4471 * void.
4472 */
4473
4474static void s2io_link(nic_t * sp, int link)
4475{
4476 struct net_device *dev = (struct net_device *) sp->dev;
4477
4478 if (link != sp->last_link_state) {
4479 if (link == LINK_DOWN) {
4480 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4481 netif_carrier_off(dev);
4482 } else {
4483 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4484 netif_carrier_on(dev);
4485 }
4486 }
4487 sp->last_link_state = link;
4488}
4489
4490/**
4491 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
4492 * @sp : private member of the device structure, which is a pointer to the
4493 * s2io_nic structure.
4494 * Description:
4495 * This function initializes a few of the PCI and PCI-X configuration registers
4496 * with recommended values.
4497 * Return value:
4498 * void
4499 */
4500
4501static void s2io_init_pci(nic_t * sp)
4502{
4503 u16 pci_cmd = 0;
4504
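/*
 * PCI-X command register fields manipulated below (per the PCI-X spec):
 * bit 0    - Data Parity Error Recovery Enable
 * bit 1    - Enable Relaxed Ordering
 * bits 3:2 - Maximum Memory Read Byte Count (01b selects 1024 bytes)
 * bits 6:4 - Maximum Outstanding Split Transactions (001b selects 2)
 */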
4505 /* Enable Data Parity Error Recovery in PCI-X command register. */
4506 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4507 &(sp->pcix_cmd));
4508 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4509 (sp->pcix_cmd | 1));
4510 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4511 &(sp->pcix_cmd));
4512
4513 /* Set the PErr Response bit in PCI command register. */
4514 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4515 pci_write_config_word(sp->pdev, PCI_COMMAND,
4516 (pci_cmd | PCI_COMMAND_PARITY));
4517 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4518
4519 /* Set MMRB count to 1024 in PCI-X Command register. */
4520 sp->pcix_cmd &= 0xFFF3;
4521 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, (sp->pcix_cmd | (0x1 << 2))); /* MMRBC 1K */
4522 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4523 &(sp->pcix_cmd));
4524
4525 /* Setting Maximum outstanding splits based on system type. */
4526 sp->pcix_cmd &= 0xFF8F;
4527
4528 sp->pcix_cmd |= XENA_MAX_OUTSTANDING_SPLITS(0x1); /* 2 splits. */
4529 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4530 sp->pcix_cmd);
4531 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4532 &(sp->pcix_cmd));
4533 /* Forcibly disabling relaxed ordering capability of the card. */
4534 sp->pcix_cmd &= 0xfffd;
4535 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4536 sp->pcix_cmd);
4537 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4538 &(sp->pcix_cmd));
4539}
4540
4541MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4542MODULE_LICENSE("GPL");
4543module_param(tx_fifo_num, int, 0);
4544module_param_array(tx_fifo_len, int, NULL, 0);
4545module_param(rx_ring_num, int, 0);
4546module_param_array(rx_ring_sz, int, NULL, 0);
4547module_param(Stats_refresh_time, int, 0);
4548module_param(rmac_pause_time, int, 0);
4549module_param(mc_pause_threshold_q0q3, int, 0);
4550module_param(mc_pause_threshold_q4q7, int, 0);
4551module_param(shared_splits, int, 0);
4552module_param(tmac_util_period, int, 0);
4553module_param(rmac_util_period, int, 0);
4554#ifndef CONFIG_S2IO_NAPI
4555module_param(indicate_max_pkts, int, 0);
4556#endif
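/*
 * Example module load with explicit parameters (values illustrative; array
 * parameters take comma separated lists, one entry per FIFO or ring):
 *
 *	modprobe s2io tx_fifo_num=2 tx_fifo_len=1024,1024 rx_ring_num=1
 */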
4557/**
4558 * s2io_init_nic - Initialization of the adapter.
4559 * @pdev : structure containing the PCI related information of the device.
4560 * @pre: the entry in the s2io_tbl PCI device ID table that matched this device.
4561 * Description:
4562 * The function initializes an adapter identified by the pci_dev structure.
4563 * All OS related initialization, including memory and device structure and
4564 * initialization of the device private variable, is done. Also the swapper
4565 * control register is initialized to enable read and write into the I/O
4566 * registers of the device.
4567 * Return value:
4568 * returns 0 on success and negative on failure.
4569 */
4570
4571static int __devinit
4572s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4573{
4574 nic_t *sp;
4575 struct net_device *dev;
4576 char *dev_name = "S2IO 10GE NIC";
4577 int i, j, ret;
4578 int dma_flag = FALSE;
4579 u32 mac_up, mac_down;
4580 u64 val64 = 0, tmp64 = 0;
4581 XENA_dev_config_t __iomem *bar0 = NULL;
4582 u16 subid;
4583 mac_info_t *mac_control;
4584 struct config_param *config;
4585
4586
4587 DBG_PRINT(ERR_DBG, "Loading S2IO driver with %s\n",
4588 s2io_driver_version);
4589
4590 if ((ret = pci_enable_device(pdev))) {
4591 DBG_PRINT(ERR_DBG,
4592 "s2io_init_nic: pci_enable_device failed\n");
4593 return ret;
4594 }
4595
4596 if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
4597 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4598 dma_flag = TRUE;
4599
4600 if (pci_set_consistent_dma_mask
4601 (pdev, 0xffffffffffffffffULL)) {
4602 DBG_PRINT(ERR_DBG,
4603 "Unable to obtain 64bit DMA for \
4604 consistent allocations\n");
4605 pci_disable_device(pdev);
4606 return -ENOMEM;
4607 }
4608 } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) {
4609 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4610 } else {
4611 pci_disable_device(pdev);
4612 return -ENOMEM;
4613 }
4614
4615 if (pci_request_regions(pdev, s2io_driver_name)) {
4616 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
4617 pci_disable_device(pdev);
4618 return -ENODEV;
4619 }
4620
4621 dev = alloc_etherdev(sizeof(nic_t));
4622 if (dev == NULL) {
4623 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4624 pci_disable_device(pdev);
4625 pci_release_regions(pdev);
4626 return -ENODEV;
4627 }
4628
4629 pci_set_master(pdev);
4630 pci_set_drvdata(pdev, dev);
4631 SET_MODULE_OWNER(dev);
4632 SET_NETDEV_DEV(dev, &pdev->dev);
4633
4634 /* Private member variable initialized to s2io NIC structure */
4635 sp = dev->priv;
4636 memset(sp, 0, sizeof(nic_t));
4637 sp->dev = dev;
4638 sp->pdev = pdev;
4639 sp->vendor_id = pdev->vendor;
4640 sp->device_id = pdev->device;
4641 sp->high_dma_flag = dma_flag;
4642 sp->irq = pdev->irq;
4643 sp->device_enabled_once = FALSE;
4644 strcpy(sp->name, dev_name);
4645
4646 /* Initialize some PCI/PCI-X fields of the NIC. */
4647 s2io_init_pci(sp);
4648
4649 /*
4650 * Setting the device configuration parameters.
4651 * Most of these parameters can be specified by the user during
4652 * module insertion as they are module loadable parameters. If
4653 * these parameters are not specified during load time, they
4654 * are initialized with default values.
4655 */
4656 mac_control = &sp->mac_control;
4657 config = &sp->config;
4658
4659 /* Tx side parameters. */
4660 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
4661 config->tx_fifo_num = tx_fifo_num;
4662 for (i = 0; i < MAX_TX_FIFOS; i++) {
4663 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4664 config->tx_cfg[i].fifo_priority = i;
4665 }
4666
4667 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4668 for (i = 0; i < config->tx_fifo_num; i++) {
4669 config->tx_cfg[i].f_no_snoop =
4670 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4671 if (config->tx_cfg[i].fifo_len < 65) {
4672 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4673 break;
4674 }
4675 }
4676 config->max_txds = MAX_SKB_FRAGS;
4677
4678 /* Rx side parameters. */
4679 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
4680 config->rx_ring_num = rx_ring_num;
4681 for (i = 0; i < MAX_RX_RINGS; i++) {
4682 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4683 (MAX_RXDS_PER_BLOCK + 1);
4684 config->rx_cfg[i].ring_priority = i;
4685 }
4686
4687 for (i = 0; i < rx_ring_num; i++) {
4688 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4689 config->rx_cfg[i].f_no_snoop =
4690 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4691 }
4692
4693 /* Setting Mac Control parameters */
4694 mac_control->rmac_pause_time = rmac_pause_time;
4695 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4696 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4697
4698
4699 /* Initialize Ring buffer parameters. */
4700 for (i = 0; i < config->rx_ring_num; i++)
4701 atomic_set(&sp->rx_bufs_left[i], 0);
4702
4703 /* initialize the shared memory used by the NIC and the host */
4704 if (init_shared_mem(sp)) {
4705 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4706 dev->name);
4707 ret = -ENOMEM;
4708 goto mem_alloc_failed;
4709 }
4710
4711 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4712 pci_resource_len(pdev, 0));
4713 if (!sp->bar0) {
4714 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4715 dev->name);
4716 ret = -ENOMEM;
4717 goto bar0_remap_failed;
4718 }
4719
4720 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4721 pci_resource_len(pdev, 2));
4722 if (!sp->bar1) {
4723 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4724 dev->name);
4725 ret = -ENOMEM;
4726 goto bar1_remap_failed;
4727 }
4728
4729 dev->irq = pdev->irq;
4730 dev->base_addr = (unsigned long) sp->bar0;
4731
4732 /* Initializing the BAR1 address as the start of the FIFO pointer. */
4733 for (j = 0; j < MAX_TX_FIFOS; j++) {
4734 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4735 (sp->bar1 + (j * 0x00020000));
4736 }
4737
4738 /* Driver entry points */
4739 dev->open = &s2io_open;
4740 dev->stop = &s2io_close;
4741 dev->hard_start_xmit = &s2io_xmit;
4742 dev->get_stats = &s2io_get_stats;
4743 dev->set_multicast_list = &s2io_set_multicast;
4744 dev->do_ioctl = &s2io_ioctl;
4745 dev->change_mtu = &s2io_change_mtu;
4746 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4747 /*
4748 * will use eth_mac_addr() for dev->set_mac_address
4749 * mac address will be set every time dev->open() is called
4750 */
4751#ifdef CONFIG_S2IO_NAPI
4752 dev->poll = s2io_poll;
4753 dev->weight = 90;
4754#endif
4755
4756 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4757 if (sp->high_dma_flag == TRUE)
4758 dev->features |= NETIF_F_HIGHDMA;
4759#ifdef NETIF_F_TSO
4760 dev->features |= NETIF_F_TSO;
4761#endif
4762
4763 dev->tx_timeout = &s2io_tx_watchdog;
4764 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
4765 INIT_WORK(&sp->rst_timer_task,
4766 (void (*)(void *)) s2io_restart_nic, dev);
4767 INIT_WORK(&sp->set_link_task,
4768 (void (*)(void *)) s2io_set_link, sp);
4769
4770 pci_save_state(sp->pdev);
4771
4772 /* Setting swapper control on the NIC, for proper reset operation */
4773 if (s2io_set_swapper(sp)) {
4774 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4775 dev->name);
4776 ret = -EAGAIN;
4777 goto set_swap_failed;
4778 }
4779
4780 /* Fix for all "FFs" MAC address problems observed on Alpha platforms */
4781 fix_mac_address(sp);
4782 s2io_reset(sp);
4783
4784 /*
4785 * Setting swapper control on the NIC, so the MAC address can be read.
4786 */
4787 if (s2io_set_swapper(sp)) {
4788 DBG_PRINT(ERR_DBG,
4789 "%s: S2IO: swapper settings are wrong\n",
4790 dev->name);
4791 ret = -EAGAIN;
4792 goto set_swap_failed;
4793 }
4794
4795 /*
4796 * MAC address initialization.
4797 * For now only one mac address will be read and used.
4798 */
4799 bar0 = sp->bar0;
4800 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4801 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4802 writeq(val64, &bar0->rmac_addr_cmd_mem);
4803 wait_for_cmd_complete(sp);
4804
4805 tmp64 = readq(&bar0->rmac_addr_data0_mem);
4806 mac_down = (u32) tmp64;
4807 mac_up = (u32) (tmp64 >> 32);
4808
4809 memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
4810
4811 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
4812 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
4813 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
4814 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
4815 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4816 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
4817
4818 DBG_PRINT(INIT_DBG,
4819 "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
4820 sp->def_mac_addr[0].mac_addr[0],
4821 sp->def_mac_addr[0].mac_addr[1],
4822 sp->def_mac_addr[0].mac_addr[2],
4823 sp->def_mac_addr[0].mac_addr[3],
4824 sp->def_mac_addr[0].mac_addr[4],
4825 sp->def_mac_addr[0].mac_addr[5]);
4826
4827 /* Set the factory defined MAC address initially */
4828 dev->addr_len = ETH_ALEN;
4829 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
4830
4831 /*
4832 * Initialize the tasklet status and link state flags
4833 * and the card state parameter.
4834 */
4835 atomic_set(&(sp->card_state), 0);
4836 sp->tasklet_status = 0;
4837 sp->link_state = 0;
4838
4839
4840 /* Initialize spinlocks */
4841 spin_lock_init(&sp->tx_lock);
4842#ifndef CONFIG_S2IO_NAPI
4843 spin_lock_init(&sp->put_lock);
4844#endif
4845
4846 /*
4847 * SXE-002: Configure link and activity LED to init state
4848 * on driver load.
4849 */
4850 subid = sp->pdev->subsystem_device;
4851 if ((subid & 0xFF) >= 0x07) {
4852 val64 = readq(&bar0->gpio_control);
4853 val64 |= 0x0000800000000000ULL;
4854 writeq(val64, &bar0->gpio_control);
4855 val64 = 0x0411040400000000ULL;
4856 writeq(val64, (void __iomem *) bar0 + 0x2700);
4857 val64 = readq(&bar0->gpio_control);
4858 }
4859
4860 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
4861
4862 if (register_netdev(dev)) {
4863 DBG_PRINT(ERR_DBG, "Device registration failed\n");
4864 ret = -ENODEV;
4865 goto register_failed;
4866 }
4867
4868 /*
4869 * Make Link state as off at this point, when the Link change
4870 * interrupt comes the state will be automatically changed to
4871 * the right state.
4872 */
4873 netif_carrier_off(dev);
4874 sp->last_link_state = LINK_DOWN;
4875
4876 return 0;
4877
4878 register_failed:
4879 set_swap_failed:
4880 iounmap(sp->bar1);
4881 bar1_remap_failed:
4882 iounmap(sp->bar0);
4883 bar0_remap_failed:
4884 mem_alloc_failed:
4885 free_shared_mem(sp);
4886 pci_disable_device(pdev);
4887 pci_release_regions(pdev);
4888 pci_set_drvdata(pdev, NULL);
4889 free_netdev(dev);
4890
4891 return ret;
4892}
4893
4894/**
4895 * s2io_rem_nic - Free the PCI device
4896 * @pdev: structure containing the PCI related information of the device.
4897 * Description: This function is called by the PCI subsystem to release a
4898 * PCI device and free up all resources held by the device. This could
4899 * be in response to a Hot plug event or when the driver is to be removed
4900 * from memory.
4901 */
4902
4903static void __devexit s2io_rem_nic(struct pci_dev *pdev)
4904{
4905 struct net_device *dev =
4906 (struct net_device *) pci_get_drvdata(pdev);
4907 nic_t *sp;
4908
4909 if (dev == NULL) {
4910 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
4911 return;
4912 }
4913
4914 sp = dev->priv;
4915 unregister_netdev(dev);
4916
4917 free_shared_mem(sp);
4918 iounmap(sp->bar0);
4919 iounmap(sp->bar1);
4920 pci_disable_device(pdev);
4921 pci_release_regions(pdev);
4922 pci_set_drvdata(pdev, NULL);
4923
4924 free_netdev(dev);
4925}
4926
4927/**
4928 * s2io_starter - Entry point for the driver
4929 * Description: This function is the entry point for the driver. It registers
4930 * the driver with the PCI subsystem.
4931 */
4932
4933int __init s2io_starter(void)
4934{
4935 return pci_module_init(&s2io_driver);
4936}
4937
4938/**
4939 * s2io_closer - Cleanup routine for the driver
4940 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
4941 */
4942
4943static void s2io_closer(void)
4944{
4945 pci_unregister_driver(&s2io_driver);
4946 DBG_PRINT(INIT_DBG, "cleanup done\n");
4947}
4948
4949module_init(s2io_starter);
4950module_exit(s2io_closer);