Diffstat (limited to 'drivers/net/s2io.c')
-rw-r--r--  drivers/net/s2io.c  672
1 file changed, 317 insertions(+), 355 deletions(-)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index c6b77acb35ef..e72e0e099060 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -71,12 +71,13 @@
71#include <asm/uaccess.h> 71#include <asm/uaccess.h>
72#include <asm/io.h> 72#include <asm/io.h>
73#include <asm/div64.h> 73#include <asm/div64.h>
74#include <asm/irq.h>
74 75
75/* local include */ 76/* local include */
76#include "s2io.h" 77#include "s2io.h"
77#include "s2io-regs.h" 78#include "s2io-regs.h"
78 79
79#define DRV_VERSION "2.0.14.2" 80#define DRV_VERSION "2.0.15.2"
80 81
81/* S2io Driver name & version. */ 82/* S2io Driver name & version. */
82static char s2io_driver_name[] = "Neterion"; 83static char s2io_driver_name[] = "Neterion";
@@ -370,38 +371,50 @@ static const u64 fix_mac[] = {
370 END_SIGN 371 END_SIGN
371}; 372};
372 373
374MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
375MODULE_LICENSE("GPL");
376MODULE_VERSION(DRV_VERSION);
377
378
373/* Module Loadable parameters. */ 379/* Module Loadable parameters. */
374static unsigned int tx_fifo_num = 1; 380S2IO_PARM_INT(tx_fifo_num, 1);
375static unsigned int tx_fifo_len[MAX_TX_FIFOS] = 381S2IO_PARM_INT(rx_ring_num, 1);
376 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; 382
377static unsigned int rx_ring_num = 1; 383
378static unsigned int rx_ring_sz[MAX_RX_RINGS] = 384S2IO_PARM_INT(rx_ring_mode, 1);
379 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; 385S2IO_PARM_INT(use_continuous_tx_intrs, 1);
380static unsigned int rts_frm_len[MAX_RX_RINGS] = 386S2IO_PARM_INT(rmac_pause_time, 0x100);
381 {[0 ...(MAX_RX_RINGS - 1)] = 0 }; 387S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
382static unsigned int rx_ring_mode = 1; 388S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
383static unsigned int use_continuous_tx_intrs = 1; 389S2IO_PARM_INT(shared_splits, 0);
384static unsigned int rmac_pause_time = 0x100; 390S2IO_PARM_INT(tmac_util_period, 5);
385static unsigned int mc_pause_threshold_q0q3 = 187; 391S2IO_PARM_INT(rmac_util_period, 5);
386static unsigned int mc_pause_threshold_q4q7 = 187; 392S2IO_PARM_INT(bimodal, 0);
387static unsigned int shared_splits; 393S2IO_PARM_INT(l3l4hdr_size, 128);
388static unsigned int tmac_util_period = 5;
389static unsigned int rmac_util_period = 5;
390static unsigned int bimodal = 0;
391static unsigned int l3l4hdr_size = 128;
392#ifndef CONFIG_S2IO_NAPI
393static unsigned int indicate_max_pkts;
394#endif
395/* Frequency of Rx desc syncs expressed as power of 2 */ 394/* Frequency of Rx desc syncs expressed as power of 2 */
396static unsigned int rxsync_frequency = 3; 395S2IO_PARM_INT(rxsync_frequency, 3);
397/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ 396/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
398static unsigned int intr_type = 0; 397S2IO_PARM_INT(intr_type, 0);
399/* Large receive offload feature */ 398/* Large receive offload feature */
400static unsigned int lro = 0; 399S2IO_PARM_INT(lro, 0);
401/* Max pkts to be aggregated by LRO at one time. If not specified, 400/* Max pkts to be aggregated by LRO at one time. If not specified,
402 * aggregation happens until we hit max IP pkt size(64K) 401 * aggregation happens until we hit max IP pkt size(64K)
403 */ 402 */
404static unsigned int lro_max_pkts = 0xFFFF; 403S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
404#ifndef CONFIG_S2IO_NAPI
405S2IO_PARM_INT(indicate_max_pkts, 0);
406#endif
407
408static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
409 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
410static unsigned int rx_ring_sz[MAX_RX_RINGS] =
411 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
412static unsigned int rts_frm_len[MAX_RX_RINGS] =
413 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
414
415module_param_array(tx_fifo_len, uint, NULL, 0);
416module_param_array(rx_ring_sz, uint, NULL, 0);
417module_param_array(rts_frm_len, uint, NULL, 0);
405 418
406/* 419/*
407 * S2IO device table. 420 * S2IO device table.
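
Note on the hunk above: the S2IO_PARM_INT() macro that replaces the hand-written parameter declarations is defined in s2io.h and is not part of this diff. A minimal sketch of what such a wrapper likely expands to (an assumption; the real definition lives in the header):

	#define S2IO_PARM_INT(X, def_val) \
		static unsigned int X = def_val; \
		module_param(X, uint, 0);

The net effect is the same "static unsigned int plus module_param()" pair that the removed lines (and the module_param() calls deleted near the end of the file) spelled out one by one, which is also why the MODULE_AUTHOR/LICENSE/VERSION lines could move up next to the parameters.
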
@@ -464,10 +477,9 @@ static int init_shared_mem(struct s2io_nic *nic)
464 size += config->tx_cfg[i].fifo_len; 477 size += config->tx_cfg[i].fifo_len;
465 } 478 }
466 if (size > MAX_AVAILABLE_TXDS) { 479 if (size > MAX_AVAILABLE_TXDS) {
467 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ", 480 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
468 __FUNCTION__);
469 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size); 481 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
470 return FAILURE; 482 return -EINVAL;
471 } 483 }
472 484
473 lst_size = (sizeof(TxD_t) * config->max_txds); 485 lst_size = (sizeof(TxD_t) * config->max_txds);
@@ -547,6 +559,7 @@ static int init_shared_mem(struct s2io_nic *nic)
547 nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL); 559 nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
548 if (!nic->ufo_in_band_v) 560 if (!nic->ufo_in_band_v)
549 return -ENOMEM; 561 return -ENOMEM;
562 memset(nic->ufo_in_band_v, 0, size);
550 563
551 /* Allocation and initialization of RXDs in Rings */ 564 /* Allocation and initialization of RXDs in Rings */
552 size = 0; 565 size = 0;
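
Note on the hunk above: the added memset() closes a gap where ufo_in_band_v was handed to the hardware uninitialized, but as written it clears only size bytes while the allocation is size * sizeof(u64) bytes. An equivalent form that allocates and zeroes the whole buffer in one call (a suggestion for comparison, not what the patch does) would be:

	nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
	if (!nic->ufo_in_band_v)
		return -ENOMEM;
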
@@ -1213,7 +1226,7 @@ static int init_nic(struct s2io_nic *nic)
1213 break; 1226 break;
1214 } 1227 }
1215 1228
1216 /* Enable Tx FIFO partition 0. */ 1229 /* Enable all configured Tx FIFO partitions */
1217 val64 = readq(&bar0->tx_fifo_partition_0); 1230 val64 = readq(&bar0->tx_fifo_partition_0);
1218 val64 |= (TX_FIFO_PARTITION_EN); 1231 val64 |= (TX_FIFO_PARTITION_EN);
1219 writeq(val64, &bar0->tx_fifo_partition_0); 1232 writeq(val64, &bar0->tx_fifo_partition_0);
@@ -1650,7 +1663,7 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1650 writeq(temp64, &bar0->general_int_mask); 1663 writeq(temp64, &bar0->general_int_mask);
1651 /* 1664 /*
1652 * If Hercules adapter enable GPIO otherwise 1665 * If Hercules adapter enable GPIO otherwise
1653 * disabled all PCIX, Flash, MDIO, IIC and GPIO 1666 * disable all PCIX, Flash, MDIO, IIC and GPIO
1654 * interrupts for now. 1667 * interrupts for now.
1655 * TODO 1668 * TODO
1656 */ 1669 */
@@ -1976,7 +1989,6 @@ static int start_nic(struct s2io_nic *nic)
1976 XENA_dev_config_t __iomem *bar0 = nic->bar0; 1989 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1977 struct net_device *dev = nic->dev; 1990 struct net_device *dev = nic->dev;
1978 register u64 val64 = 0; 1991 register u64 val64 = 0;
1979 u16 interruptible;
1980 u16 subid, i; 1992 u16 subid, i;
1981 mac_info_t *mac_control; 1993 mac_info_t *mac_control;
1982 struct config_param *config; 1994 struct config_param *config;
@@ -2047,16 +2059,6 @@ static int start_nic(struct s2io_nic *nic)
2047 return FAILURE; 2059 return FAILURE;
2048 } 2060 }
2049 2061
2050 /* Enable select interrupts */
2051 if (nic->intr_type != INTA)
2052 en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2053 else {
2054 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2055 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2056 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2057 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
2058 }
2059
2060 /* 2062 /*
2061 * With some switches, link might be already up at this point. 2063 * With some switches, link might be already up at this point.
2062 * Because of this weird behavior, when we enable laser, 2064 * Because of this weird behavior, when we enable laser,
@@ -2130,7 +2132,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in
2130 frag->size, PCI_DMA_TODEVICE); 2132 frag->size, PCI_DMA_TODEVICE);
2131 } 2133 }
2132 } 2134 }
2133 txdlp->Host_Control = 0; 2135 memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds));
2134 return(skb); 2136 return(skb);
2135} 2137}
2136 2138
@@ -2382,9 +2384,14 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2382 skb->data = (void *) (unsigned long)tmp; 2384 skb->data = (void *) (unsigned long)tmp;
2383 skb->tail = (void *) (unsigned long)tmp; 2385 skb->tail = (void *) (unsigned long)tmp;
2384 2386
2385 ((RxD3_t*)rxdp)->Buffer0_ptr = 2387 if (!(((RxD3_t*)rxdp)->Buffer0_ptr))
2386 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2388 ((RxD3_t*)rxdp)->Buffer0_ptr =
2389 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2387 PCI_DMA_FROMDEVICE); 2390 PCI_DMA_FROMDEVICE);
2391 else
2392 pci_dma_sync_single_for_device(nic->pdev,
2393 (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr,
2394 BUF0_LEN, PCI_DMA_FROMDEVICE);
2388 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 2395 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2389 if (nic->rxd_mode == RXD_MODE_3B) { 2396 if (nic->rxd_mode == RXD_MODE_3B) {
2390 /* Two buffer mode */ 2397 /* Two buffer mode */
@@ -2397,10 +2404,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2397 (nic->pdev, skb->data, dev->mtu + 4, 2404 (nic->pdev, skb->data, dev->mtu + 4,
2398 PCI_DMA_FROMDEVICE); 2405 PCI_DMA_FROMDEVICE);
2399 2406
2400 /* Buffer-1 will be dummy buffer not used */ 2407 /* Buffer-1 will be dummy buffer. Not used */
2401 ((RxD3_t*)rxdp)->Buffer1_ptr = 2408 if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) {
2402 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN, 2409 ((RxD3_t*)rxdp)->Buffer1_ptr =
2403 PCI_DMA_FROMDEVICE); 2410 pci_map_single(nic->pdev,
2411 ba->ba_1, BUF1_LEN,
2412 PCI_DMA_FROMDEVICE);
2413 }
2404 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); 2414 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2405 rxdp->Control_2 |= SET_BUFFER2_SIZE_3 2415 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2406 (dev->mtu + 4); 2416 (dev->mtu + 4);
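
Note on the hunk above: fill_rx_buffers() no longer re-runs pci_map_single() on Buffer0/Buffer1 every time a descriptor is refilled; once a buffer has a streaming DMA mapping, ownership is simply handed back to the device with a sync. A condensed sketch of the pattern, with rxd/pdev/buf used purely as illustration names:

	/* map once, the first time this descriptor is filled */
	if (!rxd->Buffer0_ptr)
		rxd->Buffer0_ptr = pci_map_single(pdev, buf, BUF0_LEN,
						  PCI_DMA_FROMDEVICE);
	else
		/* already mapped: just give ownership back to the device */
		pci_dma_sync_single_for_device(pdev, rxd->Buffer0_ptr,
					       BUF0_LEN, PCI_DMA_FROMDEVICE);

The matching half of the pattern appears in the rx_intr_handler hunk further down, where the full pci_unmap_single() of Buffer0 is replaced by pci_dma_sync_single_for_cpu() before the CPU reads the buffer.
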
@@ -2625,23 +2635,23 @@ no_rx:
2625} 2635}
2626#endif 2636#endif
2627 2637
2638#ifdef CONFIG_NET_POLL_CONTROLLER
2628/** 2639/**
2629 * s2io_netpoll - Rx interrupt service handler for netpoll support 2640 * s2io_netpoll - netpoll event handler entry point
2630 * @dev : pointer to the device structure. 2641 * @dev : pointer to the device structure.
2631 * Description: 2642 * Description:
2632 * Polling 'interrupt' - used by things like netconsole to send skbs 2643 * This function will be called by upper layer to check for events on the
2633 * without having to re-enable interrupts. It's not called while 2644 * interface in situations where interrupts are disabled. It is used for
2634 * the interrupt routine is executing. 2645 * specific in-kernel networking tasks, such as remote consoles and kernel
2646 * debugging over the network (example netdump in RedHat).
2635 */ 2647 */
2636
2637#ifdef CONFIG_NET_POLL_CONTROLLER
2638static void s2io_netpoll(struct net_device *dev) 2648static void s2io_netpoll(struct net_device *dev)
2639{ 2649{
2640 nic_t *nic = dev->priv; 2650 nic_t *nic = dev->priv;
2641 mac_info_t *mac_control; 2651 mac_info_t *mac_control;
2642 struct config_param *config; 2652 struct config_param *config;
2643 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2653 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2644 u64 val64; 2654 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2645 int i; 2655 int i;
2646 2656
2647 disable_irq(dev->irq); 2657 disable_irq(dev->irq);
@@ -2650,9 +2660,17 @@ static void s2io_netpoll(struct net_device *dev)
2650 mac_control = &nic->mac_control; 2660 mac_control = &nic->mac_control;
2651 config = &nic->config; 2661 config = &nic->config;
2652 2662
2653 val64 = readq(&bar0->rx_traffic_int);
2654 writeq(val64, &bar0->rx_traffic_int); 2663 writeq(val64, &bar0->rx_traffic_int);
2664 writeq(val64, &bar0->tx_traffic_int);
2655 2665
2666 /* we need to free up the transmitted skbufs or else netpoll will
2667 * run out of skbs and will fail and eventually netpoll application such
2668 * as netdump will fail.
2669 */
2670 for (i = 0; i < config->tx_fifo_num; i++)
2671 tx_intr_handler(&mac_control->fifos[i]);
2672
2673 /* check for received packet and indicate up to network */
2656 for (i = 0; i < config->rx_ring_num; i++) 2674 for (i = 0; i < config->rx_ring_num; i++)
2657 rx_intr_handler(&mac_control->rings[i]); 2675 rx_intr_handler(&mac_control->rings[i]);
2658 2676
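
Note on the hunk above: for context on how this handler is reached, under CONFIG_NET_POLL_CONTROLLER the driver points dev->poll_controller at s2io_netpoll (that assignment lives in the probe path, outside this hunk), and netconsole/netdump call it while normal interrupts are unavailable. That is why the handler must now both reap completed Tx descriptors and indicate Rx frames itself. A rough, illustrative skeleton of the shape of such a handler:

	static void example_netpoll(struct net_device *dev)
	{
		disable_irq(dev->irq);	/* no real interrupt will run while we poll */
		/* ack pending Tx/Rx event bits, free transmitted skbs,
		 * then pass any received frames up the stack */
		enable_irq(dev->irq);
	}

	/* in the probe routine (assumed, not shown in this diff): */
	#ifdef CONFIG_NET_POLL_CONTROLLER
		dev->poll_controller = example_netpoll;
	#endif
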
@@ -2719,7 +2737,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
2719 /* If your are next to put index then it's FIFO full condition */ 2737 /* If your are next to put index then it's FIFO full condition */
2720 if ((get_block == put_block) && 2738 if ((get_block == put_block) &&
2721 (get_info.offset + 1) == put_info.offset) { 2739 (get_info.offset + 1) == put_info.offset) {
2722 DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name); 2740 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2723 break; 2741 break;
2724 } 2742 }
2725 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); 2743 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
@@ -2739,18 +2757,15 @@ static void rx_intr_handler(ring_info_t *ring_data)
2739 HEADER_SNAP_SIZE, 2757 HEADER_SNAP_SIZE,
2740 PCI_DMA_FROMDEVICE); 2758 PCI_DMA_FROMDEVICE);
2741 } else if (nic->rxd_mode == RXD_MODE_3B) { 2759 } else if (nic->rxd_mode == RXD_MODE_3B) {
2742 pci_unmap_single(nic->pdev, (dma_addr_t) 2760 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2743 ((RxD3_t*)rxdp)->Buffer0_ptr, 2761 ((RxD3_t*)rxdp)->Buffer0_ptr,
2744 BUF0_LEN, PCI_DMA_FROMDEVICE); 2762 BUF0_LEN, PCI_DMA_FROMDEVICE);
2745 pci_unmap_single(nic->pdev, (dma_addr_t) 2763 pci_unmap_single(nic->pdev, (dma_addr_t)
2746 ((RxD3_t*)rxdp)->Buffer1_ptr,
2747 BUF1_LEN, PCI_DMA_FROMDEVICE);
2748 pci_unmap_single(nic->pdev, (dma_addr_t)
2749 ((RxD3_t*)rxdp)->Buffer2_ptr, 2764 ((RxD3_t*)rxdp)->Buffer2_ptr,
2750 dev->mtu + 4, 2765 dev->mtu + 4,
2751 PCI_DMA_FROMDEVICE); 2766 PCI_DMA_FROMDEVICE);
2752 } else { 2767 } else {
2753 pci_unmap_single(nic->pdev, (dma_addr_t) 2768 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2754 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, 2769 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2755 PCI_DMA_FROMDEVICE); 2770 PCI_DMA_FROMDEVICE);
2756 pci_unmap_single(nic->pdev, (dma_addr_t) 2771 pci_unmap_single(nic->pdev, (dma_addr_t)
@@ -3338,7 +3353,7 @@ static void s2io_reset(nic_t * sp)
3338 3353
3339 /* Clear certain PCI/PCI-X fields after reset */ 3354 /* Clear certain PCI/PCI-X fields after reset */
3340 if (sp->device_type == XFRAME_II_DEVICE) { 3355 if (sp->device_type == XFRAME_II_DEVICE) {
3341 /* Clear parity err detect bit */ 3356 /* Clear "detected parity error" bit */
3342 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); 3357 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3343 3358
3344 /* Clearing PCIX Ecc status register */ 3359 /* Clearing PCIX Ecc status register */
@@ -3539,7 +3554,7 @@ static void restore_xmsi_data(nic_t *nic)
3539 u64 val64; 3554 u64 val64;
3540 int i; 3555 int i;
3541 3556
3542 for (i=0; i< nic->avail_msix_vectors; i++) { 3557 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3543 writeq(nic->msix_info[i].addr, &bar0->xmsi_address); 3558 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3544 writeq(nic->msix_info[i].data, &bar0->xmsi_data); 3559 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3545 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6)); 3560 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
@@ -3558,7 +3573,7 @@ static void store_xmsi_data(nic_t *nic)
3558 int i; 3573 int i;
3559 3574
3560 /* Store and display */ 3575 /* Store and display */
3561 for (i=0; i< nic->avail_msix_vectors; i++) { 3576 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3562 val64 = (BIT(15) | vBIT(i, 26, 6)); 3577 val64 = (BIT(15) | vBIT(i, 26, 6));
3563 writeq(val64, &bar0->xmsi_access); 3578 writeq(val64, &bar0->xmsi_access);
3564 if (wait_for_msix_trans(nic, i)) { 3579 if (wait_for_msix_trans(nic, i)) {
@@ -3749,101 +3764,19 @@ static int s2io_open(struct net_device *dev)
3749 if (err) { 3764 if (err) {
3750 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", 3765 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3751 dev->name); 3766 dev->name);
3752 if (err == -ENODEV) 3767 goto hw_init_failed;
3753 goto hw_init_failed;
3754 else
3755 goto hw_enable_failed;
3756 }
3757
3758 /* Store the values of the MSIX table in the nic_t structure */
3759 store_xmsi_data(sp);
3760
3761 /* After proper initialization of H/W, register ISR */
3762 if (sp->intr_type == MSI) {
3763 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
3764 IRQF_SHARED, sp->name, dev);
3765 if (err) {
3766 DBG_PRINT(ERR_DBG, "%s: MSI registration \
3767failed\n", dev->name);
3768 goto isr_registration_failed;
3769 }
3770 }
3771 if (sp->intr_type == MSI_X) {
3772 int i;
3773
3774 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
3775 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
3776 sprintf(sp->desc1, "%s:MSI-X-%d-TX",
3777 dev->name, i);
3778 err = request_irq(sp->entries[i].vector,
3779 s2io_msix_fifo_handle, 0, sp->desc1,
3780 sp->s2io_entries[i].arg);
3781 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1,
3782 (unsigned long long)sp->msix_info[i].addr);
3783 } else {
3784 sprintf(sp->desc2, "%s:MSI-X-%d-RX",
3785 dev->name, i);
3786 err = request_irq(sp->entries[i].vector,
3787 s2io_msix_ring_handle, 0, sp->desc2,
3788 sp->s2io_entries[i].arg);
3789 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2,
3790 (unsigned long long)sp->msix_info[i].addr);
3791 }
3792 if (err) {
3793 DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \
3794failed\n", dev->name, i);
3795 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
3796 goto isr_registration_failed;
3797 }
3798 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
3799 }
3800 }
3801 if (sp->intr_type == INTA) {
3802 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
3803 sp->name, dev);
3804 if (err) {
3805 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
3806 dev->name);
3807 goto isr_registration_failed;
3808 }
3809 } 3768 }
3810 3769
3811 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) { 3770 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3812 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n"); 3771 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3772 s2io_card_down(sp);
3813 err = -ENODEV; 3773 err = -ENODEV;
3814 goto setting_mac_address_failed; 3774 goto hw_init_failed;
3815 } 3775 }
3816 3776
3817 netif_start_queue(dev); 3777 netif_start_queue(dev);
3818 return 0; 3778 return 0;
3819 3779
3820setting_mac_address_failed:
3821 if (sp->intr_type != MSI_X)
3822 free_irq(sp->pdev->irq, dev);
3823isr_registration_failed:
3824 del_timer_sync(&sp->alarm_timer);
3825 if (sp->intr_type == MSI_X) {
3826 int i;
3827 u16 msi_control; /* Temp variable */
3828
3829 for (i=1; (sp->s2io_entries[i].in_use ==
3830 MSIX_REGISTERED_SUCCESS); i++) {
3831 int vector = sp->entries[i].vector;
3832 void *arg = sp->s2io_entries[i].arg;
3833
3834 free_irq(vector, arg);
3835 }
3836 pci_disable_msix(sp->pdev);
3837
3838 /* Temp */
3839 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3840 msi_control &= 0xFFFE; /* Disable MSI */
3841 pci_write_config_word(sp->pdev, 0x42, msi_control);
3842 }
3843 else if (sp->intr_type == MSI)
3844 pci_disable_msi(sp->pdev);
3845hw_enable_failed:
3846 s2io_reset(sp);
3847hw_init_failed: 3780hw_init_failed:
3848 if (sp->intr_type == MSI_X) { 3781 if (sp->intr_type == MSI_X) {
3849 if (sp->entries) 3782 if (sp->entries)
@@ -3874,7 +3807,7 @@ static int s2io_close(struct net_device *dev)
3874 flush_scheduled_work(); 3807 flush_scheduled_work();
3875 netif_stop_queue(dev); 3808 netif_stop_queue(dev);
3876 /* Reset card, kill tasklet and free Tx and Rx buffers. */ 3809 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3877 s2io_card_down(sp, 1); 3810 s2io_card_down(sp);
3878 3811
3879 sp->device_close_flag = TRUE; /* Device is shut down. */ 3812 sp->device_close_flag = TRUE; /* Device is shut down. */
3880 return 0; 3813 return 0;
@@ -3901,13 +3834,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3901 TxD_t *txdp; 3834 TxD_t *txdp;
3902 TxFIFO_element_t __iomem *tx_fifo; 3835 TxFIFO_element_t __iomem *tx_fifo;
3903 unsigned long flags; 3836 unsigned long flags;
3904#ifdef NETIF_F_TSO
3905 int mss;
3906#endif
3907 u16 vlan_tag = 0; 3837 u16 vlan_tag = 0;
3908 int vlan_priority = 0; 3838 int vlan_priority = 0;
3909 mac_info_t *mac_control; 3839 mac_info_t *mac_control;
3910 struct config_param *config; 3840 struct config_param *config;
3841 int offload_type;
3911 3842
3912 mac_control = &sp->mac_control; 3843 mac_control = &sp->mac_control;
3913 config = &sp->config; 3844 config = &sp->config;
@@ -3955,13 +3886,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3955 return 0; 3886 return 0;
3956 } 3887 }
3957 3888
3958 txdp->Control_1 = 0; 3889 offload_type = s2io_offload_type(skb);
3959 txdp->Control_2 = 0;
3960#ifdef NETIF_F_TSO 3890#ifdef NETIF_F_TSO
3961 mss = skb_shinfo(skb)->gso_size; 3891 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3962 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3963 txdp->Control_1 |= TXD_TCP_LSO_EN; 3892 txdp->Control_1 |= TXD_TCP_LSO_EN;
3964 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss); 3893 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
3965 } 3894 }
3966#endif 3895#endif
3967 if (skb->ip_summed == CHECKSUM_HW) { 3896 if (skb->ip_summed == CHECKSUM_HW) {
@@ -3979,10 +3908,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3979 } 3908 }
3980 3909
3981 frg_len = skb->len - skb->data_len; 3910 frg_len = skb->len - skb->data_len;
3982 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) { 3911 if (offload_type == SKB_GSO_UDP) {
3983 int ufo_size; 3912 int ufo_size;
3984 3913
3985 ufo_size = skb_shinfo(skb)->gso_size; 3914 ufo_size = s2io_udp_mss(skb);
3986 ufo_size &= ~7; 3915 ufo_size &= ~7;
3987 txdp->Control_1 |= TXD_UFO_EN; 3916 txdp->Control_1 |= TXD_UFO_EN;
3988 txdp->Control_1 |= TXD_UFO_MSS(ufo_size); 3917 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
@@ -3999,16 +3928,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3999 sp->ufo_in_band_v, 3928 sp->ufo_in_band_v,
4000 sizeof(u64), PCI_DMA_TODEVICE); 3929 sizeof(u64), PCI_DMA_TODEVICE);
4001 txdp++; 3930 txdp++;
4002 txdp->Control_1 = 0;
4003 txdp->Control_2 = 0;
4004 } 3931 }
4005 3932
4006 txdp->Buffer_Pointer = pci_map_single 3933 txdp->Buffer_Pointer = pci_map_single
4007 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); 3934 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4008 txdp->Host_Control = (unsigned long) skb; 3935 txdp->Host_Control = (unsigned long) skb;
4009 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); 3936 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4010 3937 if (offload_type == SKB_GSO_UDP)
4011 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
4012 txdp->Control_1 |= TXD_UFO_EN; 3938 txdp->Control_1 |= TXD_UFO_EN;
4013 3939
4014 frg_cnt = skb_shinfo(skb)->nr_frags; 3940 frg_cnt = skb_shinfo(skb)->nr_frags;
@@ -4023,12 +3949,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4023 (sp->pdev, frag->page, frag->page_offset, 3949 (sp->pdev, frag->page, frag->page_offset,
4024 frag->size, PCI_DMA_TODEVICE); 3950 frag->size, PCI_DMA_TODEVICE);
4025 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); 3951 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4026 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) 3952 if (offload_type == SKB_GSO_UDP)
4027 txdp->Control_1 |= TXD_UFO_EN; 3953 txdp->Control_1 |= TXD_UFO_EN;
4028 } 3954 }
4029 txdp->Control_1 |= TXD_GATHER_CODE_LAST; 3955 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4030 3956
4031 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) 3957 if (offload_type == SKB_GSO_UDP)
4032 frg_cnt++; /* as Txd0 was used for inband header */ 3958 frg_cnt++; /* as Txd0 was used for inband header */
4033 3959
4034 tx_fifo = mac_control->tx_FIFO_start[queue]; 3960 tx_fifo = mac_control->tx_FIFO_start[queue];
@@ -4037,13 +3963,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4037 3963
4038 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | 3964 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4039 TX_FIFO_LAST_LIST); 3965 TX_FIFO_LAST_LIST);
4040 3966 if (offload_type)
4041#ifdef NETIF_F_TSO
4042 if (mss)
4043 val64 |= TX_FIFO_SPECIAL_FUNC;
4044#endif
4045 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
4046 val64 |= TX_FIFO_SPECIAL_FUNC; 3967 val64 |= TX_FIFO_SPECIAL_FUNC;
3968
4047 writeq(val64, &tx_fifo->List_Control); 3969 writeq(val64, &tx_fifo->List_Control);
4048 3970
4049 mmiowb(); 3971 mmiowb();
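
Note on the s2io_xmit() hunks above: the helpers s2io_offload_type(), s2io_tcp_mss() and s2io_udp_mss() that replace the open-coded skb_shinfo() accesses are not visible in this diff and are presumably added to s2io.h. Plausible minimal definitions, stated as an assumption, would be thin wrappers over the same fields the removed lines read directly:

	static inline int s2io_offload_type(struct sk_buff *skb)
	{
		return skb_shinfo(skb)->gso_type;
	}

	static inline int s2io_tcp_mss(struct sk_buff *skb)
	{
		return skb_shinfo(skb)->gso_size;
	}

	static inline int s2io_udp_mss(struct sk_buff *skb)
	{
		return skb_shinfo(skb)->gso_size;
	}

With these in place the transmit path evaluates offload_type once and branches on it, instead of re-reading skb_shinfo(skb)->gso_type at every TSO/UFO decision point.
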
@@ -4077,13 +3999,41 @@ s2io_alarm_handle(unsigned long data)
4077 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 3999 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4078} 4000}
4079 4001
4002static int s2io_chk_rx_buffers(nic_t *sp, int rng_n)
4003{
4004 int rxb_size, level;
4005
4006 if (!sp->lro) {
4007 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4008 level = rx_buffer_level(sp, rxb_size, rng_n);
4009
4010 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4011 int ret;
4012 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4013 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4014 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4015 DBG_PRINT(ERR_DBG, "Out of memory in %s",
4016 __FUNCTION__);
4017 clear_bit(0, (&sp->tasklet_status));
4018 return -1;
4019 }
4020 clear_bit(0, (&sp->tasklet_status));
4021 } else if (level == LOW)
4022 tasklet_schedule(&sp->task);
4023
4024 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4025 DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
4026 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4027 }
4028 return 0;
4029}
4030
4080static irqreturn_t 4031static irqreturn_t
4081s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) 4032s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
4082{ 4033{
4083 struct net_device *dev = (struct net_device *) dev_id; 4034 struct net_device *dev = (struct net_device *) dev_id;
4084 nic_t *sp = dev->priv; 4035 nic_t *sp = dev->priv;
4085 int i; 4036 int i;
4086 int ret;
4087 mac_info_t *mac_control; 4037 mac_info_t *mac_control;
4088 struct config_param *config; 4038 struct config_param *config;
4089 4039
@@ -4105,35 +4055,8 @@ s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
4105 * reallocate the buffers from the interrupt handler itself, 4055 * reallocate the buffers from the interrupt handler itself,
4106 * else schedule a tasklet to reallocate the buffers. 4056 * else schedule a tasklet to reallocate the buffers.
4107 */ 4057 */
4108 for (i = 0; i < config->rx_ring_num; i++) { 4058 for (i = 0; i < config->rx_ring_num; i++)
4109 if (!sp->lro) { 4059 s2io_chk_rx_buffers(sp, i);
4110 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
4111 int level = rx_buffer_level(sp, rxb_size, i);
4112
4113 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4114 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
4115 dev->name);
4116 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4117 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
4118 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4119 dev->name);
4120 DBG_PRINT(ERR_DBG, " in ISR!!\n");
4121 clear_bit(0, (&sp->tasklet_status));
4122 atomic_dec(&sp->isr_cnt);
4123 return IRQ_HANDLED;
4124 }
4125 clear_bit(0, (&sp->tasklet_status));
4126 } else if (level == LOW) {
4127 tasklet_schedule(&sp->task);
4128 }
4129 }
4130 else if (fill_rx_buffers(sp, i) == -ENOMEM) {
4131 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4132 dev->name);
4133 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4134 break;
4135 }
4136 }
4137 4060
4138 atomic_dec(&sp->isr_cnt); 4061 atomic_dec(&sp->isr_cnt);
4139 return IRQ_HANDLED; 4062 return IRQ_HANDLED;
@@ -4144,39 +4067,13 @@ s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
4144{ 4067{
4145 ring_info_t *ring = (ring_info_t *)dev_id; 4068 ring_info_t *ring = (ring_info_t *)dev_id;
4146 nic_t *sp = ring->nic; 4069 nic_t *sp = ring->nic;
4147 struct net_device *dev = (struct net_device *) dev_id;
4148 int rxb_size, level, rng_n;
4149 4070
4150 atomic_inc(&sp->isr_cnt); 4071 atomic_inc(&sp->isr_cnt);
4151 rx_intr_handler(ring);
4152 4072
4153 rng_n = ring->ring_no; 4073 rx_intr_handler(ring);
4154 if (!sp->lro) { 4074 s2io_chk_rx_buffers(sp, ring->ring_no);
4155 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4156 level = rx_buffer_level(sp, rxb_size, rng_n);
4157
4158 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4159 int ret;
4160 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4161 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4162 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4163 DBG_PRINT(ERR_DBG, "Out of memory in %s",
4164 __FUNCTION__);
4165 clear_bit(0, (&sp->tasklet_status));
4166 return IRQ_HANDLED;
4167 }
4168 clear_bit(0, (&sp->tasklet_status));
4169 } else if (level == LOW) {
4170 tasklet_schedule(&sp->task);
4171 }
4172 }
4173 else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4174 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
4175 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4176 }
4177 4075
4178 atomic_dec(&sp->isr_cnt); 4076 atomic_dec(&sp->isr_cnt);
4179
4180 return IRQ_HANDLED; 4077 return IRQ_HANDLED;
4181} 4078}
4182 4079
@@ -4341,37 +4238,8 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
4341 * else schedule a tasklet to reallocate the buffers. 4238 * else schedule a tasklet to reallocate the buffers.
4342 */ 4239 */
4343#ifndef CONFIG_S2IO_NAPI 4240#ifndef CONFIG_S2IO_NAPI
4344 for (i = 0; i < config->rx_ring_num; i++) { 4241 for (i = 0; i < config->rx_ring_num; i++)
4345 if (!sp->lro) { 4242 s2io_chk_rx_buffers(sp, i);
4346 int ret;
4347 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
4348 int level = rx_buffer_level(sp, rxb_size, i);
4349
4350 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4351 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
4352 dev->name);
4353 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4354 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
4355 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4356 dev->name);
4357 DBG_PRINT(ERR_DBG, " in ISR!!\n");
4358 clear_bit(0, (&sp->tasklet_status));
4359 atomic_dec(&sp->isr_cnt);
4360 writeq(org_mask, &bar0->general_int_mask);
4361 return IRQ_HANDLED;
4362 }
4363 clear_bit(0, (&sp->tasklet_status));
4364 } else if (level == LOW) {
4365 tasklet_schedule(&sp->task);
4366 }
4367 }
4368 else if (fill_rx_buffers(sp, i) == -ENOMEM) {
4369 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4370 dev->name);
4371 DBG_PRINT(ERR_DBG, " in Rx intr!!\n");
4372 break;
4373 }
4374 }
4375#endif 4243#endif
4376 writeq(org_mask, &bar0->general_int_mask); 4244 writeq(org_mask, &bar0->general_int_mask);
4377 atomic_dec(&sp->isr_cnt); 4245 atomic_dec(&sp->isr_cnt);
@@ -4401,6 +4269,8 @@ static void s2io_updt_stats(nic_t *sp)
4401 if (cnt == 5) 4269 if (cnt == 5)
4402 break; /* Updt failed */ 4270 break; /* Updt failed */
4403 } while(1); 4271 } while(1);
4272 } else {
4273 memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t));
4404 } 4274 }
4405} 4275}
4406 4276
@@ -5035,7 +4905,8 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
5035} 4905}
5036static void s2io_vpd_read(nic_t *nic) 4906static void s2io_vpd_read(nic_t *nic)
5037{ 4907{
5038 u8 vpd_data[256],data; 4908 u8 *vpd_data;
4909 u8 data;
5039 int i=0, cnt, fail = 0; 4910 int i=0, cnt, fail = 0;
5040 int vpd_addr = 0x80; 4911 int vpd_addr = 0x80;
5041 4912
@@ -5048,6 +4919,10 @@ static void s2io_vpd_read(nic_t *nic)
5048 vpd_addr = 0x50; 4919 vpd_addr = 0x50;
5049 } 4920 }
5050 4921
4922 vpd_data = kmalloc(256, GFP_KERNEL);
4923 if (!vpd_data)
4924 return;
4925
5051 for (i = 0; i < 256; i +=4 ) { 4926 for (i = 0; i < 256; i +=4 ) {
5052 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); 4927 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5053 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); 4928 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
@@ -5070,6 +4945,7 @@ static void s2io_vpd_read(nic_t *nic)
5070 memset(nic->product_name, 0, vpd_data[1]); 4945 memset(nic->product_name, 0, vpd_data[1]);
5071 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); 4946 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5072 } 4947 }
4948 kfree(vpd_data);
5073} 4949}
5074 4950
5075/** 4951/**
@@ -5388,7 +5264,7 @@ static int s2io_link_test(nic_t * sp, uint64_t * data)
5388 else 5264 else
5389 *data = 0; 5265 *data = 0;
5390 5266
5391 return 0; 5267 return *data;
5392} 5268}
5393 5269
5394/** 5270/**
@@ -5846,6 +5722,19 @@ static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5846 return 0; 5722 return 0;
5847} 5723}
5848 5724
5725static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5726{
5727 return (dev->features & NETIF_F_TSO) != 0;
5728}
5729static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5730{
5731 if (data)
5732 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5733 else
5734 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5735
5736 return 0;
5737}
5849 5738
5850static struct ethtool_ops netdev_ethtool_ops = { 5739static struct ethtool_ops netdev_ethtool_ops = {
5851 .get_settings = s2io_ethtool_gset, 5740 .get_settings = s2io_ethtool_gset,
@@ -5866,8 +5755,8 @@ static struct ethtool_ops netdev_ethtool_ops = {
5866 .get_sg = ethtool_op_get_sg, 5755 .get_sg = ethtool_op_get_sg,
5867 .set_sg = ethtool_op_set_sg, 5756 .set_sg = ethtool_op_set_sg,
5868#ifdef NETIF_F_TSO 5757#ifdef NETIF_F_TSO
5869 .get_tso = ethtool_op_get_tso, 5758 .get_tso = s2io_ethtool_op_get_tso,
5870 .set_tso = ethtool_op_set_tso, 5759 .set_tso = s2io_ethtool_op_set_tso,
5871#endif 5760#endif
5872 .get_ufo = ethtool_op_get_ufo, 5761 .get_ufo = ethtool_op_get_ufo,
5873 .set_ufo = ethtool_op_set_ufo, 5762 .set_ufo = ethtool_op_set_ufo,
@@ -5919,7 +5808,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5919 5808
5920 dev->mtu = new_mtu; 5809 dev->mtu = new_mtu;
5921 if (netif_running(dev)) { 5810 if (netif_running(dev)) {
5922 s2io_card_down(sp, 0); 5811 s2io_card_down(sp);
5923 netif_stop_queue(dev); 5812 netif_stop_queue(dev);
5924 if (s2io_card_up(sp)) { 5813 if (s2io_card_up(sp)) {
5925 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", 5814 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
@@ -6216,43 +6105,106 @@ static int rxd_owner_bit_reset(nic_t *sp)
6216 6105
6217} 6106}
6218 6107
6219static void s2io_card_down(nic_t * sp, int flag) 6108static int s2io_add_isr(nic_t * sp)
6220{ 6109{
6221 int cnt = 0; 6110 int ret = 0;
6222 XENA_dev_config_t __iomem *bar0 = sp->bar0;
6223 unsigned long flags;
6224 register u64 val64 = 0;
6225 struct net_device *dev = sp->dev; 6111 struct net_device *dev = sp->dev;
6112 int err = 0;
6226 6113
6227 del_timer_sync(&sp->alarm_timer); 6114 if (sp->intr_type == MSI)
6228 /* If s2io_set_link task is executing, wait till it completes. */ 6115 ret = s2io_enable_msi(sp);
6229 while (test_and_set_bit(0, &(sp->link_state))) { 6116 else if (sp->intr_type == MSI_X)
6230 msleep(50); 6117 ret = s2io_enable_msi_x(sp);
6118 if (ret) {
6119 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6120 sp->intr_type = INTA;
6231 } 6121 }
6232 atomic_set(&sp->card_state, CARD_DOWN);
6233 6122
6234 /* disable Tx and Rx traffic on the NIC */ 6123 /* Store the values of the MSIX table in the nic_t structure */
6235 stop_nic(sp); 6124 store_xmsi_data(sp);
6236 if (flag) {
6237 if (sp->intr_type == MSI_X) {
6238 int i;
6239 u16 msi_control;
6240 6125
6241 for (i=1; (sp->s2io_entries[i].in_use == 6126 /* After proper initialization of H/W, register ISR */
6242 MSIX_REGISTERED_SUCCESS); i++) { 6127 if (sp->intr_type == MSI) {
6243 int vector = sp->entries[i].vector; 6128 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
6244 void *arg = sp->s2io_entries[i].arg; 6129 IRQF_SHARED, sp->name, dev);
6130 if (err) {
6131 pci_disable_msi(sp->pdev);
6132 DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
6133 dev->name);
6134 return -1;
6135 }
6136 }
6137 if (sp->intr_type == MSI_X) {
6138 int i;
6245 6139
6246 free_irq(vector, arg); 6140 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6141 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6142 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6143 dev->name, i);
6144 err = request_irq(sp->entries[i].vector,
6145 s2io_msix_fifo_handle, 0, sp->desc[i],
6146 sp->s2io_entries[i].arg);
6147 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
6148 (unsigned long long)sp->msix_info[i].addr);
6149 } else {
6150 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6151 dev->name, i);
6152 err = request_irq(sp->entries[i].vector,
6153 s2io_msix_ring_handle, 0, sp->desc[i],
6154 sp->s2io_entries[i].arg);
6155 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
6156 (unsigned long long)sp->msix_info[i].addr);
6247 } 6157 }
6248 pci_read_config_word(sp->pdev, 0x42, &msi_control); 6158 if (err) {
6249 msi_control &= 0xFFFE; /* Disable MSI */ 6159 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6250 pci_write_config_word(sp->pdev, 0x42, msi_control); 6160 "failed\n", dev->name, i);
6251 pci_disable_msix(sp->pdev); 6161 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6252 } else { 6162 return -1;
6253 free_irq(sp->pdev->irq, dev); 6163 }
6254 if (sp->intr_type == MSI) 6164 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6255 pci_disable_msi(sp->pdev); 6165 }
6166 }
6167 if (sp->intr_type == INTA) {
6168 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6169 sp->name, dev);
6170 if (err) {
6171 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6172 dev->name);
6173 return -1;
6174 }
6175 }
6176 return 0;
6177}
6178static void s2io_rem_isr(nic_t * sp)
6179{
6180 int cnt = 0;
6181 struct net_device *dev = sp->dev;
6182
6183 if (sp->intr_type == MSI_X) {
6184 int i;
6185 u16 msi_control;
6186
6187 for (i=1; (sp->s2io_entries[i].in_use ==
6188 MSIX_REGISTERED_SUCCESS); i++) {
6189 int vector = sp->entries[i].vector;
6190 void *arg = sp->s2io_entries[i].arg;
6191
6192 free_irq(vector, arg);
6193 }
6194 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6195 msi_control &= 0xFFFE; /* Disable MSI */
6196 pci_write_config_word(sp->pdev, 0x42, msi_control);
6197
6198 pci_disable_msix(sp->pdev);
6199 } else {
6200 free_irq(sp->pdev->irq, dev);
6201 if (sp->intr_type == MSI) {
6202 u16 val;
6203
6204 pci_disable_msi(sp->pdev);
6205 pci_read_config_word(sp->pdev, 0x4c, &val);
6206 val ^= 0x1;
6207 pci_write_config_word(sp->pdev, 0x4c, val);
6256 } 6208 }
6257 } 6209 }
6258 /* Waiting till all Interrupt handlers are complete */ 6210 /* Waiting till all Interrupt handlers are complete */
@@ -6263,6 +6215,26 @@ static void s2io_card_down(nic_t * sp, int flag)
6263 break; 6215 break;
6264 cnt++; 6216 cnt++;
6265 } while(cnt < 5); 6217 } while(cnt < 5);
6218}
6219
6220static void s2io_card_down(nic_t * sp)
6221{
6222 int cnt = 0;
6223 XENA_dev_config_t __iomem *bar0 = sp->bar0;
6224 unsigned long flags;
6225 register u64 val64 = 0;
6226
6227 del_timer_sync(&sp->alarm_timer);
6228 /* If s2io_set_link task is executing, wait till it completes. */
6229 while (test_and_set_bit(0, &(sp->link_state))) {
6230 msleep(50);
6231 }
6232 atomic_set(&sp->card_state, CARD_DOWN);
6233
6234 /* disable Tx and Rx traffic on the NIC */
6235 stop_nic(sp);
6236
6237 s2io_rem_isr(sp);
6266 6238
6267 /* Kill tasklet. */ 6239 /* Kill tasklet. */
6268 tasklet_kill(&sp->task); 6240 tasklet_kill(&sp->task);
@@ -6314,23 +6286,16 @@ static int s2io_card_up(nic_t * sp)
6314 mac_info_t *mac_control; 6286 mac_info_t *mac_control;
6315 struct config_param *config; 6287 struct config_param *config;
6316 struct net_device *dev = (struct net_device *) sp->dev; 6288 struct net_device *dev = (struct net_device *) sp->dev;
6289 u16 interruptible;
6317 6290
6318 /* Initialize the H/W I/O registers */ 6291 /* Initialize the H/W I/O registers */
6319 if (init_nic(sp) != 0) { 6292 if (init_nic(sp) != 0) {
6320 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", 6293 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6321 dev->name); 6294 dev->name);
6295 s2io_reset(sp);
6322 return -ENODEV; 6296 return -ENODEV;
6323 } 6297 }
6324 6298
6325 if (sp->intr_type == MSI)
6326 ret = s2io_enable_msi(sp);
6327 else if (sp->intr_type == MSI_X)
6328 ret = s2io_enable_msi_x(sp);
6329 if (ret) {
6330 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6331 sp->intr_type = INTA;
6332 }
6333
6334 /* 6299 /*
6335 * Initializing the Rx buffers. For now we are considering only 1 6300 * Initializing the Rx buffers. For now we are considering only 1
6336 * Rx ring and initializing buffers into 30 Rx blocks 6301 * Rx ring and initializing buffers into 30 Rx blocks
@@ -6354,28 +6319,46 @@ static int s2io_card_up(nic_t * sp)
6354 s2io_set_multicast(dev); 6319 s2io_set_multicast(dev);
6355 6320
6356 if (sp->lro) { 6321 if (sp->lro) {
6357 /* Initialize max aggregatable pkts based on MTU */ 6322 /* Initialize max aggregatable pkts per session based on MTU */
6358 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; 6323 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6359 /* Check if we can use(if specified) user provided value */ 6324 /* Check if we can use(if specified) user provided value */
6360 if (lro_max_pkts < sp->lro_max_aggr_per_sess) 6325 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6361 sp->lro_max_aggr_per_sess = lro_max_pkts; 6326 sp->lro_max_aggr_per_sess = lro_max_pkts;
6362 } 6327 }
6363 6328
6364 /* Enable tasklet for the device */
6365 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6366
6367 /* Enable Rx Traffic and interrupts on the NIC */ 6329 /* Enable Rx Traffic and interrupts on the NIC */
6368 if (start_nic(sp)) { 6330 if (start_nic(sp)) {
6369 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name); 6331 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6370 tasklet_kill(&sp->task);
6371 s2io_reset(sp); 6332 s2io_reset(sp);
6372 free_irq(dev->irq, dev); 6333 free_rx_buffers(sp);
6334 return -ENODEV;
6335 }
6336
6337 /* Add interrupt service routine */
6338 if (s2io_add_isr(sp) != 0) {
6339 if (sp->intr_type == MSI_X)
6340 s2io_rem_isr(sp);
6341 s2io_reset(sp);
6373 free_rx_buffers(sp); 6342 free_rx_buffers(sp);
6374 return -ENODEV; 6343 return -ENODEV;
6375 } 6344 }
6376 6345
6377 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2)); 6346 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6378 6347
6348 /* Enable tasklet for the device */
6349 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6350
6351 /* Enable select interrupts */
6352 if (sp->intr_type != INTA)
6353 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6354 else {
6355 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6356 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
6357 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
6358 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6359 }
6360
6361
6379 atomic_set(&sp->card_state, CARD_UP); 6362 atomic_set(&sp->card_state, CARD_UP);
6380 return 0; 6363 return 0;
6381} 6364}
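
Note on the s2io_card_up() hunks above: together with the start_nic() hunk earlier in the diff, they reorder bring-up so that MSI/MSI-X setup and request_irq() (now grouped in s2io_add_isr()) happen after the NIC is started, and the interrupt sources are unmasked only once a handler is actually registered. A commented outline of the resulting sequence (an outline for orientation, not the literal function body):

	/* illustrative outline of the new bring-up order */
	init_nic(sp);			/* program the H/W registers */
	fill_rx_buffers(sp, 0);		/* post Rx descriptors, per ring */
	start_nic(sp);			/* enable Rx traffic on the NIC */
	s2io_add_isr(sp);		/* MSI/MSI-X enable + request_irq() */
	en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);	/* unmask last */

The teardown side mirrors this: s2io_card_down() now always calls the new s2io_rem_isr(), so the old "flag" argument distinguishing the two cleanup paths could be dropped.
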
@@ -6395,7 +6378,7 @@ static void s2io_restart_nic(unsigned long data)
6395 struct net_device *dev = (struct net_device *) data; 6378 struct net_device *dev = (struct net_device *) data;
6396 nic_t *sp = dev->priv; 6379 nic_t *sp = dev->priv;
6397 6380
6398 s2io_card_down(sp, 0); 6381 s2io_card_down(sp);
6399 if (s2io_card_up(sp)) { 6382 if (s2io_card_up(sp)) {
6400 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", 6383 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6401 dev->name); 6384 dev->name);
@@ -6437,7 +6420,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
6437 * @cksum : FCS checksum of the frame. 6420 * @cksum : FCS checksum of the frame.
6438 * @ring_no : the ring from which this RxD was extracted. 6421 * @ring_no : the ring from which this RxD was extracted.
6439 * Description: 6422 * Description:
6440 * This function is called by the Tx interrupt serivce routine to perform 6423 * This function is called by the Rx interrupt serivce routine to perform
6441 * some OS related operations on the SKB before passing it to the upper 6424 * some OS related operations on the SKB before passing it to the upper
6442 * layers. It mainly checks if the checksum is OK, if so adds it to the 6425 * layers. It mainly checks if the checksum is OK, if so adds it to the
6443 * SKBs cksum variable, increments the Rx packet count and passes the SKB 6426 * SKBs cksum variable, increments the Rx packet count and passes the SKB
@@ -6697,33 +6680,6 @@ static void s2io_init_pci(nic_t * sp)
6697 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); 6680 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6698} 6681}
6699 6682
6700MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
6701MODULE_LICENSE("GPL");
6702MODULE_VERSION(DRV_VERSION);
6703
6704module_param(tx_fifo_num, int, 0);
6705module_param(rx_ring_num, int, 0);
6706module_param(rx_ring_mode, int, 0);
6707module_param_array(tx_fifo_len, uint, NULL, 0);
6708module_param_array(rx_ring_sz, uint, NULL, 0);
6709module_param_array(rts_frm_len, uint, NULL, 0);
6710module_param(use_continuous_tx_intrs, int, 1);
6711module_param(rmac_pause_time, int, 0);
6712module_param(mc_pause_threshold_q0q3, int, 0);
6713module_param(mc_pause_threshold_q4q7, int, 0);
6714module_param(shared_splits, int, 0);
6715module_param(tmac_util_period, int, 0);
6716module_param(rmac_util_period, int, 0);
6717module_param(bimodal, bool, 0);
6718module_param(l3l4hdr_size, int , 0);
6719#ifndef CONFIG_S2IO_NAPI
6720module_param(indicate_max_pkts, int, 0);
6721#endif
6722module_param(rxsync_frequency, int, 0);
6723module_param(intr_type, int, 0);
6724module_param(lro, int, 0);
6725module_param(lro_max_pkts, int, 0);
6726
6727static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) 6683static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6728{ 6684{
6729 if ( tx_fifo_num > 8) { 6685 if ( tx_fifo_num > 8) {
@@ -6831,8 +6787,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6831 } 6787 }
6832 if (dev_intr_type != MSI_X) { 6788 if (dev_intr_type != MSI_X) {
6833 if (pci_request_regions(pdev, s2io_driver_name)) { 6789 if (pci_request_regions(pdev, s2io_driver_name)) {
6834 DBG_PRINT(ERR_DBG, "Request Regions failed\n"), 6790 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
6835 pci_disable_device(pdev); 6791 pci_disable_device(pdev);
6836 return -ENODEV; 6792 return -ENODEV;
6837 } 6793 }
6838 } 6794 }
@@ -6956,7 +6912,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6956 /* initialize the shared memory used by the NIC and the host */ 6912 /* initialize the shared memory used by the NIC and the host */
6957 if (init_shared_mem(sp)) { 6913 if (init_shared_mem(sp)) {
6958 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", 6914 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
6959 __FUNCTION__); 6915 dev->name);
6960 ret = -ENOMEM; 6916 ret = -ENOMEM;
6961 goto mem_alloc_failed; 6917 goto mem_alloc_failed;
6962 } 6918 }
@@ -7093,6 +7049,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7093 dev->addr_len = ETH_ALEN; 7049 dev->addr_len = ETH_ALEN;
7094 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); 7050 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7095 7051
7052 /* reset Nic and bring it to known state */
7053 s2io_reset(sp);
7054
7096 /* 7055 /*
7097 * Initialize the tasklet status and link state flags 7056 * Initialize the tasklet status and link state flags
7098 * and the card state parameter 7057 * and the card state parameter
@@ -7130,11 +7089,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7130 goto register_failed; 7089 goto register_failed;
7131 } 7090 }
7132 s2io_vpd_read(sp); 7091 s2io_vpd_read(sp);
7133 DBG_PRINT(ERR_DBG, "%s: Neterion %s",dev->name, sp->product_name);
7134 DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n",
7135 get_xena_rev_id(sp->pdev),
7136 s2io_driver_version);
7137 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n"); 7092 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
7093 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7094 sp->product_name, get_xena_rev_id(sp->pdev));
7095 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7096 s2io_driver_version);
7138 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " 7097 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7139 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, 7098 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
7140 sp->def_mac_addr[0].mac_addr[0], 7099 sp->def_mac_addr[0].mac_addr[0],
@@ -7435,8 +7394,13 @@ static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
7435 if (ip->ihl != 5) /* IP has options */ 7394 if (ip->ihl != 5) /* IP has options */
7436 return -1; 7395 return -1;
7437 7396
7397 /* If we see CE codepoint in IP header, packet is not mergeable */
7398 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7399 return -1;
7400
7401 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7438 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin || 7402 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7439 !tcp->ack) { 7403 tcp->ece || tcp->cwr || !tcp->ack) {
7440 /* 7404 /*
7441 * Currently recognize only the ack control word and 7405 * Currently recognize only the ack control word and
7442 * any other control field being set would result in 7406 * any other control field being set would result in
@@ -7590,18 +7554,16 @@ static void queue_rx_frame(struct sk_buff *skb)
7590static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, 7554static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
7591 u32 tcp_len) 7555 u32 tcp_len)
7592{ 7556{
7593 struct sk_buff *tmp, *first = lro->parent; 7557 struct sk_buff *first = lro->parent;
7594 7558
7595 first->len += tcp_len; 7559 first->len += tcp_len;
7596 first->data_len = lro->frags_len; 7560 first->data_len = lro->frags_len;
7597 skb_pull(skb, (skb->len - tcp_len)); 7561 skb_pull(skb, (skb->len - tcp_len));
7598 if ((tmp = skb_shinfo(first)->frag_list)) { 7562 if (skb_shinfo(first)->frag_list)
7599 while (tmp->next) 7563 lro->last_frag->next = skb;
7600 tmp = tmp->next;
7601 tmp->next = skb;
7602 }
7603 else 7564 else
7604 skb_shinfo(first)->frag_list = skb; 7565 skb_shinfo(first)->frag_list = skb;
7566 lro->last_frag = skb;
7605 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; 7567 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7606 return; 7568 return;
7607} 7569}
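
Note on the hunk above: the rewritten lro_append_pkt() replaces the walk of the parent skb's frag_list with a cached tail pointer, so appending the n-th segment to an LRO session becomes constant time instead of O(n). This relies on a last_frag member being added to lro_t in s2io.h, which is not shown in this diff; a sketch of the assumed structure change, with the other field names taken from the surrounding code:

	struct lro {
		struct sk_buff	*parent;	/* head skb of the aggregated frame */
		struct sk_buff	*last_frag;	/* tail of parent->frag_list; assumed new field */
		int		frags_len;
		/* ...remaining members unchanged... */
	};

last_frag would start out NULL (or be set alongside parent) when a session is initiated, so the first appended segment still goes through the skb_shinfo(first)->frag_list branch before last_frag takes over as the append point.
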