Diffstat (limited to 'drivers/net/s2io.c')
 -rw-r--r--   drivers/net/s2io.c   386
 1 file changed, 173 insertions(+), 213 deletions(-)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index e1fe3a0a7b0b..132ed32bce1a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -76,7 +76,7 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "2.0.14.2"
+#define DRV_VERSION "2.0.15.2"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
@@ -370,38 +370,50 @@ static const u64 fix_mac[] = {
     END_SIGN
 };
 
+MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+
 /* Module Loadable parameters. */
-static unsigned int tx_fifo_num = 1;
-static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
-    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
-static unsigned int rx_ring_num = 1;
-static unsigned int rx_ring_sz[MAX_RX_RINGS] =
-    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
-static unsigned int rts_frm_len[MAX_RX_RINGS] =
-    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
-static unsigned int rx_ring_mode = 1;
-static unsigned int use_continuous_tx_intrs = 1;
-static unsigned int rmac_pause_time = 0x100;
-static unsigned int mc_pause_threshold_q0q3 = 187;
-static unsigned int mc_pause_threshold_q4q7 = 187;
-static unsigned int shared_splits;
-static unsigned int tmac_util_period = 5;
-static unsigned int rmac_util_period = 5;
-static unsigned int bimodal = 0;
-static unsigned int l3l4hdr_size = 128;
-#ifndef CONFIG_S2IO_NAPI
-static unsigned int indicate_max_pkts;
-#endif
+S2IO_PARM_INT(tx_fifo_num, 1);
+S2IO_PARM_INT(rx_ring_num, 1);
+
+
+S2IO_PARM_INT(rx_ring_mode, 1);
+S2IO_PARM_INT(use_continuous_tx_intrs, 1);
+S2IO_PARM_INT(rmac_pause_time, 0x100);
+S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
+S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
+S2IO_PARM_INT(shared_splits, 0);
+S2IO_PARM_INT(tmac_util_period, 5);
+S2IO_PARM_INT(rmac_util_period, 5);
+S2IO_PARM_INT(bimodal, 0);
+S2IO_PARM_INT(l3l4hdr_size, 128);
 /* Frequency of Rx desc syncs expressed as power of 2 */
-static unsigned int rxsync_frequency = 3;
+S2IO_PARM_INT(rxsync_frequency, 3);
 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
-static unsigned int intr_type = 0;
+S2IO_PARM_INT(intr_type, 0);
 /* Large receive offload feature */
-static unsigned int lro = 0;
+S2IO_PARM_INT(lro, 0);
 /* Max pkts to be aggregated by LRO at one time. If not specified,
  * aggregation happens until we hit max IP pkt size(64K)
  */
-static unsigned int lro_max_pkts = 0xFFFF;
+S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
+#ifndef CONFIG_S2IO_NAPI
+S2IO_PARM_INT(indicate_max_pkts, 0);
+#endif
+
+static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
+    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
+static unsigned int rx_ring_sz[MAX_RX_RINGS] =
+    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
+static unsigned int rts_frm_len[MAX_RX_RINGS] =
+    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
+
+module_param_array(tx_fifo_len, uint, NULL, 0);
+module_param_array(rx_ring_sz, uint, NULL, 0);
+module_param_array(rts_frm_len, uint, NULL, 0);
 
 /*
  * S2IO device table.
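
Note on the hunk above: the S2IO_PARM_INT() macro used here is defined in s2io.h rather than in this file, so its body does not appear in the diff. A minimal sketch of a definition consistent with the way it is invoked (the exact form in the header may differ) is:

    /* Sketch only: assumed shape of the macro introduced in s2io.h */
    #define S2IO_PARM_INT(X, def_val) \
            static unsigned int X = def_val; \
            module_param(X, uint, 0);

Each invocation therefore still declares a static unsigned int and registers it as a module parameter, so the module_param() calls removed near the end of this patch are folded into the declaration site rather than lost. The array-valued parameters (tx_fifo_len, rx_ring_sz, rts_frm_len) cannot go through such a macro, which is why they keep explicit definitions and module_param_array() calls.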
@@ -464,10 +476,9 @@ static int init_shared_mem(struct s2io_nic *nic)
         size += config->tx_cfg[i].fifo_len;
     }
     if (size > MAX_AVAILABLE_TXDS) {
-        DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
-              __FUNCTION__);
+        DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
         DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
-        return FAILURE;
+        return -EINVAL;
     }
 
     lst_size = (sizeof(TxD_t) * config->max_txds);
@@ -547,6 +558,7 @@ static int init_shared_mem(struct s2io_nic *nic)
     nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
     if (!nic->ufo_in_band_v)
         return -ENOMEM;
+    memset(nic->ufo_in_band_v, 0, size);
 
     /* Allocation and initialization of RXDs in Rings */
     size = 0;
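
For reference, kcalloc() folds the allocate-and-zero pair into a single call; a sketch of the equivalent, assuming the surrounding error handling stays as above, is:

    /* Alternative sketch: allocate and zero the in-band UFO headers in one call */
    nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
    if (!nic->ufo_in_band_v)
            return -ENOMEM;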
@@ -1213,7 +1225,7 @@ static int init_nic(struct s2io_nic *nic)
         break;
     }
 
-    /* Enable Tx FIFO partition 0. */
+    /* Enable all configured Tx FIFO partitions */
     val64 = readq(&bar0->tx_fifo_partition_0);
     val64 |= (TX_FIFO_PARTITION_EN);
     writeq(val64, &bar0->tx_fifo_partition_0);
@@ -1650,7 +1662,7 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
         writeq(temp64, &bar0->general_int_mask);
         /*
          * If Hercules adapter enable GPIO otherwise
-         * disabled all PCIX, Flash, MDIO, IIC and GPIO
+         * disable all PCIX, Flash, MDIO, IIC and GPIO
          * interrupts for now.
          * TODO
          */
@@ -2119,7 +2131,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in
                        frag->size, PCI_DMA_TODEVICE);
         }
     }
-    txdlp->Host_Control = 0;
+    memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds));
     return(skb);
 }
 
@@ -2371,9 +2383,14 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
             skb->data = (void *) (unsigned long)tmp;
             skb->tail = (void *) (unsigned long)tmp;
 
-            ((RxD3_t*)rxdp)->Buffer0_ptr =
-                pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
+            if (!(((RxD3_t*)rxdp)->Buffer0_ptr))
+                ((RxD3_t*)rxdp)->Buffer0_ptr =
+                    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
                     PCI_DMA_FROMDEVICE);
+            else
+                pci_dma_sync_single_for_device(nic->pdev,
+                    (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr,
+                    BUF0_LEN, PCI_DMA_FROMDEVICE);
             rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
             if (nic->rxd_mode == RXD_MODE_3B) {
                 /* Two buffer mode */
@@ -2386,10 +2403,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
                     (nic->pdev, skb->data, dev->mtu + 4,
                     PCI_DMA_FROMDEVICE);
 
-                /* Buffer-1 will be dummy buffer not used */
-                ((RxD3_t*)rxdp)->Buffer1_ptr =
-                    pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
-                        PCI_DMA_FROMDEVICE);
+                /* Buffer-1 will be dummy buffer. Not used */
+                if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) {
+                    ((RxD3_t*)rxdp)->Buffer1_ptr =
+                        pci_map_single(nic->pdev,
+                            ba->ba_1, BUF1_LEN,
+                            PCI_DMA_FROMDEVICE);
+                }
                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
                             (dev->mtu + 4);
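
Both fill_rx_buffers() hunks follow the same idea: the small header buffers (Buffer0/Buffer1) are now mapped once per RxD and reused, instead of being remapped on every refill. Stripped of the descriptor details, the device-side half of that pattern looks roughly like this (desc, buf and len are placeholders, not driver fields):

    /* Sketch: reuse a streaming DMA mapping for a recycled Rx buffer */
    if (!desc->dma_addr)
            desc->dma_addr = pci_map_single(pdev, buf, len,
                                            PCI_DMA_FROMDEVICE);
    else
            pci_dma_sync_single_for_device(pdev, desc->dma_addr, len,
                                           PCI_DMA_FROMDEVICE);

The matching sync-for-CPU side appears in the rx_intr_handler() hunk further down.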
@@ -2614,23 +2634,23 @@ no_rx:
 }
 #endif
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
 /**
- * s2io_netpoll - Rx interrupt service handler for netpoll support
+ * s2io_netpoll - netpoll event handler entry point
  * @dev : pointer to the device structure.
  * Description:
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
+ * This function will be called by upper layer to check for events on the
+ * interface in situations where interrupts are disabled. It is used for
+ * specific in-kernel networking tasks, such as remote consoles and kernel
+ * debugging over the network (example netdump in RedHat).
  */
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
 static void s2io_netpoll(struct net_device *dev)
 {
     nic_t *nic = dev->priv;
     mac_info_t *mac_control;
     struct config_param *config;
     XENA_dev_config_t __iomem *bar0 = nic->bar0;
-    u64 val64;
+    u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
     int i;
 
     disable_irq(dev->irq);
@@ -2639,9 +2659,17 @@ static void s2io_netpoll(struct net_device *dev)
     mac_control = &nic->mac_control;
     config = &nic->config;
 
-    val64 = readq(&bar0->rx_traffic_int);
     writeq(val64, &bar0->rx_traffic_int);
+    writeq(val64, &bar0->tx_traffic_int);
 
+    /* we need to free up the transmitted skbufs or else netpoll will
+     * run out of skbs and will fail and eventually netpoll application such
+     * as netdump will fail.
+     */
+    for (i = 0; i < config->tx_fifo_num; i++)
+        tx_intr_handler(&mac_control->fifos[i]);
+
+    /* check for received packet and indicate up to network */
     for (i = 0; i < config->rx_ring_num; i++)
         rx_intr_handler(&mac_control->rings[i]);
 
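
With these two hunks, s2io_netpoll() covers both directions: it acknowledges all pending Tx and Rx events (val64 is preset to all ones) and then runs the Tx completion and Rx handlers while the device interrupt is disabled. The overall shape of such a handler, reduced to placeholder names, is roughly:

    /* Sketch of the netpoll flow; example_* names are illustrative only */
    static void example_netpoll(struct net_device *dev)
    {
            struct example_priv *p = dev->priv;

            disable_irq(dev->irq);            /* no ISR runs while we poll    */
            writeq(~0ULL, p->rx_events_reg);  /* ack every pending Rx event   */
            writeq(~0ULL, p->tx_events_reg);  /* ack every pending Tx event   */
            example_tx_clean(p);              /* recycle sent skbs so netpoll
                                                 does not run out of buffers  */
            example_rx_poll(p);               /* push received frames upward  */
            enable_irq(dev->irq);
    }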
@@ -2708,7 +2736,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
         /* If your are next to put index then it's FIFO full condition */
         if ((get_block == put_block) &&
             (get_info.offset + 1) == put_info.offset) {
-            DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name);
+            DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
             break;
         }
         skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
@@ -2728,18 +2756,15 @@
                      HEADER_SNAP_SIZE,
                      PCI_DMA_FROMDEVICE);
             } else if (nic->rxd_mode == RXD_MODE_3B) {
-                pci_unmap_single(nic->pdev, (dma_addr_t)
+                pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
                      ((RxD3_t*)rxdp)->Buffer0_ptr,
                      BUF0_LEN, PCI_DMA_FROMDEVICE);
                 pci_unmap_single(nic->pdev, (dma_addr_t)
-                     ((RxD3_t*)rxdp)->Buffer1_ptr,
-                     BUF1_LEN, PCI_DMA_FROMDEVICE);
-                pci_unmap_single(nic->pdev, (dma_addr_t)
                      ((RxD3_t*)rxdp)->Buffer2_ptr,
                      dev->mtu + 4,
                      PCI_DMA_FROMDEVICE);
             } else {
-                pci_unmap_single(nic->pdev, (dma_addr_t)
+                pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
                      ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
                      PCI_DMA_FROMDEVICE);
                 pci_unmap_single(nic->pdev, (dma_addr_t)
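
This is the receive-side counterpart of the fill_rx_buffers() change: the small header buffer stays mapped and is only synchronized back to the CPU before it is read, while the large data buffer is still unmapped as before. In isolation, again with placeholder names:

    /* Sketch: CPU-side half of the map-once pattern shown earlier */
    pci_dma_sync_single_for_cpu(pdev, desc->dma_addr, len,
                                PCI_DMA_FROMDEVICE);
    /* ... the CPU may now safely read the buffer contents ... */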
@@ -3327,7 +3352,7 @@ static void s2io_reset(nic_t * sp)
 
     /* Clear certain PCI/PCI-X fields after reset */
     if (sp->device_type == XFRAME_II_DEVICE) {
-        /* Clear parity err detect bit */
+        /* Clear "detected parity error" bit */
         pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
 
         /* Clearing PCIX Ecc status register */
@@ -3528,7 +3553,7 @@ static void restore_xmsi_data(nic_t *nic)
     u64 val64;
     int i;
 
-    for (i=0; i< nic->avail_msix_vectors; i++) {
+    for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
         writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
         writeq(nic->msix_info[i].data, &bar0->xmsi_data);
         val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
@@ -3547,7 +3572,7 @@
     int i;
 
     /* Store and display */
-    for (i=0; i< nic->avail_msix_vectors; i++) {
+    for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
         val64 = (BIT(15) | vBIT(i, 26, 6));
         writeq(val64, &bar0->xmsi_access);
         if (wait_for_msix_trans(nic, i)) {
@@ -3808,13 +3833,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
     TxD_t *txdp;
     TxFIFO_element_t __iomem *tx_fifo;
     unsigned long flags;
-#ifdef NETIF_F_TSO
-    int mss;
-#endif
     u16 vlan_tag = 0;
     int vlan_priority = 0;
     mac_info_t *mac_control;
     struct config_param *config;
+    int offload_type;
 
     mac_control = &sp->mac_control;
     config = &sp->config;
@@ -3862,13 +3885,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
         return 0;
     }
 
-    txdp->Control_1 = 0;
-    txdp->Control_2 = 0;
+    offload_type = s2io_offload_type(skb);
 #ifdef NETIF_F_TSO
-    mss = skb_shinfo(skb)->gso_size;
-    if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+    if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
         txdp->Control_1 |= TXD_TCP_LSO_EN;
-        txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
+        txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
     }
 #endif
     if (skb->ip_summed == CHECKSUM_HW) {
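
s2io_offload_type(), s2io_tcp_mss() and s2io_udp_mss() are introduced by this patch in s2io.h, so their bodies are not visible in this diff. Definitions consistent with how they are used in s2io_xmit() would be thin wrappers over the skb_shared_info fields (sketch; the header may differ in detail):

    static inline int s2io_offload_type(struct sk_buff *skb)
    {
            return skb_shinfo(skb)->gso_type;   /* SKB_GSO_TCPV4/TCPV6/UDP or 0 */
    }

    static inline int s2io_tcp_mss(struct sk_buff *skb)
    {
            return skb_shinfo(skb)->gso_size;   /* TSO segment size */
    }

    static inline int s2io_udp_mss(struct sk_buff *skb)
    {
            return skb_shinfo(skb)->gso_size;   /* UFO fragment size */
    }

Caching the gso_type once in offload_type is also what lets the final "special function" test at the end of s2io_xmit() collapse the separate TSO and UFO checks into a single if (offload_type).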
@@ -3886,10 +3907,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
     }
 
     frg_len = skb->len - skb->data_len;
-    if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) {
+    if (offload_type == SKB_GSO_UDP) {
         int ufo_size;
 
-        ufo_size = skb_shinfo(skb)->gso_size;
+        ufo_size = s2io_udp_mss(skb);
         ufo_size &= ~7;
         txdp->Control_1 |= TXD_UFO_EN;
         txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
@@ -3906,16 +3927,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
                 sp->ufo_in_band_v,
                 sizeof(u64), PCI_DMA_TODEVICE);
         txdp++;
-        txdp->Control_1 = 0;
-        txdp->Control_2 = 0;
     }
 
     txdp->Buffer_Pointer = pci_map_single
         (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
     txdp->Host_Control = (unsigned long) skb;
     txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
-
-    if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+    if (offload_type == SKB_GSO_UDP)
         txdp->Control_1 |= TXD_UFO_EN;
 
     frg_cnt = skb_shinfo(skb)->nr_frags;
@@ -3930,12 +3948,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
             (sp->pdev, frag->page, frag->page_offset,
             frag->size, PCI_DMA_TODEVICE);
         txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
-        if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+        if (offload_type == SKB_GSO_UDP)
             txdp->Control_1 |= TXD_UFO_EN;
     }
     txdp->Control_1 |= TXD_GATHER_CODE_LAST;
 
-    if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+    if (offload_type == SKB_GSO_UDP)
         frg_cnt++; /* as Txd0 was used for inband header */
 
     tx_fifo = mac_control->tx_FIFO_start[queue];
@@ -3944,13 +3962,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 
     val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
          TX_FIFO_LAST_LIST);
-
-#ifdef NETIF_F_TSO
-    if (mss)
-        val64 |= TX_FIFO_SPECIAL_FUNC;
-#endif
-    if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+    if (offload_type)
         val64 |= TX_FIFO_SPECIAL_FUNC;
+
     writeq(val64, &tx_fifo->List_Control);
 
     mmiowb();
@@ -3984,13 +3998,41 @@ s2io_alarm_handle(unsigned long data)
     mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
 }
 
+static int s2io_chk_rx_buffers(nic_t *sp, int rng_n)
+{
+    int rxb_size, level;
+
+    if (!sp->lro) {
+        rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
+        level = rx_buffer_level(sp, rxb_size, rng_n);
+
+        if ((level == PANIC) && (!TASKLET_IN_USE)) {
+            int ret;
+            DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
+            DBG_PRINT(INTR_DBG, "PANIC levels\n");
+            if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
+                DBG_PRINT(ERR_DBG, "Out of memory in %s",
+                      __FUNCTION__);
+                clear_bit(0, (&sp->tasklet_status));
+                return -1;
+            }
+            clear_bit(0, (&sp->tasklet_status));
+        } else if (level == LOW)
+            tasklet_schedule(&sp->task);
+
+    } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
+        DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
+        DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
+    }
+    return 0;
+}
+
 static irqreturn_t
 s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
 {
     struct net_device *dev = (struct net_device *) dev_id;
     nic_t *sp = dev->priv;
     int i;
-    int ret;
     mac_info_t *mac_control;
     struct config_param *config;
 
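
The new s2io_chk_rx_buffers() helper collects the buffer-replenish logic that was previously open-coded in the INTA, MSI and MSI-X interrupt paths; the following hunks reduce each of those paths to a call into it. A minimal caller, in the spirit of the reworked handlers below (illustrative only, omitting the driver's isr_cnt bookkeeping), looks like:

    static irqreturn_t example_ring_isr(int irq, void *dev_id, struct pt_regs *regs)
    {
            ring_info_t *ring = (ring_info_t *)dev_id;
            nic_t *sp = ring->nic;

            rx_intr_handler(ring);                  /* process completed RxDs     */
            s2io_chk_rx_buffers(sp, ring->ring_no); /* refill, or schedule refill */
            return IRQ_HANDLED;
    }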
@@ -4012,35 +4054,8 @@ s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
      * reallocate the buffers from the interrupt handler itself,
      * else schedule a tasklet to reallocate the buffers.
      */
-    for (i = 0; i < config->rx_ring_num; i++) {
-        if (!sp->lro) {
-            int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
-            int level = rx_buffer_level(sp, rxb_size, i);
-
-            if ((level == PANIC) && (!TASKLET_IN_USE)) {
-                DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
-                    dev->name);
-                DBG_PRINT(INTR_DBG, "PANIC levels\n");
-                if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
-                    DBG_PRINT(ERR_DBG, "%s:Out of memory",
-                        dev->name);
-                    DBG_PRINT(ERR_DBG, " in ISR!!\n");
-                    clear_bit(0, (&sp->tasklet_status));
-                    atomic_dec(&sp->isr_cnt);
-                    return IRQ_HANDLED;
-                }
-                clear_bit(0, (&sp->tasklet_status));
-            } else if (level == LOW) {
-                tasklet_schedule(&sp->task);
-            }
-        }
-        else if (fill_rx_buffers(sp, i) == -ENOMEM) {
-            DBG_PRINT(ERR_DBG, "%s:Out of memory",
-                dev->name);
-            DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
-            break;
-        }
-    }
+    for (i = 0; i < config->rx_ring_num; i++)
+        s2io_chk_rx_buffers(sp, i);
 
     atomic_dec(&sp->isr_cnt);
     return IRQ_HANDLED;
@@ -4051,39 +4066,13 @@ s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
 {
     ring_info_t *ring = (ring_info_t *)dev_id;
     nic_t *sp = ring->nic;
-    struct net_device *dev = (struct net_device *) dev_id;
-    int rxb_size, level, rng_n;
 
     atomic_inc(&sp->isr_cnt);
-    rx_intr_handler(ring);
-
-    rng_n = ring->ring_no;
-    if (!sp->lro) {
-        rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
-        level = rx_buffer_level(sp, rxb_size, rng_n);
 
-        if ((level == PANIC) && (!TASKLET_IN_USE)) {
-            int ret;
-            DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
-            DBG_PRINT(INTR_DBG, "PANIC levels\n");
-            if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
-                DBG_PRINT(ERR_DBG, "Out of memory in %s",
-                    __FUNCTION__);
-                clear_bit(0, (&sp->tasklet_status));
-                return IRQ_HANDLED;
-            }
-            clear_bit(0, (&sp->tasklet_status));
-        } else if (level == LOW) {
-            tasklet_schedule(&sp->task);
-        }
-    }
-    else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
-        DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
-        DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
-    }
+    rx_intr_handler(ring);
+    s2io_chk_rx_buffers(sp, ring->ring_no);
 
     atomic_dec(&sp->isr_cnt);
-
     return IRQ_HANDLED;
 }
 
@@ -4248,37 +4237,8 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
      * else schedule a tasklet to reallocate the buffers.
      */
 #ifndef CONFIG_S2IO_NAPI
-    for (i = 0; i < config->rx_ring_num; i++) {
-        if (!sp->lro) {
-            int ret;
-            int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
-            int level = rx_buffer_level(sp, rxb_size, i);
-
-            if ((level == PANIC) && (!TASKLET_IN_USE)) {
-                DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
-                    dev->name);
-                DBG_PRINT(INTR_DBG, "PANIC levels\n");
-                if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
-                    DBG_PRINT(ERR_DBG, "%s:Out of memory",
-                        dev->name);
-                    DBG_PRINT(ERR_DBG, " in ISR!!\n");
-                    clear_bit(0, (&sp->tasklet_status));
-                    atomic_dec(&sp->isr_cnt);
-                    writeq(org_mask, &bar0->general_int_mask);
-                    return IRQ_HANDLED;
-                }
-                clear_bit(0, (&sp->tasklet_status));
-            } else if (level == LOW) {
-                tasklet_schedule(&sp->task);
-            }
-        }
-        else if (fill_rx_buffers(sp, i) == -ENOMEM) {
-            DBG_PRINT(ERR_DBG, "%s:Out of memory",
-                dev->name);
-            DBG_PRINT(ERR_DBG, " in Rx intr!!\n");
-            break;
-        }
-    }
+    for (i = 0; i < config->rx_ring_num; i++)
+        s2io_chk_rx_buffers(sp, i);
 #endif
     writeq(org_mask, &bar0->general_int_mask);
     atomic_dec(&sp->isr_cnt);
@@ -4308,6 +4268,8 @@ static void s2io_updt_stats(nic_t *sp)
             if (cnt == 5)
                 break; /* Updt failed */
         } while(1);
+    } else {
+        memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t));
     }
 }
 
@@ -4942,7 +4904,8 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
 }
 static void s2io_vpd_read(nic_t *nic)
 {
-    u8 vpd_data[256],data;
+    u8 *vpd_data;
+    u8 data;
     int i=0, cnt, fail = 0;
     int vpd_addr = 0x80;
 
@@ -4955,6 +4918,10 @@ static void s2io_vpd_read(nic_t *nic)
         vpd_addr = 0x50;
     }
 
+    vpd_data = kmalloc(256, GFP_KERNEL);
+    if (!vpd_data)
+        return;
+
     for (i = 0; i < 256; i +=4 ) {
         pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
         pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
@@ -4977,6 +4944,7 @@ static void s2io_vpd_read(nic_t *nic)
         memset(nic->product_name, 0, vpd_data[1]);
         memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
     }
+    kfree(vpd_data);
 }
 
 /**
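
The three VPD hunks together move the 256-byte scratch buffer off the kernel stack: declare a pointer, allocate it with kmalloc(), and free it once the product name has been copied out. The same pattern in isolation (placeholder function, error handling kept as minimal as it is in the driver):

    static void example_read_vpd(struct pci_dev *pdev)
    {
            u8 *buf = kmalloc(256, GFP_KERNEL); /* was: u8 buf[256] on the stack */

            if (!buf)
                    return;                     /* silently skip, as s2io_vpd_read() does */
            /* ... read and parse the VPD words into buf ... */
            kfree(buf);
    }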
@@ -5295,7 +5263,7 @@ static int s2io_link_test(nic_t * sp, uint64_t * data)
     else
         *data = 0;
 
-    return 0;
+    return *data;
 }
 
 /**
@@ -5753,6 +5721,19 @@ static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
     return 0;
 }
 
+static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
+{
+    return (dev->features & NETIF_F_TSO) != 0;
+}
+static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
+{
+    if (data)
+        dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+    else
+        dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+    return 0;
+}
 
 static struct ethtool_ops netdev_ethtool_ops = {
     .get_settings = s2io_ethtool_gset,
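
The driver-private set_tso hook differs from the generic ethtool_op_set_tso() only in toggling NETIF_F_TSO6 together with NETIF_F_TSO, so IPv6 TSO follows the same switch. Roughly, the ethtool core is expected to dispatch to these hooks when userspace toggles the feature; a sketch of the calling side (illustrative, not s2io code):

    /* Approximate dispatch inside the ethtool core (illustrative only) */
    static int example_set_tso(struct net_device *dev, u32 wanted)
    {
            if (!dev->ethtool_ops || !dev->ethtool_ops->set_tso)
                    return -EOPNOTSUPP;
            return dev->ethtool_ops->set_tso(dev, wanted);  /* -> s2io_ethtool_op_set_tso */
    }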
@@ -5773,8 +5754,8 @@ static struct ethtool_ops netdev_ethtool_ops = {
     .get_sg = ethtool_op_get_sg,
     .set_sg = ethtool_op_set_sg,
 #ifdef NETIF_F_TSO
-    .get_tso = ethtool_op_get_tso,
-    .set_tso = ethtool_op_set_tso,
+    .get_tso = s2io_ethtool_op_get_tso,
+    .set_tso = s2io_ethtool_op_set_tso,
 #endif
     .get_ufo = ethtool_op_get_ufo,
     .set_ufo = ethtool_op_set_ufo,
@@ -6337,7 +6318,7 @@ static int s2io_card_up(nic_t * sp)
     s2io_set_multicast(dev);
 
     if (sp->lro) {
-        /* Initialize max aggregatable pkts based on MTU */
+        /* Initialize max aggregatable pkts per session based on MTU */
         sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
         /* Check if we can use(if specified) user provided value */
         if (lro_max_pkts < sp->lro_max_aggr_per_sess)
@@ -6438,7 +6419,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
  *   @cksum : FCS checksum of the frame.
  *   @ring_no : the ring from which this RxD was extracted.
  *   Description:
- *   This function is called by the Tx interrupt serivce routine to perform
+ *   This function is called by the Rx interrupt serivce routine to perform
  *   some OS related operations on the SKB before passing it to the upper
  *   layers. It mainly checks if the checksum is OK, if so adds it to the
  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
@@ -6698,33 +6679,6 @@ static void s2io_init_pci(nic_t * sp)
     pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
 }
 
-MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-module_param(tx_fifo_num, int, 0);
-module_param(rx_ring_num, int, 0);
-module_param(rx_ring_mode, int, 0);
-module_param_array(tx_fifo_len, uint, NULL, 0);
-module_param_array(rx_ring_sz, uint, NULL, 0);
-module_param_array(rts_frm_len, uint, NULL, 0);
-module_param(use_continuous_tx_intrs, int, 1);
-module_param(rmac_pause_time, int, 0);
-module_param(mc_pause_threshold_q0q3, int, 0);
-module_param(mc_pause_threshold_q4q7, int, 0);
-module_param(shared_splits, int, 0);
-module_param(tmac_util_period, int, 0);
-module_param(rmac_util_period, int, 0);
-module_param(bimodal, bool, 0);
-module_param(l3l4hdr_size, int , 0);
-#ifndef CONFIG_S2IO_NAPI
-module_param(indicate_max_pkts, int, 0);
-#endif
-module_param(rxsync_frequency, int, 0);
-module_param(intr_type, int, 0);
-module_param(lro, int, 0);
-module_param(lro_max_pkts, int, 0);
-
 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
 {
     if ( tx_fifo_num > 8) {
@@ -6832,8 +6786,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
     }
     if (dev_intr_type != MSI_X) {
         if (pci_request_regions(pdev, s2io_driver_name)) {
-            DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
+            DBG_PRINT(ERR_DBG, "Request Regions failed\n");
             pci_disable_device(pdev);
             return -ENODEV;
         }
     }
@@ -6957,7 +6911,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
     /* initialize the shared memory used by the NIC and the host */
     if (init_shared_mem(sp)) {
         DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
-              __FUNCTION__);
+              dev->name);
         ret = -ENOMEM;
         goto mem_alloc_failed;
     }
@@ -7094,6 +7048,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
     dev->addr_len = ETH_ALEN;
     memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
 
+    /* reset Nic and bring it to known state */
+    s2io_reset(sp);
+
     /*
      * Initialize the tasklet status and link state flags
      * and the card state parameter
@@ -7131,11 +7088,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
         goto register_failed;
     }
     s2io_vpd_read(sp);
-    DBG_PRINT(ERR_DBG, "%s: Neterion %s",dev->name, sp->product_name);
-    DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n",
-          get_xena_rev_id(sp->pdev),
-          s2io_driver_version);
     DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
+    DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
+          sp->product_name, get_xena_rev_id(sp->pdev));
+    DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
+          s2io_driver_version);
     DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
           "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
           sp->def_mac_addr[0].mac_addr[0],
@@ -7436,8 +7393,13 @@ static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
     if (ip->ihl != 5) /* IP has options */
         return -1;
 
+    /* If we see CE codepoint in IP header, packet is not mergeable */
+    if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
+        return -1;
+
+    /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
     if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
-        !tcp->ack) {
+        tcp->ece || tcp->cwr || !tcp->ack) {
         /*
          * Currently recognize only the ack control word and
          * any other control field being set would result in
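
INET_ECN_is_ce() and ipv4_get_dsfield() come from existing kernel headers (net/inet_ecn.h and net/dsfield.h); the corresponding #include is assumed to be present or added elsewhere in the patch. The intent of the two new tests, pulled out on their own:

    #include <net/inet_ecn.h>   /* INET_ECN_is_ce()     */
    #include <net/dsfield.h>    /* ipv4_get_dsfield()   */

    /* A segment is left out of LRO aggregation when the IP header carries a
     * CE (congestion experienced) mark or the TCP header carries ECN
     * signalling (ECE/CWR), so congestion feedback is not hidden by merging. */
    static inline int example_ecn_blocks_lro(struct iphdr *ip, struct tcphdr *tcp)
    {
            return INET_ECN_is_ce(ipv4_get_dsfield(ip)) || tcp->ece || tcp->cwr;
    }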
@@ -7591,18 +7553,16 @@ static void queue_rx_frame(struct sk_buff *skb)
 static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
                u32 tcp_len)
 {
-    struct sk_buff *tmp, *first = lro->parent;
+    struct sk_buff *first = lro->parent;
 
     first->len += tcp_len;
     first->data_len = lro->frags_len;
     skb_pull(skb, (skb->len - tcp_len));
-    if ((tmp = skb_shinfo(first)->frag_list)) {
-        while (tmp->next)
-            tmp = tmp->next;
-        tmp->next = skb;
-    }
+    if (skb_shinfo(first)->frag_list)
+        lro->last_frag->next = skb;
     else
         skb_shinfo(first)->frag_list = skb;
+    lro->last_frag = skb;
     sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
     return;
 }
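
Replacing the frag_list walk with lro->last_frag turns the per-packet append from O(chain length) into O(1). This relies on a last_frag pointer being added to the lro_t bookkeeping in s2io.h, which is not visible in this diff; conceptually the structure carries something like:

    /* Sketch of the assumed lro_t bookkeeping; field names are illustrative
     * except for parent and last_frag, which match their use above. */
    struct example_lro {
            struct sk_buff  *parent;    /* head skb of the aggregated frame        */
            struct sk_buff  *last_frag; /* tail of parent's frag_list, O(1) append */
            int             frags_len;  /* total length queued on frag_list        */
    };

Since the append path updates last_frag unconditionally after linking the new skb, it stays valid whether the new segment starts or extends the frag_list.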
