author     John W. Linville <linville@tuxdriver.com>  2006-08-04 14:24:15 -0400
committer  John W. Linville <linville@tuxdriver.com>  2006-08-04 14:24:15 -0400
commit     71bfe47f023c55c322607939b786ce0a44627dfc (patch)
tree       f59c37feb00f1df2e0f4ec282ae9c80ad6bb0cdc /drivers/net
parent     73c1ac1e3b6c989b9b5f7b2313ac590a1c3b6d6a (diff)
parent     efe78cda3596f8a6d1c2d4a6b1a221bafa3e1a48 (diff)
Merge branch 'from-linus' into upstream
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/appletalk/Kconfig      2
-rw-r--r--  drivers/net/e1000/e1000_main.c    11
-rw-r--r--  drivers/net/myri10ge/myri10ge.c   24
-rw-r--r--  drivers/net/phy/phy.c              8
-rw-r--r--  drivers/net/s2io.c               386
-rw-r--r--  drivers/net/s2io.h                10
-rw-r--r--  drivers/net/tg3.c                 10
7 files changed, 208 insertions(+), 243 deletions(-)
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index b14e89004c3a..0a0e0cd81a23 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -29,7 +29,7 @@ config ATALK
29 even politically correct people are allowed to say Y here. 29 even politically correct people are allowed to say Y here.
30 30
31config DEV_APPLETALK 31config DEV_APPLETALK
32 bool "Appletalk interfaces support" 32 tristate "Appletalk interfaces support"
33 depends on ATALK 33 depends on ATALK
34 help 34 help
35 AppleTalk is the protocol that Apple computers can use to communicate 35 AppleTalk is the protocol that Apple computers can use to communicate
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index da62db897426..627f224d78bc 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3127,7 +3127,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3127 break; 3127 break;
3128 } 3128 }
3129 3129
3130 /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3130 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3131 * means we reserve 2 more, this pushes us to allocate from the next 3131 * means we reserve 2 more, this pushes us to allocate from the next
3132 * larger slab size 3132 * larger slab size
3133 * i.e. RXBUFFER_2048 --> size-4096 slab */ 3133 * i.e. RXBUFFER_2048 --> size-4096 slab */
@@ -3708,7 +3708,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3708#define E1000_CB_LENGTH 256 3708#define E1000_CB_LENGTH 256
3709 if (length < E1000_CB_LENGTH) { 3709 if (length < E1000_CB_LENGTH) {
3710 struct sk_buff *new_skb = 3710 struct sk_buff *new_skb =
3711 dev_alloc_skb(length + NET_IP_ALIGN); 3711 netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
3712 if (new_skb) { 3712 if (new_skb) {
3713 skb_reserve(new_skb, NET_IP_ALIGN); 3713 skb_reserve(new_skb, NET_IP_ALIGN);
3714 new_skb->dev = netdev; 3714 new_skb->dev = netdev;
@@ -3979,7 +3979,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3979 3979
3980 while (cleaned_count--) { 3980 while (cleaned_count--) {
3981 if (!(skb = buffer_info->skb)) 3981 if (!(skb = buffer_info->skb))
3982 skb = dev_alloc_skb(bufsz); 3982 skb = netdev_alloc_skb(netdev, bufsz);
3983 else { 3983 else {
3984 skb_trim(skb, 0); 3984 skb_trim(skb, 0);
3985 goto map_skb; 3985 goto map_skb;
@@ -3997,7 +3997,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3997 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " 3997 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
3998 "at %p\n", bufsz, skb->data); 3998 "at %p\n", bufsz, skb->data);
3999 /* Try again, without freeing the previous */ 3999 /* Try again, without freeing the previous */
4000 skb = dev_alloc_skb(bufsz); 4000 skb = netdev_alloc_skb(netdev, bufsz);
4001 /* Failed allocation, critical failure */ 4001 /* Failed allocation, critical failure */
4002 if (!skb) { 4002 if (!skb) {
4003 dev_kfree_skb(oldskb); 4003 dev_kfree_skb(oldskb);
@@ -4121,7 +4121,8 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
4121 rx_desc->read.buffer_addr[j+1] = ~0; 4121 rx_desc->read.buffer_addr[j+1] = ~0;
4122 } 4122 }
4123 4123
4124 skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); 4124 skb = netdev_alloc_skb(netdev,
4125 adapter->rx_ps_bsize0 + NET_IP_ALIGN);
4125 4126
4126 if (unlikely(!skb)) { 4127 if (unlikely(!skb)) {
4127 adapter->alloc_rx_buff_failed++; 4128 adapter->alloc_rx_buff_failed++;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index c3e52c806b13..06440a86baef 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -177,6 +177,7 @@ struct myri10ge_priv {
177 struct work_struct watchdog_work; 177 struct work_struct watchdog_work;
178 struct timer_list watchdog_timer; 178 struct timer_list watchdog_timer;
179 int watchdog_tx_done; 179 int watchdog_tx_done;
180 int watchdog_tx_req;
180 int watchdog_resets; 181 int watchdog_resets;
181 int tx_linearized; 182 int tx_linearized;
182 int pause; 183 int pause;
@@ -448,6 +449,7 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
448 struct mcp_gen_header *hdr; 449 struct mcp_gen_header *hdr;
449 size_t hdr_offset; 450 size_t hdr_offset;
450 int status; 451 int status;
452 unsigned i;
451 453
452 if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) { 454 if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
453 dev_err(dev, "Unable to load %s firmware image via hotplug\n", 455 dev_err(dev, "Unable to load %s firmware image via hotplug\n",
@@ -479,18 +481,12 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
479 goto abort_with_fw; 481 goto abort_with_fw;
480 482
481 crc = crc32(~0, fw->data, fw->size); 483 crc = crc32(~0, fw->data, fw->size);
482 if (mgp->tx.boundary == 2048) { 484 for (i = 0; i < fw->size; i += 256) {
483 /* Avoid PCI burst on chipset with unaligned completions. */ 485 myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
484 int i; 486 fw->data + i,
485 __iomem u32 *ptr = (__iomem u32 *) (mgp->sram + 487 min(256U, (unsigned)(fw->size - i)));
486 MYRI10GE_FW_OFFSET); 488 mb();
487 for (i = 0; i < fw->size / 4; i++) { 489 readb(mgp->sram);
488 __raw_writel(((u32 *) fw->data)[i], ptr + i);
489 wmb();
490 }
491 } else {
492 myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET, fw->data,
493 fw->size);
494 } 490 }
495 /* corruption checking is good for parity recovery and buggy chipset */ 491 /* corruption checking is good for parity recovery and buggy chipset */
496 memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size); 492 memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
@@ -2547,7 +2543,8 @@ static void myri10ge_watchdog_timer(unsigned long arg)
2547 2543
2548 mgp = (struct myri10ge_priv *)arg; 2544 mgp = (struct myri10ge_priv *)arg;
2549 if (mgp->tx.req != mgp->tx.done && 2545 if (mgp->tx.req != mgp->tx.done &&
2550 mgp->tx.done == mgp->watchdog_tx_done) 2546 mgp->tx.done == mgp->watchdog_tx_done &&
2547 mgp->watchdog_tx_req != mgp->watchdog_tx_done)
2551 /* nic seems like it might be stuck.. */ 2548 /* nic seems like it might be stuck.. */
2552 schedule_work(&mgp->watchdog_work); 2549 schedule_work(&mgp->watchdog_work);
2553 else 2550 else
@@ -2556,6 +2553,7 @@ static void myri10ge_watchdog_timer(unsigned long arg)
2556 jiffies + myri10ge_watchdog_timeout * HZ); 2553 jiffies + myri10ge_watchdog_timeout * HZ);
2557 2554
2558 mgp->watchdog_tx_done = mgp->tx.done; 2555 mgp->watchdog_tx_done = mgp->tx.done;
2556 mgp->watchdog_tx_req = mgp->tx.req;
2559} 2557}
2560 2558
2561static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2559static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7d5c2233c252..f5aad77288f9 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -419,9 +419,8 @@ void phy_start_machine(struct phy_device *phydev,
419 419
420/* phy_stop_machine 420/* phy_stop_machine
421 * 421 *
422 * description: Stops the state machine timer, sets the state to 422 * description: Stops the state machine timer, sets the state to UP
423 * UP (unless it wasn't up yet), and then frees the interrupt, 423 * (unless it wasn't up yet). This function must be called BEFORE
424 * if it is in use. This function must be called BEFORE
425 * phy_detach. 424 * phy_detach.
426 */ 425 */
427void phy_stop_machine(struct phy_device *phydev) 426void phy_stop_machine(struct phy_device *phydev)
@@ -433,9 +432,6 @@ void phy_stop_machine(struct phy_device *phydev)
433 phydev->state = PHY_UP; 432 phydev->state = PHY_UP;
434 spin_unlock(&phydev->lock); 433 spin_unlock(&phydev->lock);
435 434
436 if (phydev->irq != PHY_POLL)
437 phy_stop_interrupts(phydev);
438
439 phydev->adjust_state = NULL; 435 phydev->adjust_state = NULL;
440} 436}
441 437
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index e1fe3a0a7b0b..132ed32bce1a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -76,7 +76,7 @@
76#include "s2io.h" 76#include "s2io.h"
77#include "s2io-regs.h" 77#include "s2io-regs.h"
78 78
79#define DRV_VERSION "2.0.14.2" 79#define DRV_VERSION "2.0.15.2"
80 80
81/* S2io Driver name & version. */ 81/* S2io Driver name & version. */
82static char s2io_driver_name[] = "Neterion"; 82static char s2io_driver_name[] = "Neterion";
@@ -370,38 +370,50 @@ static const u64 fix_mac[] = {
370 END_SIGN 370 END_SIGN
371}; 371};
372 372
373MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
374MODULE_LICENSE("GPL");
375MODULE_VERSION(DRV_VERSION);
376
377
373/* Module Loadable parameters. */ 378/* Module Loadable parameters. */
374static unsigned int tx_fifo_num = 1; 379S2IO_PARM_INT(tx_fifo_num, 1);
375static unsigned int tx_fifo_len[MAX_TX_FIFOS] = 380S2IO_PARM_INT(rx_ring_num, 1);
376 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; 381
377static unsigned int rx_ring_num = 1; 382
378static unsigned int rx_ring_sz[MAX_RX_RINGS] = 383S2IO_PARM_INT(rx_ring_mode, 1);
379 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; 384S2IO_PARM_INT(use_continuous_tx_intrs, 1);
380static unsigned int rts_frm_len[MAX_RX_RINGS] = 385S2IO_PARM_INT(rmac_pause_time, 0x100);
381 {[0 ...(MAX_RX_RINGS - 1)] = 0 }; 386S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
382static unsigned int rx_ring_mode = 1; 387S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
383static unsigned int use_continuous_tx_intrs = 1; 388S2IO_PARM_INT(shared_splits, 0);
384static unsigned int rmac_pause_time = 0x100; 389S2IO_PARM_INT(tmac_util_period, 5);
385static unsigned int mc_pause_threshold_q0q3 = 187; 390S2IO_PARM_INT(rmac_util_period, 5);
386static unsigned int mc_pause_threshold_q4q7 = 187; 391S2IO_PARM_INT(bimodal, 0);
387static unsigned int shared_splits; 392S2IO_PARM_INT(l3l4hdr_size, 128);
388static unsigned int tmac_util_period = 5;
389static unsigned int rmac_util_period = 5;
390static unsigned int bimodal = 0;
391static unsigned int l3l4hdr_size = 128;
392#ifndef CONFIG_S2IO_NAPI
393static unsigned int indicate_max_pkts;
394#endif
395/* Frequency of Rx desc syncs expressed as power of 2 */ 393/* Frequency of Rx desc syncs expressed as power of 2 */
396static unsigned int rxsync_frequency = 3; 394S2IO_PARM_INT(rxsync_frequency, 3);
397/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ 395/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
398static unsigned int intr_type = 0; 396S2IO_PARM_INT(intr_type, 0);
399/* Large receive offload feature */ 397/* Large receive offload feature */
400static unsigned int lro = 0; 398S2IO_PARM_INT(lro, 0);
401/* Max pkts to be aggregated by LRO at one time. If not specified, 399/* Max pkts to be aggregated by LRO at one time. If not specified,
402 * aggregation happens until we hit max IP pkt size(64K) 400 * aggregation happens until we hit max IP pkt size(64K)
403 */ 401 */
404static unsigned int lro_max_pkts = 0xFFFF; 402S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
403#ifndef CONFIG_S2IO_NAPI
404S2IO_PARM_INT(indicate_max_pkts, 0);
405#endif
406
407static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
408 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
409static unsigned int rx_ring_sz[MAX_RX_RINGS] =
410 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
411static unsigned int rts_frm_len[MAX_RX_RINGS] =
412 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
413
414module_param_array(tx_fifo_len, uint, NULL, 0);
415module_param_array(rx_ring_sz, uint, NULL, 0);
416module_param_array(rts_frm_len, uint, NULL, 0);
405 417
406/* 418/*
407 * S2IO device table. 419 * S2IO device table.
@@ -464,10 +476,9 @@ static int init_shared_mem(struct s2io_nic *nic)
464 size += config->tx_cfg[i].fifo_len; 476 size += config->tx_cfg[i].fifo_len;
465 } 477 }
466 if (size > MAX_AVAILABLE_TXDS) { 478 if (size > MAX_AVAILABLE_TXDS) {
467 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ", 479 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
468 __FUNCTION__);
469 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size); 480 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
470 return FAILURE; 481 return -EINVAL;
471 } 482 }
472 483
473 lst_size = (sizeof(TxD_t) * config->max_txds); 484 lst_size = (sizeof(TxD_t) * config->max_txds);
@@ -547,6 +558,7 @@ static int init_shared_mem(struct s2io_nic *nic)
547 nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL); 558 nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
548 if (!nic->ufo_in_band_v) 559 if (!nic->ufo_in_band_v)
549 return -ENOMEM; 560 return -ENOMEM;
561 memset(nic->ufo_in_band_v, 0, size);
550 562
551 /* Allocation and initialization of RXDs in Rings */ 563 /* Allocation and initialization of RXDs in Rings */
552 size = 0; 564 size = 0;
@@ -1213,7 +1225,7 @@ static int init_nic(struct s2io_nic *nic)
1213 break; 1225 break;
1214 } 1226 }
1215 1227
1216 /* Enable Tx FIFO partition 0. */ 1228 /* Enable all configured Tx FIFO partitions */
1217 val64 = readq(&bar0->tx_fifo_partition_0); 1229 val64 = readq(&bar0->tx_fifo_partition_0);
1218 val64 |= (TX_FIFO_PARTITION_EN); 1230 val64 |= (TX_FIFO_PARTITION_EN);
1219 writeq(val64, &bar0->tx_fifo_partition_0); 1231 writeq(val64, &bar0->tx_fifo_partition_0);
@@ -1650,7 +1662,7 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1650 writeq(temp64, &bar0->general_int_mask); 1662 writeq(temp64, &bar0->general_int_mask);
1651 /* 1663 /*
1652 * If Hercules adapter enable GPIO otherwise 1664 * If Hercules adapter enable GPIO otherwise
1653 * disabled all PCIX, Flash, MDIO, IIC and GPIO 1665 * disable all PCIX, Flash, MDIO, IIC and GPIO
1654 * interrupts for now. 1666 * interrupts for now.
1655 * TODO 1667 * TODO
1656 */ 1668 */
@@ -2119,7 +2131,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in
2119 frag->size, PCI_DMA_TODEVICE); 2131 frag->size, PCI_DMA_TODEVICE);
2120 } 2132 }
2121 } 2133 }
2122 txdlp->Host_Control = 0; 2134 memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds));
2123 return(skb); 2135 return(skb);
2124} 2136}
2125 2137
@@ -2371,9 +2383,14 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2371 skb->data = (void *) (unsigned long)tmp; 2383 skb->data = (void *) (unsigned long)tmp;
2372 skb->tail = (void *) (unsigned long)tmp; 2384 skb->tail = (void *) (unsigned long)tmp;
2373 2385
2374 ((RxD3_t*)rxdp)->Buffer0_ptr = 2386 if (!(((RxD3_t*)rxdp)->Buffer0_ptr))
2375 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2387 ((RxD3_t*)rxdp)->Buffer0_ptr =
2388 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2376 PCI_DMA_FROMDEVICE); 2389 PCI_DMA_FROMDEVICE);
2390 else
2391 pci_dma_sync_single_for_device(nic->pdev,
2392 (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr,
2393 BUF0_LEN, PCI_DMA_FROMDEVICE);
2377 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 2394 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2378 if (nic->rxd_mode == RXD_MODE_3B) { 2395 if (nic->rxd_mode == RXD_MODE_3B) {
2379 /* Two buffer mode */ 2396 /* Two buffer mode */
@@ -2386,10 +2403,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2386 (nic->pdev, skb->data, dev->mtu + 4, 2403 (nic->pdev, skb->data, dev->mtu + 4,
2387 PCI_DMA_FROMDEVICE); 2404 PCI_DMA_FROMDEVICE);
2388 2405
2389 /* Buffer-1 will be dummy buffer not used */ 2406 /* Buffer-1 will be dummy buffer. Not used */
2390 ((RxD3_t*)rxdp)->Buffer1_ptr = 2407 if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) {
2391 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN, 2408 ((RxD3_t*)rxdp)->Buffer1_ptr =
2392 PCI_DMA_FROMDEVICE); 2409 pci_map_single(nic->pdev,
2410 ba->ba_1, BUF1_LEN,
2411 PCI_DMA_FROMDEVICE);
2412 }
2393 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); 2413 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2394 rxdp->Control_2 |= SET_BUFFER2_SIZE_3 2414 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2395 (dev->mtu + 4); 2415 (dev->mtu + 4);
@@ -2614,23 +2634,23 @@ no_rx:
2614} 2634}
2615#endif 2635#endif
2616 2636
2637#ifdef CONFIG_NET_POLL_CONTROLLER
2617/** 2638/**
2618 * s2io_netpoll - Rx interrupt service handler for netpoll support 2639 * s2io_netpoll - netpoll event handler entry point
2619 * @dev : pointer to the device structure. 2640 * @dev : pointer to the device structure.
2620 * Description: 2641 * Description:
2621 * Polling 'interrupt' - used by things like netconsole to send skbs 2642 * This function will be called by upper layer to check for events on the
2622 * without having to re-enable interrupts. It's not called while 2643 * interface in situations where interrupts are disabled. It is used for
2623 * the interrupt routine is executing. 2644 * specific in-kernel networking tasks, such as remote consoles and kernel
2645 * debugging over the network (example netdump in RedHat).
2624 */ 2646 */
2625
2626#ifdef CONFIG_NET_POLL_CONTROLLER
2627static void s2io_netpoll(struct net_device *dev) 2647static void s2io_netpoll(struct net_device *dev)
2628{ 2648{
2629 nic_t *nic = dev->priv; 2649 nic_t *nic = dev->priv;
2630 mac_info_t *mac_control; 2650 mac_info_t *mac_control;
2631 struct config_param *config; 2651 struct config_param *config;
2632 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2652 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2633 u64 val64; 2653 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2634 int i; 2654 int i;
2635 2655
2636 disable_irq(dev->irq); 2656 disable_irq(dev->irq);
@@ -2639,9 +2659,17 @@ static void s2io_netpoll(struct net_device *dev)
2639 mac_control = &nic->mac_control; 2659 mac_control = &nic->mac_control;
2640 config = &nic->config; 2660 config = &nic->config;
2641 2661
2642 val64 = readq(&bar0->rx_traffic_int);
2643 writeq(val64, &bar0->rx_traffic_int); 2662 writeq(val64, &bar0->rx_traffic_int);
2663 writeq(val64, &bar0->tx_traffic_int);
2644 2664
2665 /* we need to free up the transmitted skbufs or else netpoll will
2666 * run out of skbs and will fail and eventually netpoll application such
2667 * as netdump will fail.
2668 */
2669 for (i = 0; i < config->tx_fifo_num; i++)
2670 tx_intr_handler(&mac_control->fifos[i]);
2671
2672 /* check for received packet and indicate up to network */
2645 for (i = 0; i < config->rx_ring_num; i++) 2673 for (i = 0; i < config->rx_ring_num; i++)
2646 rx_intr_handler(&mac_control->rings[i]); 2674 rx_intr_handler(&mac_control->rings[i]);
2647 2675
@@ -2708,7 +2736,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
2708 /* If your are next to put index then it's FIFO full condition */ 2736 /* If your are next to put index then it's FIFO full condition */
2709 if ((get_block == put_block) && 2737 if ((get_block == put_block) &&
2710 (get_info.offset + 1) == put_info.offset) { 2738 (get_info.offset + 1) == put_info.offset) {
2711 DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name); 2739 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2712 break; 2740 break;
2713 } 2741 }
2714 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); 2742 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
@@ -2728,18 +2756,15 @@ static void rx_intr_handler(ring_info_t *ring_data)
2728 HEADER_SNAP_SIZE, 2756 HEADER_SNAP_SIZE,
2729 PCI_DMA_FROMDEVICE); 2757 PCI_DMA_FROMDEVICE);
2730 } else if (nic->rxd_mode == RXD_MODE_3B) { 2758 } else if (nic->rxd_mode == RXD_MODE_3B) {
2731 pci_unmap_single(nic->pdev, (dma_addr_t) 2759 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2732 ((RxD3_t*)rxdp)->Buffer0_ptr, 2760 ((RxD3_t*)rxdp)->Buffer0_ptr,
2733 BUF0_LEN, PCI_DMA_FROMDEVICE); 2761 BUF0_LEN, PCI_DMA_FROMDEVICE);
2734 pci_unmap_single(nic->pdev, (dma_addr_t) 2762 pci_unmap_single(nic->pdev, (dma_addr_t)
2735 ((RxD3_t*)rxdp)->Buffer1_ptr,
2736 BUF1_LEN, PCI_DMA_FROMDEVICE);
2737 pci_unmap_single(nic->pdev, (dma_addr_t)
2738 ((RxD3_t*)rxdp)->Buffer2_ptr, 2763 ((RxD3_t*)rxdp)->Buffer2_ptr,
2739 dev->mtu + 4, 2764 dev->mtu + 4,
2740 PCI_DMA_FROMDEVICE); 2765 PCI_DMA_FROMDEVICE);
2741 } else { 2766 } else {
2742 pci_unmap_single(nic->pdev, (dma_addr_t) 2767 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2743 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, 2768 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2744 PCI_DMA_FROMDEVICE); 2769 PCI_DMA_FROMDEVICE);
2745 pci_unmap_single(nic->pdev, (dma_addr_t) 2770 pci_unmap_single(nic->pdev, (dma_addr_t)
@@ -3327,7 +3352,7 @@ static void s2io_reset(nic_t * sp)
3327 3352
3328 /* Clear certain PCI/PCI-X fields after reset */ 3353 /* Clear certain PCI/PCI-X fields after reset */
3329 if (sp->device_type == XFRAME_II_DEVICE) { 3354 if (sp->device_type == XFRAME_II_DEVICE) {
3330 /* Clear parity err detect bit */ 3355 /* Clear "detected parity error" bit */
3331 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); 3356 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3332 3357
3333 /* Clearing PCIX Ecc status register */ 3358 /* Clearing PCIX Ecc status register */
@@ -3528,7 +3553,7 @@ static void restore_xmsi_data(nic_t *nic)
3528 u64 val64; 3553 u64 val64;
3529 int i; 3554 int i;
3530 3555
3531 for (i=0; i< nic->avail_msix_vectors; i++) { 3556 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3532 writeq(nic->msix_info[i].addr, &bar0->xmsi_address); 3557 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3533 writeq(nic->msix_info[i].data, &bar0->xmsi_data); 3558 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3534 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6)); 3559 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
@@ -3547,7 +3572,7 @@ static void store_xmsi_data(nic_t *nic)
3547 int i; 3572 int i;
3548 3573
3549 /* Store and display */ 3574 /* Store and display */
3550 for (i=0; i< nic->avail_msix_vectors; i++) { 3575 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3551 val64 = (BIT(15) | vBIT(i, 26, 6)); 3576 val64 = (BIT(15) | vBIT(i, 26, 6));
3552 writeq(val64, &bar0->xmsi_access); 3577 writeq(val64, &bar0->xmsi_access);
3553 if (wait_for_msix_trans(nic, i)) { 3578 if (wait_for_msix_trans(nic, i)) {
@@ -3808,13 +3833,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3808 TxD_t *txdp; 3833 TxD_t *txdp;
3809 TxFIFO_element_t __iomem *tx_fifo; 3834 TxFIFO_element_t __iomem *tx_fifo;
3810 unsigned long flags; 3835 unsigned long flags;
3811#ifdef NETIF_F_TSO
3812 int mss;
3813#endif
3814 u16 vlan_tag = 0; 3836 u16 vlan_tag = 0;
3815 int vlan_priority = 0; 3837 int vlan_priority = 0;
3816 mac_info_t *mac_control; 3838 mac_info_t *mac_control;
3817 struct config_param *config; 3839 struct config_param *config;
3840 int offload_type;
3818 3841
3819 mac_control = &sp->mac_control; 3842 mac_control = &sp->mac_control;
3820 config = &sp->config; 3843 config = &sp->config;
@@ -3862,13 +3885,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3862 return 0; 3885 return 0;
3863 } 3886 }
3864 3887
3865 txdp->Control_1 = 0; 3888 offload_type = s2io_offload_type(skb);
3866 txdp->Control_2 = 0;
3867#ifdef NETIF_F_TSO 3889#ifdef NETIF_F_TSO
3868 mss = skb_shinfo(skb)->gso_size; 3890 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3869 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3870 txdp->Control_1 |= TXD_TCP_LSO_EN; 3891 txdp->Control_1 |= TXD_TCP_LSO_EN;
3871 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss); 3892 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
3872 } 3893 }
3873#endif 3894#endif
3874 if (skb->ip_summed == CHECKSUM_HW) { 3895 if (skb->ip_summed == CHECKSUM_HW) {
@@ -3886,10 +3907,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3886 } 3907 }
3887 3908
3888 frg_len = skb->len - skb->data_len; 3909 frg_len = skb->len - skb->data_len;
3889 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) { 3910 if (offload_type == SKB_GSO_UDP) {
3890 int ufo_size; 3911 int ufo_size;
3891 3912
3892 ufo_size = skb_shinfo(skb)->gso_size; 3913 ufo_size = s2io_udp_mss(skb);
3893 ufo_size &= ~7; 3914 ufo_size &= ~7;
3894 txdp->Control_1 |= TXD_UFO_EN; 3915 txdp->Control_1 |= TXD_UFO_EN;
3895 txdp->Control_1 |= TXD_UFO_MSS(ufo_size); 3916 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
@@ -3906,16 +3927,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3906 sp->ufo_in_band_v, 3927 sp->ufo_in_band_v,
3907 sizeof(u64), PCI_DMA_TODEVICE); 3928 sizeof(u64), PCI_DMA_TODEVICE);
3908 txdp++; 3929 txdp++;
3909 txdp->Control_1 = 0;
3910 txdp->Control_2 = 0;
3911 } 3930 }
3912 3931
3913 txdp->Buffer_Pointer = pci_map_single 3932 txdp->Buffer_Pointer = pci_map_single
3914 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); 3933 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3915 txdp->Host_Control = (unsigned long) skb; 3934 txdp->Host_Control = (unsigned long) skb;
3916 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); 3935 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
3917 3936 if (offload_type == SKB_GSO_UDP)
3918 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
3919 txdp->Control_1 |= TXD_UFO_EN; 3937 txdp->Control_1 |= TXD_UFO_EN;
3920 3938
3921 frg_cnt = skb_shinfo(skb)->nr_frags; 3939 frg_cnt = skb_shinfo(skb)->nr_frags;
@@ -3930,12 +3948,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3930 (sp->pdev, frag->page, frag->page_offset, 3948 (sp->pdev, frag->page, frag->page_offset,
3931 frag->size, PCI_DMA_TODEVICE); 3949 frag->size, PCI_DMA_TODEVICE);
3932 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); 3950 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
3933 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) 3951 if (offload_type == SKB_GSO_UDP)
3934 txdp->Control_1 |= TXD_UFO_EN; 3952 txdp->Control_1 |= TXD_UFO_EN;
3935 } 3953 }
3936 txdp->Control_1 |= TXD_GATHER_CODE_LAST; 3954 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3937 3955
3938 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) 3956 if (offload_type == SKB_GSO_UDP)
3939 frg_cnt++; /* as Txd0 was used for inband header */ 3957 frg_cnt++; /* as Txd0 was used for inband header */
3940 3958
3941 tx_fifo = mac_control->tx_FIFO_start[queue]; 3959 tx_fifo = mac_control->tx_FIFO_start[queue];
@@ -3944,13 +3962,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3944 3962
3945 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | 3963 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3946 TX_FIFO_LAST_LIST); 3964 TX_FIFO_LAST_LIST);
3947 3965 if (offload_type)
3948#ifdef NETIF_F_TSO
3949 if (mss)
3950 val64 |= TX_FIFO_SPECIAL_FUNC;
3951#endif
3952 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
3953 val64 |= TX_FIFO_SPECIAL_FUNC; 3966 val64 |= TX_FIFO_SPECIAL_FUNC;
3967
3954 writeq(val64, &tx_fifo->List_Control); 3968 writeq(val64, &tx_fifo->List_Control);
3955 3969
3956 mmiowb(); 3970 mmiowb();
@@ -3984,13 +3998,41 @@ s2io_alarm_handle(unsigned long data)
3984 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 3998 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3985} 3999}
3986 4000
4001static int s2io_chk_rx_buffers(nic_t *sp, int rng_n)
4002{
4003 int rxb_size, level;
4004
4005 if (!sp->lro) {
4006 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4007 level = rx_buffer_level(sp, rxb_size, rng_n);
4008
4009 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4010 int ret;
4011 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4012 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4013 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4014 DBG_PRINT(ERR_DBG, "Out of memory in %s",
4015 __FUNCTION__);
4016 clear_bit(0, (&sp->tasklet_status));
4017 return -1;
4018 }
4019 clear_bit(0, (&sp->tasklet_status));
4020 } else if (level == LOW)
4021 tasklet_schedule(&sp->task);
4022
4023 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4024 DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
4025 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4026 }
4027 return 0;
4028}
4029
3987static irqreturn_t 4030static irqreturn_t
3988s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) 4031s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
3989{ 4032{
3990 struct net_device *dev = (struct net_device *) dev_id; 4033 struct net_device *dev = (struct net_device *) dev_id;
3991 nic_t *sp = dev->priv; 4034 nic_t *sp = dev->priv;
3992 int i; 4035 int i;
3993 int ret;
3994 mac_info_t *mac_control; 4036 mac_info_t *mac_control;
3995 struct config_param *config; 4037 struct config_param *config;
3996 4038
@@ -4012,35 +4054,8 @@ s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
4012 * reallocate the buffers from the interrupt handler itself, 4054 * reallocate the buffers from the interrupt handler itself,
4013 * else schedule a tasklet to reallocate the buffers. 4055 * else schedule a tasklet to reallocate the buffers.
4014 */ 4056 */
4015 for (i = 0; i < config->rx_ring_num; i++) { 4057 for (i = 0; i < config->rx_ring_num; i++)
4016 if (!sp->lro) { 4058 s2io_chk_rx_buffers(sp, i);
4017 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
4018 int level = rx_buffer_level(sp, rxb_size, i);
4019
4020 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4021 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
4022 dev->name);
4023 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4024 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
4025 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4026 dev->name);
4027 DBG_PRINT(ERR_DBG, " in ISR!!\n");
4028 clear_bit(0, (&sp->tasklet_status));
4029 atomic_dec(&sp->isr_cnt);
4030 return IRQ_HANDLED;
4031 }
4032 clear_bit(0, (&sp->tasklet_status));
4033 } else if (level == LOW) {
4034 tasklet_schedule(&sp->task);
4035 }
4036 }
4037 else if (fill_rx_buffers(sp, i) == -ENOMEM) {
4038 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4039 dev->name);
4040 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4041 break;
4042 }
4043 }
4044 4059
4045 atomic_dec(&sp->isr_cnt); 4060 atomic_dec(&sp->isr_cnt);
4046 return IRQ_HANDLED; 4061 return IRQ_HANDLED;
@@ -4051,39 +4066,13 @@ s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
4051{ 4066{
4052 ring_info_t *ring = (ring_info_t *)dev_id; 4067 ring_info_t *ring = (ring_info_t *)dev_id;
4053 nic_t *sp = ring->nic; 4068 nic_t *sp = ring->nic;
4054 struct net_device *dev = (struct net_device *) dev_id;
4055 int rxb_size, level, rng_n;
4056 4069
4057 atomic_inc(&sp->isr_cnt); 4070 atomic_inc(&sp->isr_cnt);
4058 rx_intr_handler(ring);
4059
4060 rng_n = ring->ring_no;
4061 if (!sp->lro) {
4062 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4063 level = rx_buffer_level(sp, rxb_size, rng_n);
4064 4071
4065 if ((level == PANIC) && (!TASKLET_IN_USE)) { 4072 rx_intr_handler(ring);
4066 int ret; 4073 s2io_chk_rx_buffers(sp, ring->ring_no);
4067 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4068 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4069 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4070 DBG_PRINT(ERR_DBG, "Out of memory in %s",
4071 __FUNCTION__);
4072 clear_bit(0, (&sp->tasklet_status));
4073 return IRQ_HANDLED;
4074 }
4075 clear_bit(0, (&sp->tasklet_status));
4076 } else if (level == LOW) {
4077 tasklet_schedule(&sp->task);
4078 }
4079 }
4080 else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4081 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
4082 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4083 }
4084 4074
4085 atomic_dec(&sp->isr_cnt); 4075 atomic_dec(&sp->isr_cnt);
4086
4087 return IRQ_HANDLED; 4076 return IRQ_HANDLED;
4088} 4077}
4089 4078
@@ -4248,37 +4237,8 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
4248 * else schedule a tasklet to reallocate the buffers. 4237 * else schedule a tasklet to reallocate the buffers.
4249 */ 4238 */
4250#ifndef CONFIG_S2IO_NAPI 4239#ifndef CONFIG_S2IO_NAPI
4251 for (i = 0; i < config->rx_ring_num; i++) { 4240 for (i = 0; i < config->rx_ring_num; i++)
4252 if (!sp->lro) { 4241 s2io_chk_rx_buffers(sp, i);
4253 int ret;
4254 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
4255 int level = rx_buffer_level(sp, rxb_size, i);
4256
4257 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4258 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
4259 dev->name);
4260 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4261 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
4262 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4263 dev->name);
4264 DBG_PRINT(ERR_DBG, " in ISR!!\n");
4265 clear_bit(0, (&sp->tasklet_status));
4266 atomic_dec(&sp->isr_cnt);
4267 writeq(org_mask, &bar0->general_int_mask);
4268 return IRQ_HANDLED;
4269 }
4270 clear_bit(0, (&sp->tasklet_status));
4271 } else if (level == LOW) {
4272 tasklet_schedule(&sp->task);
4273 }
4274 }
4275 else if (fill_rx_buffers(sp, i) == -ENOMEM) {
4276 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4277 dev->name);
4278 DBG_PRINT(ERR_DBG, " in Rx intr!!\n");
4279 break;
4280 }
4281 }
4282#endif 4242#endif
4283 writeq(org_mask, &bar0->general_int_mask); 4243 writeq(org_mask, &bar0->general_int_mask);
4284 atomic_dec(&sp->isr_cnt); 4244 atomic_dec(&sp->isr_cnt);
@@ -4308,6 +4268,8 @@ static void s2io_updt_stats(nic_t *sp)
4308 if (cnt == 5) 4268 if (cnt == 5)
4309 break; /* Updt failed */ 4269 break; /* Updt failed */
4310 } while(1); 4270 } while(1);
4271 } else {
4272 memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t));
4311 } 4273 }
4312} 4274}
4313 4275
@@ -4942,7 +4904,8 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
4942} 4904}
4943static void s2io_vpd_read(nic_t *nic) 4905static void s2io_vpd_read(nic_t *nic)
4944{ 4906{
4945 u8 vpd_data[256],data; 4907 u8 *vpd_data;
4908 u8 data;
4946 int i=0, cnt, fail = 0; 4909 int i=0, cnt, fail = 0;
4947 int vpd_addr = 0x80; 4910 int vpd_addr = 0x80;
4948 4911
@@ -4955,6 +4918,10 @@ static void s2io_vpd_read(nic_t *nic)
4955 vpd_addr = 0x50; 4918 vpd_addr = 0x50;
4956 } 4919 }
4957 4920
4921 vpd_data = kmalloc(256, GFP_KERNEL);
4922 if (!vpd_data)
4923 return;
4924
4958 for (i = 0; i < 256; i +=4 ) { 4925 for (i = 0; i < 256; i +=4 ) {
4959 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); 4926 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
4960 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); 4927 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
@@ -4977,6 +4944,7 @@ static void s2io_vpd_read(nic_t *nic)
4977 memset(nic->product_name, 0, vpd_data[1]); 4944 memset(nic->product_name, 0, vpd_data[1]);
4978 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); 4945 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
4979 } 4946 }
4947 kfree(vpd_data);
4980} 4948}
4981 4949
4982/** 4950/**
@@ -5295,7 +5263,7 @@ static int s2io_link_test(nic_t * sp, uint64_t * data)
5295 else 5263 else
5296 *data = 0; 5264 *data = 0;
5297 5265
5298 return 0; 5266 return *data;
5299} 5267}
5300 5268
5301/** 5269/**
@@ -5753,6 +5721,19 @@ static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5753 return 0; 5721 return 0;
5754} 5722}
5755 5723
5724static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5725{
5726 return (dev->features & NETIF_F_TSO) != 0;
5727}
5728static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5729{
5730 if (data)
5731 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5732 else
5733 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5734
5735 return 0;
5736}
5756 5737
5757static struct ethtool_ops netdev_ethtool_ops = { 5738static struct ethtool_ops netdev_ethtool_ops = {
5758 .get_settings = s2io_ethtool_gset, 5739 .get_settings = s2io_ethtool_gset,
@@ -5773,8 +5754,8 @@ static struct ethtool_ops netdev_ethtool_ops = {
5773 .get_sg = ethtool_op_get_sg, 5754 .get_sg = ethtool_op_get_sg,
5774 .set_sg = ethtool_op_set_sg, 5755 .set_sg = ethtool_op_set_sg,
5775#ifdef NETIF_F_TSO 5756#ifdef NETIF_F_TSO
5776 .get_tso = ethtool_op_get_tso, 5757 .get_tso = s2io_ethtool_op_get_tso,
5777 .set_tso = ethtool_op_set_tso, 5758 .set_tso = s2io_ethtool_op_set_tso,
5778#endif 5759#endif
5779 .get_ufo = ethtool_op_get_ufo, 5760 .get_ufo = ethtool_op_get_ufo,
5780 .set_ufo = ethtool_op_set_ufo, 5761 .set_ufo = ethtool_op_set_ufo,
@@ -6337,7 +6318,7 @@ static int s2io_card_up(nic_t * sp)
6337 s2io_set_multicast(dev); 6318 s2io_set_multicast(dev);
6338 6319
6339 if (sp->lro) { 6320 if (sp->lro) {
6340 /* Initialize max aggregatable pkts based on MTU */ 6321 /* Initialize max aggregatable pkts per session based on MTU */
6341 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; 6322 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6342 /* Check if we can use(if specified) user provided value */ 6323 /* Check if we can use(if specified) user provided value */
6343 if (lro_max_pkts < sp->lro_max_aggr_per_sess) 6324 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
@@ -6438,7 +6419,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
6438 * @cksum : FCS checksum of the frame. 6419 * @cksum : FCS checksum of the frame.
6439 * @ring_no : the ring from which this RxD was extracted. 6420 * @ring_no : the ring from which this RxD was extracted.
6440 * Description: 6421 * Description:
6441 * This function is called by the Tx interrupt serivce routine to perform 6422 * This function is called by the Rx interrupt serivce routine to perform
6442 * some OS related operations on the SKB before passing it to the upper 6423 * some OS related operations on the SKB before passing it to the upper
6443 * layers. It mainly checks if the checksum is OK, if so adds it to the 6424 * layers. It mainly checks if the checksum is OK, if so adds it to the
6444 * SKBs cksum variable, increments the Rx packet count and passes the SKB 6425 * SKBs cksum variable, increments the Rx packet count and passes the SKB
@@ -6698,33 +6679,6 @@ static void s2io_init_pci(nic_t * sp)
6698 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); 6679 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6699} 6680}
6700 6681
6701MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
6702MODULE_LICENSE("GPL");
6703MODULE_VERSION(DRV_VERSION);
6704
6705module_param(tx_fifo_num, int, 0);
6706module_param(rx_ring_num, int, 0);
6707module_param(rx_ring_mode, int, 0);
6708module_param_array(tx_fifo_len, uint, NULL, 0);
6709module_param_array(rx_ring_sz, uint, NULL, 0);
6710module_param_array(rts_frm_len, uint, NULL, 0);
6711module_param(use_continuous_tx_intrs, int, 1);
6712module_param(rmac_pause_time, int, 0);
6713module_param(mc_pause_threshold_q0q3, int, 0);
6714module_param(mc_pause_threshold_q4q7, int, 0);
6715module_param(shared_splits, int, 0);
6716module_param(tmac_util_period, int, 0);
6717module_param(rmac_util_period, int, 0);
6718module_param(bimodal, bool, 0);
6719module_param(l3l4hdr_size, int , 0);
6720#ifndef CONFIG_S2IO_NAPI
6721module_param(indicate_max_pkts, int, 0);
6722#endif
6723module_param(rxsync_frequency, int, 0);
6724module_param(intr_type, int, 0);
6725module_param(lro, int, 0);
6726module_param(lro_max_pkts, int, 0);
6727
6728static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) 6682static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6729{ 6683{
6730 if ( tx_fifo_num > 8) { 6684 if ( tx_fifo_num > 8) {
@@ -6832,8 +6786,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6832 } 6786 }
6833 if (dev_intr_type != MSI_X) { 6787 if (dev_intr_type != MSI_X) {
6834 if (pci_request_regions(pdev, s2io_driver_name)) { 6788 if (pci_request_regions(pdev, s2io_driver_name)) {
6835 DBG_PRINT(ERR_DBG, "Request Regions failed\n"), 6789 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
6836 pci_disable_device(pdev); 6790 pci_disable_device(pdev);
6837 return -ENODEV; 6791 return -ENODEV;
6838 } 6792 }
6839 } 6793 }
@@ -6957,7 +6911,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6957 /* initialize the shared memory used by the NIC and the host */ 6911 /* initialize the shared memory used by the NIC and the host */
6958 if (init_shared_mem(sp)) { 6912 if (init_shared_mem(sp)) {
6959 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", 6913 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
6960 __FUNCTION__); 6914 dev->name);
6961 ret = -ENOMEM; 6915 ret = -ENOMEM;
6962 goto mem_alloc_failed; 6916 goto mem_alloc_failed;
6963 } 6917 }
@@ -7094,6 +7048,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7094 dev->addr_len = ETH_ALEN; 7048 dev->addr_len = ETH_ALEN;
7095 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); 7049 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7096 7050
7051 /* reset Nic and bring it to known state */
7052 s2io_reset(sp);
7053
7097 /* 7054 /*
7098 * Initialize the tasklet status and link state flags 7055 * Initialize the tasklet status and link state flags
7099 * and the card state parameter 7056 * and the card state parameter
@@ -7131,11 +7088,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7131 goto register_failed; 7088 goto register_failed;
7132 } 7089 }
7133 s2io_vpd_read(sp); 7090 s2io_vpd_read(sp);
7134 DBG_PRINT(ERR_DBG, "%s: Neterion %s",dev->name, sp->product_name);
7135 DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n",
7136 get_xena_rev_id(sp->pdev),
7137 s2io_driver_version);
7138 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n"); 7091 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
7092 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7093 sp->product_name, get_xena_rev_id(sp->pdev));
7094 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7095 s2io_driver_version);
7139 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " 7096 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7140 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, 7097 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
7141 sp->def_mac_addr[0].mac_addr[0], 7098 sp->def_mac_addr[0].mac_addr[0],
@@ -7436,8 +7393,13 @@ static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
7436 if (ip->ihl != 5) /* IP has options */ 7393 if (ip->ihl != 5) /* IP has options */
7437 return -1; 7394 return -1;
7438 7395
7396 /* If we see CE codepoint in IP header, packet is not mergeable */
7397 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7398 return -1;
7399
7400 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7439 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin || 7401 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7440 !tcp->ack) { 7402 tcp->ece || tcp->cwr || !tcp->ack) {
7441 /* 7403 /*
7442 * Currently recognize only the ack control word and 7404 * Currently recognize only the ack control word and
7443 * any other control field being set would result in 7405 * any other control field being set would result in
@@ -7591,18 +7553,16 @@ static void queue_rx_frame(struct sk_buff *skb)
7591static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, 7553static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
7592 u32 tcp_len) 7554 u32 tcp_len)
7593{ 7555{
7594 struct sk_buff *tmp, *first = lro->parent; 7556 struct sk_buff *first = lro->parent;
7595 7557
7596 first->len += tcp_len; 7558 first->len += tcp_len;
7597 first->data_len = lro->frags_len; 7559 first->data_len = lro->frags_len;
7598 skb_pull(skb, (skb->len - tcp_len)); 7560 skb_pull(skb, (skb->len - tcp_len));
7599 if ((tmp = skb_shinfo(first)->frag_list)) { 7561 if (skb_shinfo(first)->frag_list)
7600 while (tmp->next) 7562 lro->last_frag->next = skb;
7601 tmp = tmp->next;
7602 tmp->next = skb;
7603 }
7604 else 7563 else
7605 skb_shinfo(first)->frag_list = skb; 7564 skb_shinfo(first)->frag_list = skb;
7565 lro->last_frag = skb;
7606 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; 7566 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7607 return; 7567 return;
7608} 7568}
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 217097bc22f1..5ed49c3be1e9 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -719,6 +719,7 @@ struct msix_info_st {
719/* Data structure to represent a LRO session */ 719/* Data structure to represent a LRO session */
720typedef struct lro { 720typedef struct lro {
721 struct sk_buff *parent; 721 struct sk_buff *parent;
722 struct sk_buff *last_frag;
722 u8 *l2h; 723 u8 *l2h;
723 struct iphdr *iph; 724 struct iphdr *iph;
724 struct tcphdr *tcph; 725 struct tcphdr *tcph;
@@ -1011,4 +1012,13 @@ static void clear_lro_session(lro_t *lro);
1011static void queue_rx_frame(struct sk_buff *skb); 1012static void queue_rx_frame(struct sk_buff *skb);
1012static void update_L3L4_header(nic_t *sp, lro_t *lro); 1013static void update_L3L4_header(nic_t *sp, lro_t *lro);
1013static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); 1014static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len);
1015
1016#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
1017#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
1018#define s2io_offload_type(skb) skb_shinfo(skb)->gso_type
1019
1020#define S2IO_PARM_INT(X, def_val) \
1021 static unsigned int X = def_val;\
1022 module_param(X , uint, 0);
1023
1014#endif /* _S2IO_H */ 1024#endif /* _S2IO_H */
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 1b8138f641e3..6f97962dd06b 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.63" 71#define DRV_MODULE_VERSION "3.64"
72#define DRV_MODULE_RELDATE "July 25, 2006" 72#define DRV_MODULE_RELDATE "July 31, 2006"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -3097,7 +3097,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3097 * Callers depend upon this behavior and assume that 3097 * Callers depend upon this behavior and assume that
3098 * we leave everything unchanged if we fail. 3098 * we leave everything unchanged if we fail.
3099 */ 3099 */
3100 skb = dev_alloc_skb(skb_size); 3100 skb = netdev_alloc_skb(tp->dev, skb_size);
3101 if (skb == NULL) 3101 if (skb == NULL)
3102 return -ENOMEM; 3102 return -ENOMEM;
3103 3103
@@ -3270,7 +3270,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
3270 tg3_recycle_rx(tp, opaque_key, 3270 tg3_recycle_rx(tp, opaque_key,
3271 desc_idx, *post_ptr); 3271 desc_idx, *post_ptr);
3272 3272
3273 copy_skb = dev_alloc_skb(len + 2); 3273 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3274 if (copy_skb == NULL) 3274 if (copy_skb == NULL)
3275 goto drop_it_no_recycle; 3275 goto drop_it_no_recycle;
3276 3276
@@ -8618,7 +8618,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8618 err = -EIO; 8618 err = -EIO;
8619 8619
8620 tx_len = 1514; 8620 tx_len = 1514;
8621 skb = dev_alloc_skb(tx_len); 8621 skb = netdev_alloc_skb(tp->dev, tx_len);
8622 if (!skb) 8622 if (!skb)
8623 return -ENOMEM; 8623 return -ENOMEM;
8624 8624