Diffstat (limited to 'drivers/net/s2io.c')
 drivers/net/s2io.c | 228 
 1 file changed, 93 insertions(+), 135 deletions(-)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 4a4e38be80a8..132ed32bce1a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -76,7 +76,7 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "2.0.14.2"
+#define DRV_VERSION "2.0.15.2"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
@@ -2383,9 +2383,14 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 			skb->data = (void *) (unsigned long)tmp;
 			skb->tail = (void *) (unsigned long)tmp;
 
-			((RxD3_t*)rxdp)->Buffer0_ptr =
-			    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
+			if (!(((RxD3_t*)rxdp)->Buffer0_ptr))
+				((RxD3_t*)rxdp)->Buffer0_ptr =
+				    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
 					    PCI_DMA_FROMDEVICE);
+			else
+				pci_dma_sync_single_for_device(nic->pdev,
+				    (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr,
+				    BUF0_LEN, PCI_DMA_FROMDEVICE);
 			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
 			if (nic->rxd_mode == RXD_MODE_3B) {
 				/* Two buffer mode */
@@ -2398,10 +2403,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 				    (nic->pdev, skb->data, dev->mtu + 4,
 				    PCI_DMA_FROMDEVICE);
 
-				/* Buffer-1 will be dummy buffer not used */
-				((RxD3_t*)rxdp)->Buffer1_ptr =
-				    pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
-					PCI_DMA_FROMDEVICE);
+				/* Buffer-1 will be dummy buffer. Not used */
+				if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) {
+					((RxD3_t*)rxdp)->Buffer1_ptr =
+						pci_map_single(nic->pdev,
+						ba->ba_1, BUF1_LEN,
+						PCI_DMA_FROMDEVICE);
+				}
 				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
 				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
 								(dev->mtu + 4);
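The two hunks above move the descriptor refill path from map-on-every-refill to map-once: the small fixed per-descriptor areas ba->ba_0 and ba->ba_1 are now mapped a single time, and on later refills ownership is simply handed back to the device. A minimal sketch of the pattern, with a simplified hypothetical helper name (refill_buf0 is illustrative, not driver code):

	/* Map a reused RX buffer once; on subsequent refills only
	 * transfer ownership back to the device. */
	static dma_addr_t refill_buf0(struct pci_dev *pdev, dma_addr_t cur,
				      void *vaddr, size_t len)
	{
		if (!cur)
			/* first use: create the streaming mapping */
			return pci_map_single(pdev, vaddr, len,
					      PCI_DMA_FROMDEVICE);
		/* reuse: no unmap/remap cycle, just sync for the device */
		pci_dma_sync_single_for_device(pdev, cur, len,
					       PCI_DMA_FROMDEVICE);
		return cur;
	}

This saves a map/unmap round trip per received frame for the fixed header buffers.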
@@ -2728,7 +2736,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
 		/* If your are next to put index then it's FIFO full condition */
 		if ((get_block == put_block) &&
 		    (get_info.offset + 1) == put_info.offset) {
-			DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name);
+			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
 			break;
 		}
 		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
@@ -2748,18 +2756,15 @@ static void rx_intr_handler(ring_info_t *ring_data)
 					 HEADER_SNAP_SIZE,
 					 PCI_DMA_FROMDEVICE);
 		} else if (nic->rxd_mode == RXD_MODE_3B) {
-			pci_unmap_single(nic->pdev, (dma_addr_t)
+			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
 					 ((RxD3_t*)rxdp)->Buffer0_ptr,
 					 BUF0_LEN, PCI_DMA_FROMDEVICE);
 			pci_unmap_single(nic->pdev, (dma_addr_t)
-					 ((RxD3_t*)rxdp)->Buffer1_ptr,
-					 BUF1_LEN, PCI_DMA_FROMDEVICE);
-			pci_unmap_single(nic->pdev, (dma_addr_t)
 					 ((RxD3_t*)rxdp)->Buffer2_ptr,
 					 dev->mtu + 4,
 					 PCI_DMA_FROMDEVICE);
 		} else {
-			pci_unmap_single(nic->pdev, (dma_addr_t)
+			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
 				((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
 				PCI_DMA_FROMDEVICE);
 			pci_unmap_single(nic->pdev, (dma_addr_t)
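This is the receive-side counterpart of the refill change: because Buffer0 now stays mapped across frames, the interrupt handler must not unmap it and instead only claims ownership for the CPU before reading the header (the Buffer1 dummy buffer likewise loses its unmap). A sketch of the intended pairing, assuming the driver's buf0 DMA handle and BUF0_LEN:

	/* CPU claims the persistent header buffer before parsing it;
	 * fill_rx_buffers() later returns it to the NIC with
	 * pci_dma_sync_single_for_device(). No unmap in between. */
	static void rx_claim_header(struct pci_dev *pdev, dma_addr_t buf0_dma,
				    size_t len)
	{
		pci_dma_sync_single_for_cpu(pdev, buf0_dma, len,
					    PCI_DMA_FROMDEVICE);
	}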
@@ -3548,7 +3553,7 @@ static void restore_xmsi_data(nic_t *nic)
 	u64 val64;
 	int i;
 
-	for (i=0; i< nic->avail_msix_vectors; i++) {
+	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
 		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
 		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
 		val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
@@ -3567,7 +3572,7 @@ static void store_xmsi_data(nic_t *nic)
 	int i;
 
 	/* Store and display */
-	for (i=0; i< nic->avail_msix_vectors; i++) {
+	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
 		val64 = (BIT(15) | vBIT(i, 26, 6));
 		writeq(val64, &bar0->xmsi_access);
 		if (wait_for_msix_trans(nic, i)) {
@@ -3828,13 +3833,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	TxD_t *txdp;
 	TxFIFO_element_t __iomem *tx_fifo;
 	unsigned long flags;
-#ifdef NETIF_F_TSO
-	int mss;
-#endif
 	u16 vlan_tag = 0;
 	int vlan_priority = 0;
 	mac_info_t *mac_control;
 	struct config_param *config;
+	int offload_type;
 
 	mac_control = &sp->mac_control;
 	config = &sp->config;
@@ -3882,13 +3885,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 		return 0;
 	}
 
-	txdp->Control_1 = 0;
-	txdp->Control_2 = 0;
+	offload_type = s2io_offload_type(skb);
 #ifdef NETIF_F_TSO
-	mss = skb_shinfo(skb)->gso_size;
-	if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
 		txdp->Control_1 |= TXD_TCP_LSO_EN;
-		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
+		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
 	}
 #endif
 	if (skb->ip_summed == CHECKSUM_HW) {
@@ -3906,10 +3907,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	frg_len = skb->len - skb->data_len;
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) {
+	if (offload_type == SKB_GSO_UDP) {
 		int ufo_size;
 
-		ufo_size = skb_shinfo(skb)->gso_size;
+		ufo_size = s2io_udp_mss(skb);
 		ufo_size &= ~7;
 		txdp->Control_1 |= TXD_UFO_EN;
 		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
@@ -3926,16 +3927,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 						sp->ufo_in_band_v,
 						sizeof(u64), PCI_DMA_TODEVICE);
 		txdp++;
-		txdp->Control_1 = 0;
-		txdp->Control_2 = 0;
 	}
 
 	txdp->Buffer_Pointer = pci_map_single
 	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
 	txdp->Host_Control = (unsigned long) skb;
 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
-
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+	if (offload_type == SKB_GSO_UDP)
 		txdp->Control_1 |= TXD_UFO_EN;
 
 	frg_cnt = skb_shinfo(skb)->nr_frags;
@@ -3950,12 +3948,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 		    (sp->pdev, frag->page, frag->page_offset,
 		     frag->size, PCI_DMA_TODEVICE);
 		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
-		if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+		if (offload_type == SKB_GSO_UDP)
 			txdp->Control_1 |= TXD_UFO_EN;
 	}
 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
 
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+	if (offload_type == SKB_GSO_UDP)
 		frg_cnt++; /* as Txd0 was used for inband header */
 
 	tx_fifo = mac_control->tx_FIFO_start[queue];
@@ -3964,13 +3962,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
 		 TX_FIFO_LAST_LIST);
-
-#ifdef NETIF_F_TSO
-	if (mss)
-		val64 |= TX_FIFO_SPECIAL_FUNC;
-#endif
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+	if (offload_type)
 		val64 |= TX_FIFO_SPECIAL_FUNC;
+
 	writeq(val64, &tx_fifo->List_Control);
 
 	mmiowb();
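The transmit path now derives all offload decisions from a single offload_type value instead of re-reading skb_shinfo() and carrying an #ifdef'd mss local. The s2io_offload_type()/s2io_tcp_mss()/s2io_udp_mss() helpers are introduced elsewhere in this patch; a plausible minimal shape, inferred from the code they replace (an assumption, not the verbatim driver source):

	static inline int s2io_offload_type(struct sk_buff *skb)
	{
		return skb_shinfo(skb)->gso_type; /* SKB_GSO_TCPV4/V6/UDP */
	}

	static inline int s2io_tcp_mss(struct sk_buff *skb)
	{
		return skb_shinfo(skb)->gso_size;
	}

	static inline int s2io_udp_mss(struct sk_buff *skb)
	{
		return skb_shinfo(skb)->gso_size;
	}

A nonzero offload_type (any GSO type) is also what now gates TX_FIFO_SPECIAL_FUNC, collapsing the separate TSO and UFO checks.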
@@ -4004,13 +3998,41 @@ s2io_alarm_handle(unsigned long data)
 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
 }
 
+static int s2io_chk_rx_buffers(nic_t *sp, int rng_n)
+{
+	int rxb_size, level;
+
+	if (!sp->lro) {
+		rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
+		level = rx_buffer_level(sp, rxb_size, rng_n);
+
+		if ((level == PANIC) && (!TASKLET_IN_USE)) {
+			int ret;
+			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
+			DBG_PRINT(INTR_DBG, "PANIC levels\n");
+			if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
+				DBG_PRINT(ERR_DBG, "Out of memory in %s",
+					  __FUNCTION__);
+				clear_bit(0, (&sp->tasklet_status));
+				return -1;
+			}
+			clear_bit(0, (&sp->tasklet_status));
+		} else if (level == LOW)
+			tasklet_schedule(&sp->task);
+
+	} else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
+		DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
+		DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
+	}
+	return 0;
+}
+
 static irqreturn_t
 s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct net_device *dev = (struct net_device *) dev_id;
 	nic_t *sp = dev->priv;
 	int i;
-	int ret;
 	mac_info_t *mac_control;
 	struct config_param *config;
 
@@ -4032,35 +4054,8 @@ s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
 	 * reallocate the buffers from the interrupt handler itself,
 	 * else schedule a tasklet to reallocate the buffers.
 	 */
-	for (i = 0; i < config->rx_ring_num; i++) {
-		if (!sp->lro) {
-			int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
-			int level = rx_buffer_level(sp, rxb_size, i);
-
-			if ((level == PANIC) && (!TASKLET_IN_USE)) {
-				DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
-					  dev->name);
-				DBG_PRINT(INTR_DBG, "PANIC levels\n");
-				if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
-					DBG_PRINT(ERR_DBG, "%s:Out of memory",
-						  dev->name);
-					DBG_PRINT(ERR_DBG, " in ISR!!\n");
-					clear_bit(0, (&sp->tasklet_status));
-					atomic_dec(&sp->isr_cnt);
-					return IRQ_HANDLED;
-				}
-				clear_bit(0, (&sp->tasklet_status));
-			} else if (level == LOW) {
-				tasklet_schedule(&sp->task);
-			}
-		}
-		else if (fill_rx_buffers(sp, i) == -ENOMEM) {
-			DBG_PRINT(ERR_DBG, "%s:Out of memory",
-				  dev->name);
-			DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
-			break;
-		}
-	}
+	for (i = 0; i < config->rx_ring_num; i++)
+		s2io_chk_rx_buffers(sp, i);
 
 	atomic_dec(&sp->isr_cnt);
 	return IRQ_HANDLED;
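s2io_chk_rx_buffers() folds the replenish logic that was duplicated across the MSI, MSI-X ring, and INTA handlers into one helper: at PANIC level it refills inline, below the LOW watermark it defers to the tasklet, and with LRO enabled it always refills inline. All three call sites reduce to the same shape:

	for (i = 0; i < config->rx_ring_num; i++)
		s2io_chk_rx_buffers(sp, i);

One behavioral nuance worth noting: the helper returns -1 on -ENOMEM, but the callers ignore the return value, so an allocation failure no longer aborts the interrupt handler early the way the old inline code did.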
@@ -4071,39 +4066,13 @@ s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
 {
 	ring_info_t *ring = (ring_info_t *)dev_id;
 	nic_t *sp = ring->nic;
-	struct net_device *dev = (struct net_device *) dev_id;
-	int rxb_size, level, rng_n;
 
 	atomic_inc(&sp->isr_cnt);
-	rx_intr_handler(ring);
 
-	rng_n = ring->ring_no;
-	if (!sp->lro) {
-		rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
-		level = rx_buffer_level(sp, rxb_size, rng_n);
-
-		if ((level == PANIC) && (!TASKLET_IN_USE)) {
-			int ret;
-			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
-			DBG_PRINT(INTR_DBG, "PANIC levels\n");
-			if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
-				DBG_PRINT(ERR_DBG, "Out of memory in %s",
-					  __FUNCTION__);
-				clear_bit(0, (&sp->tasklet_status));
-				return IRQ_HANDLED;
-			}
-			clear_bit(0, (&sp->tasklet_status));
-		} else if (level == LOW) {
-			tasklet_schedule(&sp->task);
-		}
-	}
-	else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
-		DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
-		DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
-	}
+	rx_intr_handler(ring);
+	s2io_chk_rx_buffers(sp, ring->ring_no);
 
 	atomic_dec(&sp->isr_cnt);
-
 	return IRQ_HANDLED;
 }
 
@@ -4268,37 +4237,8 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
 	 * else schedule a tasklet to reallocate the buffers.
 	 */
 #ifndef CONFIG_S2IO_NAPI
-	for (i = 0; i < config->rx_ring_num; i++) {
-		if (!sp->lro) {
-			int ret;
-			int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
-			int level = rx_buffer_level(sp, rxb_size, i);
-
-			if ((level == PANIC) && (!TASKLET_IN_USE)) {
-				DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
-					  dev->name);
-				DBG_PRINT(INTR_DBG, "PANIC levels\n");
-				if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
-					DBG_PRINT(ERR_DBG, "%s:Out of memory",
-						  dev->name);
-					DBG_PRINT(ERR_DBG, " in ISR!!\n");
-					clear_bit(0, (&sp->tasklet_status));
-					atomic_dec(&sp->isr_cnt);
-					writeq(org_mask, &bar0->general_int_mask);
-					return IRQ_HANDLED;
-				}
-				clear_bit(0, (&sp->tasklet_status));
-			} else if (level == LOW) {
-				tasklet_schedule(&sp->task);
-			}
-		}
-		else if (fill_rx_buffers(sp, i) == -ENOMEM) {
-			DBG_PRINT(ERR_DBG, "%s:Out of memory",
-				  dev->name);
-			DBG_PRINT(ERR_DBG, " in Rx intr!!\n");
-			break;
-		}
-	}
+	for (i = 0; i < config->rx_ring_num; i++)
+		s2io_chk_rx_buffers(sp, i);
 #endif
 	writeq(org_mask, &bar0->general_int_mask);
 	atomic_dec(&sp->isr_cnt);
@@ -4328,6 +4268,8 @@ static void s2io_updt_stats(nic_t *sp)
 			if (cnt == 5)
 				break; /* Updt failed */
 		} while(1);
+	} else {
+		memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t));
 	}
 }
 
@@ -5779,6 +5721,19 @@ static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
 	return 0;
 }
 
+static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
+{
+	return (dev->features & NETIF_F_TSO) != 0;
+}
+static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
+{
+	if (data)
+		dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+	else
+		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+	return 0;
+}
 
 static struct ethtool_ops netdev_ethtool_ops = {
 	.get_settings = s2io_ethtool_gset,
@@ -5799,8 +5754,8 @@ static struct ethtool_ops netdev_ethtool_ops = {
 	.get_sg = ethtool_op_get_sg,
 	.set_sg = ethtool_op_set_sg,
 #ifdef NETIF_F_TSO
-	.get_tso = ethtool_op_get_tso,
-	.set_tso = ethtool_op_set_tso,
+	.get_tso = s2io_ethtool_op_get_tso,
+	.set_tso = s2io_ethtool_op_set_tso,
 #endif
 	.get_ufo = ethtool_op_get_ufo,
 	.set_ufo = ethtool_op_set_ufo,
@@ -7438,8 +7393,13 @@ static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
 	if (ip->ihl != 5) /* IP has options */
 		return -1;
 
+	/* If we see CE codepoint in IP header, packet is not mergeable */
+	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
+		return -1;
+
+	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
 	if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
-	    !tcp->ack) {
+	    tcp->ece || tcp->cwr || !tcp->ack) {
 		/*
 		 * Currently recognize only the ack control word and
 		 * any other control field being set would result in
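Both new checks keep explicit congestion signals visible to the receiver: merging a CE-marked IP header or an ECE/CWR-marked TCP segment into a larger LRO super-packet could hide the mark and defeat ECN. A sketch of the combined predicate (ecn_blocks_lro is an illustrative name; the accessors are the ones used above, from <net/dsfield.h> and <net/inet_ecn.h>):

	static inline int ecn_blocks_lro(struct iphdr *ip, struct tcphdr *tcp)
	{
		/* IP layer: a router marked Congestion Experienced */
		if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
			return 1;
		/* TCP layer: ECN-Echo or Congestion Window Reduced */
		return tcp->ece || tcp->cwr;
	}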
@@ -7593,18 +7553,16 @@ static void queue_rx_frame(struct sk_buff *skb)
 static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
 			   u32 tcp_len)
 {
-	struct sk_buff *tmp, *first = lro->parent;
+	struct sk_buff *first = lro->parent;
 
 	first->len += tcp_len;
 	first->data_len = lro->frags_len;
 	skb_pull(skb, (skb->len - tcp_len));
-	if ((tmp = skb_shinfo(first)->frag_list)) {
-		while (tmp->next)
-			tmp = tmp->next;
-		tmp->next = skb;
-	}
+	if (skb_shinfo(first)->frag_list)
+		lro->last_frag->next = skb;
 	else
 		skb_shinfo(first)->frag_list = skb;
+	lro->last_frag = skb;
 	sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
 	return;
 }
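The append path previously walked the whole frag_list to find the tail, making LRO aggregation quadratic over the frames merged into one session. Caching the tail in lro->last_frag (presumably added to lro_t in s2io.h, which this diffstat, limited to s2io.c, does not show) makes each append O(1). Generic form of the pattern, with simplified types (a sketch, not driver code):

	struct frag_chain {
		struct sk_buff *head;	/* skb_shinfo(first)->frag_list */
		struct sk_buff *tail;	/* lro->last_frag */
	};

	static void chain_append(struct frag_chain *c, struct sk_buff *skb)
	{
		if (c->head)
			c->tail->next = skb;	/* O(1), no list walk */
		else
			c->head = skb;		/* first fragment */
		c->tail = skb;
	}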