author     Ananda Raju <Ananda.Raju@neterion.com>      2005-11-14 15:25:08 -0500
committer  Jeff Garzik <jgarzik@pobox.com>             2005-11-18 13:28:15 -0500
commit     fed5eccdcf542742786701b2514b5cb7ab282b93 (patch)
tree       526e89f8c7ee9793f6980368f334aca09c62b475 /drivers/net/s2io.c
parent     1f8fc99300c6247cace008c470f31eb86e9e7802 (diff)
[PATCH] s2io: UFO support
This patch implements UFO (UDP Fragmentation Offload) support in the S2io driver, using the UFO interface available in the linux-2.6.15 kernel.

Signed-off-by: Ananda Raju <ananda.raju@neterion.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/net/s2io.c')
-rw-r--r--  drivers/net/s2io.c | 186
1 file changed, 109 insertions(+), 77 deletions(-)
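UFO (UDP Fragmentation Offload) lets the Xframe II hardware split one large UDP datagram into IP fragments on transmit, so the driver has to hand it the IPv6 fragment ID and the per-fragment size along with the data. The patch does this by reserving one extra transmit descriptor per list for an 8-byte "in-band" buffer holding the fragment ID (hence max_txds moving from MAX_SKB_FRAGS + 1 to MAX_SKB_FRAGS + 2, and the new s2io_txdl_getskb() skipping a leading TxD whose Host_Control points at ufo_in_band_v). A minimal standalone sketch of that descriptor layout follows; the struct and flag names are simplified stand-ins, not the driver's real TxD_t/TXD_* definitions.

/*
 * Sketch of the TxD layout for a UFO frame: descriptor 0 carries the
 * 8-byte in-band IPv6 fragment ID, descriptor 1 the linear skb data,
 * and one descriptor per page fragment follows.  Mock types only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MOCK_TXD_UFO_EN        (1ULL << 0)
#define MOCK_TXD_GATHER_FIRST  (1ULL << 1)
#define MOCK_TXD_GATHER_LAST   (1ULL << 2)

struct mock_txd {
	uint64_t control;       /* Control_1-style flag word */
	uint64_t buffer;        /* DMA address in the real driver */
	uint64_t host_control;  /* skb pointer, or in-band marker */
};

/* Returns how many descriptors a UFO skb consumes: nr_frags + 2. */
static int build_ufo_txdl(struct mock_txd *txd, uint64_t ip6_frag_id,
			  int nr_frags)
{
	int used = 0;

	/* TxD0: in-band header holding the fragment ID for the hardware */
	txd[used].control = MOCK_TXD_UFO_EN | MOCK_TXD_GATHER_FIRST;
	txd[used].buffer = ip6_frag_id;   /* real driver DMA-maps a u64 */
	used++;

	/* TxD1: linear skb data, then one TxD per page fragment */
	for (int i = 0; i <= nr_frags; i++, used++)
		txd[used].control = MOCK_TXD_UFO_EN;

	txd[used - 1].control |= MOCK_TXD_GATHER_LAST;
	return used;
}

int main(void)
{
	struct mock_txd list[18 + 2];   /* think MAX_SKB_FRAGS + 2 */

	memset(list, 0, sizeof(list));
	/* e.g. an skb with 3 page fragments -> 1 + 1 + 3 = 5 descriptors */
	printf("descriptors used: %d\n",
	       build_ufo_txdl(list, 0x1234abcdULL, 3));
	return 0;
}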
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index e57df8dfe6b4..5303a96b4327 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -66,7 +66,7 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "Version 2.0.9.3"
+#define DRV_VERSION "Version 2.0.9.4"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
@@ -412,7 +412,7 @@ static int init_shared_mem(struct s2io_nic *nic)
 			config->tx_cfg[i].fifo_len - 1;
 		mac_control->fifos[i].fifo_no = i;
 		mac_control->fifos[i].nic = nic;
-		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 1;
+		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
 
 		for (j = 0; j < page_num; j++) {
 			int k = 0;
@@ -459,6 +459,10 @@ static int init_shared_mem(struct s2io_nic *nic)
 		}
 	}
 
+	nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
+	if (!nic->ufo_in_band_v)
+		return -ENOMEM;
+
 	/* Allocation and initialization of RXDs in Rings */
 	size = 0;
 	for (i = 0; i < config->rx_ring_num; i++) {
@@ -731,6 +735,8 @@ static void free_shared_mem(struct s2io_nic *nic)
 				    mac_control->stats_mem,
 				    mac_control->stats_mem_phy);
 	}
+	if (nic->ufo_in_band_v)
+		kfree(nic->ufo_in_band_v);
 }
 
 /**
@@ -2003,6 +2009,49 @@ static int start_nic(struct s2io_nic *nic)
 
 	return SUCCESS;
 }
+/**
+ * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
+ */
+static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off)
+{
+	nic_t *nic = fifo_data->nic;
+	struct sk_buff *skb;
+	TxD_t *txds;
+	u16 j, frg_cnt;
+
+	txds = txdlp;
+	if (txds->Host_Control == (u64) nic->ufo_in_band_v) {
+		pci_unmap_single(nic->pdev, (dma_addr_t)
+			txds->Buffer_Pointer, sizeof(u64),
+			PCI_DMA_TODEVICE);
+		txds++;
+	}
+
+	skb = (struct sk_buff *) ((unsigned long)
+			txds->Host_Control);
+	if (!skb) {
+		memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
+		return NULL;
+	}
+	pci_unmap_single(nic->pdev, (dma_addr_t)
+			 txds->Buffer_Pointer,
+			 skb->len - skb->data_len,
+			 PCI_DMA_TODEVICE);
+	frg_cnt = skb_shinfo(skb)->nr_frags;
+	if (frg_cnt) {
+		txds++;
+		for (j = 0; j < frg_cnt; j++, txds++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
+			if (!txds->Buffer_Pointer)
+				break;
+			pci_unmap_page(nic->pdev, (dma_addr_t)
+					txds->Buffer_Pointer,
+					frag->size, PCI_DMA_TODEVICE);
+		}
+	}
+	txdlp->Host_Control = 0;
+	return(skb);
+}
 
 /**
  * free_tx_buffers - Free all queued Tx buffers
@@ -2020,7 +2069,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
 	int i, j;
 	mac_info_t *mac_control;
 	struct config_param *config;
-	int cnt = 0, frg_cnt;
+	int cnt = 0;
 
 	mac_control = &nic->mac_control;
 	config = &nic->config;
@@ -2029,38 +2078,11 @@ static void free_tx_buffers(struct s2io_nic *nic)
 		for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
 			txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
 			    list_virt_addr;
-			skb =
-			    (struct sk_buff *) ((unsigned long) txdp->
-						Host_Control);
-			if (skb == NULL) {
-				memset(txdp, 0, sizeof(TxD_t) *
-				       config->max_txds);
-				continue;
-			}
-			frg_cnt = skb_shinfo(skb)->nr_frags;
-			pci_unmap_single(nic->pdev, (dma_addr_t)
-					 txdp->Buffer_Pointer,
-					 skb->len - skb->data_len,
-					 PCI_DMA_TODEVICE);
-			if (frg_cnt) {
-				TxD_t *temp;
-				temp = txdp;
-				txdp++;
-				for (j = 0; j < frg_cnt; j++, txdp++) {
-					skb_frag_t *frag =
-					    &skb_shinfo(skb)->frags[j];
-					pci_unmap_page(nic->pdev,
-						       (dma_addr_t)
-						       txdp->
-						       Buffer_Pointer,
-						       frag->size,
-						       PCI_DMA_TODEVICE);
-				}
-				txdp = temp;
+			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
+			if (skb) {
+				dev_kfree_skb(skb);
+				cnt++;
 			}
-			dev_kfree_skb(skb);
-			memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
-			cnt++;
 		}
 		DBG_PRINT(INTR_DBG,
 			  "%s:forcibly freeing %d skbs on FIFO%d\n",
@@ -2661,7 +2683,6 @@ static void tx_intr_handler(fifo_info_t *fifo_data)
 	tx_curr_get_info_t get_info, put_info;
 	struct sk_buff *skb;
 	TxD_t *txdlp;
-	u16 j, frg_cnt;
 
 	get_info = fifo_data->tx_curr_get_info;
 	put_info = fifo_data->tx_curr_put_info;
@@ -2684,8 +2705,7 @@ to loss of link\n");
 			}
 		}
 
-		skb = (struct sk_buff *) ((unsigned long)
-				txdlp->Host_Control);
+		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
 		if (skb == NULL) {
 			DBG_PRINT(ERR_DBG, "%s: Null skb ",
 				  __FUNCTION__);
@@ -2693,34 +2713,6 @@ to loss of link\n");
 			return;
 		}
 
-		frg_cnt = skb_shinfo(skb)->nr_frags;
-		nic->tx_pkt_count++;
-
-		pci_unmap_single(nic->pdev, (dma_addr_t)
-				 txdlp->Buffer_Pointer,
-				 skb->len - skb->data_len,
-				 PCI_DMA_TODEVICE);
-		if (frg_cnt) {
-			TxD_t *temp;
-			temp = txdlp;
-			txdlp++;
-			for (j = 0; j < frg_cnt; j++, txdlp++) {
-				skb_frag_t *frag =
-				    &skb_shinfo(skb)->frags[j];
-				if (!txdlp->Buffer_Pointer)
-					break;
-				pci_unmap_page(nic->pdev,
-					       (dma_addr_t)
-					       txdlp->
-					       Buffer_Pointer,
-					       frag->size,
-					       PCI_DMA_TODEVICE);
-			}
-			txdlp = temp;
-		}
-		memset(txdlp, 0,
-		       (sizeof(TxD_t) * fifo_data->max_txds));
-
 		/* Updating the statistics block */
 		nic->stats.tx_bytes += skb->len;
 		dev_kfree_skb_irq(skb);
@@ -3527,6 +3519,8 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 		return 0;
 	}
 
+	txdp->Control_1 = 0;
+	txdp->Control_2 = 0;
 #ifdef NETIF_F_TSO
 	mss = skb_shinfo(skb)->tso_size;
 	if (mss) {
@@ -3534,19 +3528,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
 	}
 #endif
-
-	frg_cnt = skb_shinfo(skb)->nr_frags;
-	frg_len = skb->len - skb->data_len;
-
-	txdp->Buffer_Pointer = pci_map_single
-	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
-	txdp->Host_Control = (unsigned long) skb;
 	if (skb->ip_summed == CHECKSUM_HW) {
 		txdp->Control_2 |=
 		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
 		     TXD_TX_CKO_UDP_EN);
 	}
-
+	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
+	txdp->Control_1 |= TXD_LIST_OWN_XENA;
 	txdp->Control_2 |= config->tx_intr_type;
 
 	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
@@ -3554,10 +3542,40 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
 	}
 
-	txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
-			    TXD_GATHER_CODE_FIRST);
-	txdp->Control_1 |= TXD_LIST_OWN_XENA;
+	frg_len = skb->len - skb->data_len;
+	if (skb_shinfo(skb)->ufo_size) {
+		int ufo_size;
+
+		ufo_size = skb_shinfo(skb)->ufo_size;
+		ufo_size &= ~7;
+		txdp->Control_1 |= TXD_UFO_EN;
+		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
+		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
+#ifdef __BIG_ENDIAN
+		sp->ufo_in_band_v[put_off] =
+				(u64)skb_shinfo(skb)->ip6_frag_id;
+#else
+		sp->ufo_in_band_v[put_off] =
+				(u64)skb_shinfo(skb)->ip6_frag_id << 32;
+#endif
+		txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
+		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
+					sp->ufo_in_band_v,
+					sizeof(u64), PCI_DMA_TODEVICE);
+		txdp++;
+		txdp->Control_1 = 0;
+		txdp->Control_2 = 0;
+	}
+
+	txdp->Buffer_Pointer = pci_map_single
+	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
+	txdp->Host_Control = (unsigned long) skb;
+	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
 
+	if (skb_shinfo(skb)->ufo_size)
+		txdp->Control_1 |= TXD_UFO_EN;
+
+	frg_cnt = skb_shinfo(skb)->nr_frags;
 	/* For fragmented SKB. */
 	for (i = 0; i < frg_cnt; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -3569,9 +3587,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 		    (sp->pdev, frag->page, frag->page_offset,
 		     frag->size, PCI_DMA_TODEVICE);
 		txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
+		if (skb_shinfo(skb)->ufo_size)
+			txdp->Control_1 |= TXD_UFO_EN;
 	}
 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
 
+	if (skb_shinfo(skb)->ufo_size)
+		frg_cnt++; /* as Txd0 was used for inband header */
+
 	tx_fifo = mac_control->tx_FIFO_start[queue];
 	val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
 	writeq(val64, &tx_fifo->TxDL_Pointer);
@@ -3583,6 +3606,8 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (mss)
 		val64 |= TX_FIFO_SPECIAL_FUNC;
 #endif
+	if (skb_shinfo(skb)->ufo_size)
+		val64 |= TX_FIFO_SPECIAL_FUNC;
 	writeq(val64, &tx_fifo->List_Control);
 
 	mmiowb();
@@ -5190,6 +5215,8 @@ static struct ethtool_ops netdev_ethtool_ops = {
 	.get_tso = ethtool_op_get_tso,
 	.set_tso = ethtool_op_set_tso,
 #endif
+	.get_ufo = ethtool_op_get_ufo,
+	.set_ufo = ethtool_op_set_ufo,
 	.self_test_count = s2io_ethtool_self_test_count,
 	.self_test = s2io_ethtool_test,
 	.get_strings = s2io_ethtool_get_strings,
@@ -5941,7 +5968,8 @@ Defaulting to INTA\n");
 			break;
 		}
 	}
-	config->max_txds = MAX_SKB_FRAGS + 1;
+	/* + 2 because one Txd for skb->data and one Txd for UFO */
+	config->max_txds = MAX_SKB_FRAGS + 2;
 
 	/* Rx side parameters. */
 	if (rx_ring_sz[0] == 0)
@@ -6035,6 +6063,10 @@ Defaulting to INTA\n");
 #ifdef NETIF_F_TSO
 	dev->features |= NETIF_F_TSO;
 #endif
+	if (sp->device_type & XFRAME_II_DEVICE) {
+		dev->features |= NETIF_F_UFO;
+		dev->features |= NETIF_F_HW_CSUM;
+	}
 
 	dev->tx_timeout = &s2io_tx_watchdog;
 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;