author     Brian Hill <brian.hill@xilinx.com>        2010-05-26 23:44:30 -0400
committer  David S. Miller <davem@davemloft.net>     2010-05-26 23:44:30 -0400
commit     23ecc4bde21f0ccb38f4b53cadde7fc5d67d68e3 (patch)
tree       bb2cb5eb3121164665f704a8597a9dd18de63df0 /drivers/net/ll_temac_main.c
parent     755fae0ac41672523a3ac00d41fe9bac226b0578 (diff)
net: ll_temac: fix checksum offload logic
The current checksum offload code does not work, and this corrects
that functionality. It also updates the interrupt coalescing
initialization so that there are fewer interrupts and performance
is increased.
Signed-off-by: Brian Hill <brian.hill@xilinx.com>
Signed-off-by: John Linn <john.linn@xilinx.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ll_temac_main.c')
-rw-r--r--  drivers/net/ll_temac_main.c | 82
1 file changed, 58 insertions, 24 deletions
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 1bb6e605f64f..fbd07de2e088 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -245,7 +245,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
                                   CHNL_CTRL_IRQ_COAL_EN);
         /* 0x10220483 */
         /* 0x00100483 */
-        lp->dma_out(lp, RX_CHNL_CTRL, 0xff010000 |
+        lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
                                   CHNL_CTRL_IRQ_EN |
                                   CHNL_CTRL_IRQ_DLY_EN |
                                   CHNL_CTRL_IRQ_COAL_EN |
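The interesting part of this hunk is the new magic value. On the LocalLink DMA channel-control register the top byte is generally understood to be the interrupt waitbound timer and the next byte the interrupt coalescing count, so 0xff070000 asks for an RX interrupt after up to 7 packets (or when the timer expires) instead of after every packet, which is what the commit message means by "fewer interrupts". The macros below are only a sketch of that interpretation; the names and the assumed field layout are illustrative, not definitions from the driver:

```c
/*
 * Illustrative field helpers (hypothetical names, not from ll_temac):
 * assumed CHNL_CTRL layout with the IRQ timeout in bits 31:24 and the
 * IRQ coalescing count in bits 23:16.
 */
#define XDMA_IRQ_TIMEOUT(x)     (((x) & 0xffu) << 24)   /* waitbound timer */
#define XDMA_IRQ_COUNT(x)       (((x) & 0xffu) << 16)   /* packets per IRQ */

/* Old value: interrupt after every received packet.                     */
/* 0xff010000 == XDMA_IRQ_TIMEOUT(0xff) | XDMA_IRQ_COUNT(1)              */

/* New value: interrupt after up to 7 packets, so far fewer RX IRQs.     */
/* 0xff070000 == XDMA_IRQ_TIMEOUT(0xff) | XDMA_IRQ_COUNT(7)              */
```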
@@ -574,6 +574,10 @@ static void temac_start_xmit_done(struct net_device *ndev)
                 if (cur_p->app4)
                         dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
                 cur_p->app0 = 0;
+                cur_p->app1 = 0;
+                cur_p->app2 = 0;
+                cur_p->app3 = 0;
+                cur_p->app4 = 0;
 
                 ndev->stats.tx_packets++;
                 ndev->stats.tx_bytes += cur_p->len;
@@ -589,6 +593,29 @@ static void temac_start_xmit_done(struct net_device *ndev)
                 netif_wake_queue(ndev);
 }
 
+static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
+{
+        struct cdmac_bd *cur_p;
+        int tail;
+
+        tail = lp->tx_bd_tail;
+        cur_p = &lp->tx_bd_v[tail];
+
+        do {
+                if (cur_p->app0)
+                        return NETDEV_TX_BUSY;
+
+                tail++;
+                if (tail >= TX_BD_NUM)
+                        tail = 0;
+
+                cur_p = &lp->tx_bd_v[tail];
+                num_frag--;
+        } while (num_frag >= 0);
+
+        return 0;
+}
+
 static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
         struct temac_local *lp = netdev_priv(ndev);
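temac_check_tx_bd_space() walks num_frag + 1 descriptors starting at tx_bd_tail, wrapping at TX_BD_NUM, and treats any descriptor whose app0 word is still set as in flight (temac_start_xmit_done() clears app0 when it reclaims a slot, which is why the previous hunk zeroes app0 through app4). The standalone sketch below mirrors that wrap-around walk outside the driver; the function name and the tiny four-slot ring are made up for illustration:

```c
#include <stdio.h>

#define TX_BD_NUM 4     /* toy ring size for the example */

/* Returns 1 if num_frag + 1 consecutive slots starting at tail are free. */
static int ring_has_space(const unsigned int *app0, int tail, int num_frag)
{
        do {
                if (app0[tail])
                        return 0;                       /* slot still owned by hardware */
                tail = (tail + 1) % TX_BD_NUM;          /* wrap around the ring */
        } while (num_frag-- > 0);

        return 1;
}

int main(void)
{
        unsigned int app0[TX_BD_NUM] = { 0, 0, 1, 0 };  /* slot 2 still busy */

        printf("%d\n", ring_has_space(app0, 0, 1));     /* 1: slots 0 and 1 free */
        printf("%d\n", ring_has_space(app0, 1, 1));     /* 0: slot 2 is busy     */
        return 0;
}
```

Checking a whole frame's worth of descriptors up front is what allows the single-descriptor STS_CTRL_APP0_CMPLT test to be replaced in the next hunk.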
@@ -603,7 +630,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
         cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 
-        if (cur_p->app0 & STS_CTRL_APP0_CMPLT) {
+        if (temac_check_tx_bd_space(lp, num_frag)) {
                 if (!netif_queue_stopped(ndev)) {
                         netif_stop_queue(ndev);
                         return NETDEV_TX_BUSY;
@@ -613,29 +640,14 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
         cur_p->app0 = 0;
         if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                const struct iphdr *ip = ip_hdr(skb);
-                int length = 0, start = 0, insert = 0;
-
-                switch (ip->protocol) {
-                case IPPROTO_TCP:
-                        start = sizeof(struct iphdr) + ETH_HLEN;
-                        insert = sizeof(struct iphdr) + ETH_HLEN + 16;
-                        length = ip->tot_len - sizeof(struct iphdr);
-                        break;
-                case IPPROTO_UDP:
-                        start = sizeof(struct iphdr) + ETH_HLEN;
-                        insert = sizeof(struct iphdr) + ETH_HLEN + 6;
-                        length = ip->tot_len - sizeof(struct iphdr);
-                        break;
-                default:
-                        break;
-                }
-                cur_p->app1 = ((start << 16) | insert);
-                cur_p->app2 = csum_tcpudp_magic(ip->saddr, ip->daddr,
-                                                length, ip->protocol, 0);
-                skb->data[insert] = 0;
-                skb->data[insert + 1] = 0;
+                unsigned int csum_start_off = skb_transport_offset(skb);
+                unsigned int csum_index_off = csum_start_off + skb->csum_offset;
+
+                cur_p->app0 |= 1; /* TX Checksum Enabled */
+                cur_p->app1 = (csum_start_off << 16) | csum_index_off;
+                cur_p->app2 = 0;  /* initial checksum seed */
         }
+
         cur_p->app0 |= STS_CTRL_APP0_SOP;
         cur_p->len = skb_headlen(skb);
         cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len,
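The replacement code stops guessing header sizes with sizeof(struct iphdr) and instead uses the offsets the stack already provides for CHECKSUM_PARTIAL: skb_transport_offset() says where summing starts and skb->csum_offset says where the result must be inserted, which stays correct for both TCP and UDP and does not break when the headers differ from the hard-coded assumption. A worked example with assumed header sizes (14-byte Ethernet, 20-byte IPv4, TCP) shows what ends up in the app1 descriptor word:

```c
#include <stdio.h>

int main(void)
{
        /* Assumed example frame: 14-byte Ethernet + 20-byte IPv4 header,
         * with the TCP checksum field 16 bytes into the TCP header. */
        unsigned int csum_start_off = 14 + 20;              /* skb_transport_offset(skb) */
        unsigned int csum_index_off = csum_start_off + 16;  /* + skb->csum_offset        */
        unsigned int app1 = (csum_start_off << 16) | csum_index_off;

        /* "Start summing at byte 34, write the checksum at byte 50." */
        printf("app1 = 0x%08x\n", app1);                    /* prints app1 = 0x00220032 */
        return 0;
}
```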
@@ -699,6 +711,15 @@ static void ll_temac_recv(struct net_device *ndev)
                 skb->protocol = eth_type_trans(skb, ndev);
                 skb->ip_summed = CHECKSUM_NONE;
 
+                /* if we're doing rx csum offload, set it up */
+                if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
+                        (skb->protocol == __constant_htons(ETH_P_IP)) &&
+                        (skb->len > 64)) {
+
+                        skb->csum = cur_p->app3 & 0xFFFF;
+                        skb->ip_summed = CHECKSUM_COMPLETE;
+                }
+
                 netif_rx(skb);
 
                 ndev->stats.rx_packets++;
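On receive, the driver takes the low 16 bits of app3 as the checksum the TEMAC computed over the frame, and CHECKSUM_COMPLETE tells the stack that skb->csum already holds that value, so TCP/UDP checksums can be verified without walking the payload again in software. As a reminder of what that hardware value represents, here is a plain software version of the same running sum; this is a sketch for illustration, not driver code:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Running 16-bit ones'-complement sum, as used by IP/TCP/UDP checksums. */
static uint16_t ones_complement_sum(const uint8_t *data, size_t len)
{
        uint32_t sum = 0;

        while (len > 1) {
                sum += ((uint32_t)data[0] << 8) | data[1];
                data += 2;
                len -= 2;
        }
        if (len)                                /* odd trailing byte */
                sum += (uint32_t)data[0] << 8;

        while (sum >> 16)                       /* fold the carries back in */
                sum = (sum & 0xffff) + (sum >> 16);

        return (uint16_t)sum;
}

int main(void)
{
        const uint8_t payload[] = { 0x45, 0x00, 0x00, 0x1c };  /* arbitrary sample bytes */

        printf("0x%04x\n", ones_complement_sum(payload, sizeof(payload)));
        return 0;
}
```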
@@ -883,6 +904,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
         struct temac_local *lp;
         struct net_device *ndev;
         const void *addr;
+        __be32 *p;
         int size, rc = 0;
 
         /* Init network device structure */
@@ -926,6 +948,18 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
                 goto nodev;
         }
 
+        /* Setup checksum offload, but default to off if not specified */
+        lp->temac_features = 0;
+        p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
+        if (p && be32_to_cpu(*p)) {
+                lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
+                /* Can checksum TCP/UDP over IPv4. */
+                ndev->features |= NETIF_F_IP_CSUM;
+        }
+        p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
+        if (p && be32_to_cpu(*p))
+                lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
+
         /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
         np = of_parse_phandle(op->node, "llink-connected", 0);
         if (!np) {