Diffstat (limited to 'drivers/net/ethernet/calxeda/xgmac.c')
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 195
1 file changed, 116 insertions(+), 79 deletions(-)
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 7cb148c495c9..78d6d6b970e1 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -353,11 +353,9 @@ struct xgmac_extra_stats {
 	/* Receive errors */
 	unsigned long rx_watchdog;
 	unsigned long rx_da_filter_fail;
-	unsigned long rx_sa_filter_fail;
 	unsigned long rx_payload_error;
 	unsigned long rx_ip_header_error;
 	/* Tx/Rx IRQ errors */
-	unsigned long tx_undeflow;
 	unsigned long tx_process_stopped;
 	unsigned long rx_buf_unav;
 	unsigned long rx_process_stopped;
@@ -393,6 +391,7 @@ struct xgmac_priv {
 	char rx_pause;
 	char tx_pause;
 	int wolopts;
+	struct work_struct tx_timeout_work;
 };
 
 /* XGMAC Configuration Settings */
@@ -409,6 +408,9 @@ struct xgmac_priv {
 #define dma_ring_space(h, t, s)	CIRC_SPACE(h, t, s)
 #define dma_ring_cnt(h, t, s)	CIRC_CNT(h, t, s)
 
+#define tx_dma_ring_space(p) \
+	dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
+
 /* XGMAC Descriptor Access Helpers */
 static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
 {
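The new tx_dma_ring_space() helper is just dma_ring_space() — i.e. the kernel's CIRC_SPACE() macro — applied to the driver's tx_head/tx_tail indices. As a minimal standalone sketch of the arithmetic it relies on (assuming a power-of-two ring size, which DMA_TX_RING_SZ is):

/* One slot is kept empty so that head == tail unambiguously means
 * "ring empty"; the mask works because ring_sz is a power of two.
 */
static inline unsigned int ring_space(unsigned int head, unsigned int tail,
				      unsigned int ring_sz)
{
	return (tail - (head + 1)) & (ring_sz - 1);	/* free slots */
}

static inline unsigned int ring_cnt(unsigned int head, unsigned int tail,
				    unsigned int ring_sz)
{
	return (head - tail) & (ring_sz - 1);		/* slots in use */
}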
@@ -421,7 +423,7 @@ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
 
 static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
 {
-	u32 len = cpu_to_le32(p->flags);
+	u32 len = le32_to_cpu(p->buf_size);
 	return (len & DESC_BUFFER1_SZ_MASK) +
 		((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
 }
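The one-line change above fixes two bugs at once: desc_get_buf_len() read the wrong descriptor word (flags instead of buf_size) and converted in the wrong direction (cpu_to_le32() applied to a value being read out of a little-endian descriptor). A userspace sketch of the corrected shape, with le32toh() standing in for le32_to_cpu(); the struct layout and mask values here are illustrative, not the driver's exact definitions:

#include <endian.h>
#include <stdint.h>

#define DESC_BUFFER1_SZ_MASK	0x00001fff
#define DESC_BUFFER2_SZ_MASK	0x1fff0000
#define DESC_BUFFER2_SZ_OFFSET	16

struct dma_desc {
	uint32_t flags;
	uint32_t buf_size;	/* written little-endian by the hardware */
};

static int desc_get_buf_len(const struct dma_desc *p)
{
	/* Read the size word, not the flags word, and convert LE -> CPU. */
	uint32_t len = le32toh(p->buf_size);

	return (len & DESC_BUFFER1_SZ_MASK) +
	       ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}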
@@ -464,11 +466,23 @@ static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
 	p->flags = cpu_to_le32(tmpflags);
 }
 
+static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
+{
+	u32 tmpflags = le32_to_cpu(p->flags);
+	tmpflags &= TXDESC_END_RING;
+	p->flags = cpu_to_le32(tmpflags);
+}
+
 static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
 {
 	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
 }
 
+static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
+{
+	return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
+}
+
 static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
 {
 	return le32_to_cpu(p->buf1_addr);
@@ -609,10 +623,15 @@ static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
 {
 	u32 data;
 
-	data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
-	writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
-	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
-	writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+	if (addr) {
+		data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
+		writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
+		data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+		writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+	} else {
+		writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
+		writel(0, ioaddr + XGMAC_ADDR_LOW(num));
+	}
 }
 
 static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
@@ -683,9 +702,14 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
 		if (unlikely(skb == NULL))
 			break;
 
-		priv->rx_skbuff[entry] = skb;
 		paddr = dma_map_single(priv->device, skb->data,
-				       bufsz, DMA_FROM_DEVICE);
+				       priv->dma_buf_sz - NET_IP_ALIGN,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(priv->device, paddr)) {
+			dev_kfree_skb_any(skb);
+			break;
+		}
+		priv->rx_skbuff[entry] = skb;
 		desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
 	}
 
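The refill loop now checks the mapping with dma_mapping_error() and publishes the skb into rx_skbuff[] only after the mapping is known good, so the error path never leaves a dangling pointer in the ring. A hedged kernel-style fragment of that map-then-publish ordering, with hypothetical foo_* names (not from the driver):

static void foo_rx_refill_one(struct foo_priv *priv, int entry)
{
	struct sk_buff *skb =
		netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz);
	dma_addr_t paddr;

	if (!skb)
		return;

	paddr = dma_map_single(priv->device, skb->data,
			       priv->dma_buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, paddr)) {
		dev_kfree_skb_any(skb);		/* drop; refill retries later */
		return;
	}
	priv->rx_skbuff[entry] = skb;		/* publish only after success */
	desc_set_buf_addr(priv->dma_rx + entry, paddr, priv->dma_buf_sz);
}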
@@ -782,20 +806,21 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
 		return;
 
 	for (i = 0; i < DMA_RX_RING_SZ; i++) {
-		if (priv->rx_skbuff[i] == NULL)
+		struct sk_buff *skb = priv->rx_skbuff[i];
+		if (skb == NULL)
 			continue;
 
 		p = priv->dma_rx + i;
 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 priv->dma_buf_sz, DMA_FROM_DEVICE);
-		dev_kfree_skb_any(priv->rx_skbuff[i]);
+				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
 		priv->rx_skbuff[i] = NULL;
 	}
 }
 
 static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
 {
-	int i, f;
+	int i;
 	struct xgmac_dma_desc *p;
 
 	if (!priv->tx_skbuff)
@@ -806,16 +831,15 @@ static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
 			continue;
 
 		p = priv->dma_tx + i;
-		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 desc_get_buf_len(p), DMA_TO_DEVICE);
-
-		for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
-			p = priv->dma_tx + i++;
+		if (desc_get_tx_fs(p))
+			dma_unmap_single(priv->device, desc_get_buf_addr(p),
+					 desc_get_buf_len(p), DMA_TO_DEVICE);
+		else
 			dma_unmap_page(priv->device, desc_get_buf_addr(p),
 				       desc_get_buf_len(p), DMA_TO_DEVICE);
-		}
 
-		dev_kfree_skb_any(priv->tx_skbuff[i]);
+		if (desc_get_tx_ls(p))
+			dev_kfree_skb_any(priv->tx_skbuff[i]);
 		priv->tx_skbuff[i] = NULL;
 	}
 }
@@ -852,8 +876,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
  */
 static void xgmac_tx_complete(struct xgmac_priv *priv)
 {
-	int i;
-
 	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
 		unsigned int entry = priv->tx_tail;
 		struct sk_buff *skb = priv->tx_skbuff[entry];
@@ -863,55 +885,45 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
 		if (desc_get_owner(p))
 			break;
 
-		/* Verify tx error by looking at the last segment */
-		if (desc_get_tx_ls(p))
-			desc_get_tx_status(priv, p);
-
 		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
 			   priv->tx_head, priv->tx_tail);
 
-		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 desc_get_buf_len(p), DMA_TO_DEVICE);
-
-		priv->tx_skbuff[entry] = NULL;
-		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
-
-		if (!skb) {
-			continue;
-		}
-
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
-							      DMA_TX_RING_SZ);
-			p = priv->dma_tx + priv->tx_tail;
-
+		if (desc_get_tx_fs(p))
+			dma_unmap_single(priv->device, desc_get_buf_addr(p),
+					 desc_get_buf_len(p), DMA_TO_DEVICE);
+		else
 			dma_unmap_page(priv->device, desc_get_buf_addr(p),
 				       desc_get_buf_len(p), DMA_TO_DEVICE);
+
+		/* Check tx error on the last segment */
+		if (desc_get_tx_ls(p)) {
+			desc_get_tx_status(priv, p);
+			dev_kfree_skb(skb);
 		}
 
-		dev_kfree_skb(skb);
+		priv->tx_skbuff[entry] = NULL;
+		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
 	}
 
-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
-	    MAX_SKB_FRAGS)
+	/* Ensure tx_tail is visible to xgmac_xmit */
+	smp_mb();
+	if (unlikely(netif_queue_stopped(priv->dev) &&
+	    (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
 		netif_wake_queue(priv->dev);
 }
 
-/**
- * xgmac_tx_err:
- * @priv: pointer to the private device structure
- * Description: it cleans the descriptors and restarts the transmission
- * in case of errors.
- */
-static void xgmac_tx_err(struct xgmac_priv *priv)
+static void xgmac_tx_timeout_work(struct work_struct *work)
 {
-	u32 reg, value, inten;
+	u32 reg, value;
+	struct xgmac_priv *priv =
+		container_of(work, struct xgmac_priv, tx_timeout_work);
 
-	netif_stop_queue(priv->dev);
+	napi_disable(&priv->napi);
 
-	inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
 
+	netif_tx_lock(priv->dev);
+
 	reg = readl(priv->base + XGMAC_DMA_CONTROL);
 	writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
 	do {
@@ -927,9 +939,15 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
 
 	writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
 		priv->base + XGMAC_DMA_STATUS);
-	writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
 
+	netif_tx_unlock(priv->dev);
 	netif_wake_queue(priv->dev);
+
+	napi_enable(&priv->napi);
+
+	/* Enable interrupts */
+	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
+	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
 }
 
 static int xgmac_hw_init(struct net_device *dev)
@@ -957,9 +975,7 @@ static int xgmac_hw_init(struct net_device *dev)
 		DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
 	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
 
-	/* Enable interrupts */
-	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
-	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+	writel(0, ioaddr + XGMAC_DMA_INTR_ENA);
 
 	/* Mask power mgt interrupt */
 	writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
@@ -1027,6 +1043,10 @@ static int xgmac_open(struct net_device *dev)
 	napi_enable(&priv->napi);
 	netif_start_queue(dev);
 
+	/* Enable interrupts */
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
 	return 0;
 }
 
@@ -1087,7 +1107,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(priv->device, paddr)) {
 		dev_kfree_skb(skb);
-		return -EIO;
+		return NETDEV_TX_OK;
 	}
 	priv->tx_skbuff[entry] = skb;
 	desc_set_buf_addr_and_size(desc, paddr, len);
@@ -1099,14 +1119,12 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		paddr = skb_frag_dma_map(priv->device, frag, 0, len,
 					 DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->device, paddr)) {
-			dev_kfree_skb(skb);
-			return -EIO;
-		}
+		if (dma_mapping_error(priv->device, paddr))
+			goto dma_err;
 
 		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
 		desc = priv->dma_tx + entry;
-		priv->tx_skbuff[entry] = NULL;
+		priv->tx_skbuff[entry] = skb;
 
 		desc_set_buf_addr_and_size(desc, paddr, len);
 		if (i < (nfrags - 1))
@@ -1124,13 +1142,35 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	wmb();
 	desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
 
+	writel(1, priv->base + XGMAC_DMA_TX_POLL);
+
 	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
 
-	writel(1, priv->base + XGMAC_DMA_TX_POLL);
-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
-	    MAX_SKB_FRAGS)
+	/* Ensure tx_head update is visible to tx completion */
+	smp_mb();
+	if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
 		netif_stop_queue(dev);
+		/* Ensure netif_stop_queue is visible to tx completion */
+		smp_mb();
+		if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
+			netif_start_queue(dev);
+	}
+	return NETDEV_TX_OK;
 
+dma_err:
+	entry = priv->tx_head;
+	for ( ; i > 0; i--) {
+		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
+		desc = priv->dma_tx + entry;
+		priv->tx_skbuff[entry] = NULL;
+		dma_unmap_page(priv->device, desc_get_buf_addr(desc),
+			       desc_get_buf_len(desc), DMA_TO_DEVICE);
+		desc_clear_tx_owner(desc);
+	}
+	desc = first;
+	dma_unmap_single(priv->device, desc_get_buf_addr(desc),
+			 desc_get_buf_len(desc), DMA_TO_DEVICE);
+	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
 
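Together with the wake-side barrier added in xgmac_tx_complete() above, this hunk implements the standard lock-free stop/wake handshake: the producer stops the queue only after a full barrier and then re-checks for space, while the consumer advances tx_tail, issues a full barrier, and wakes the queue only if it observes it stopped. The barriers pair so that at least one side always sees the other's update, which is what prevents the queue from stalling forever with a half-empty ring. A compilable userspace analogue using C11 atomics (all names illustrative; seq_cst fences stand in for smp_mb()):

#include <stdatomic.h>
#include <stdbool.h>

#define RING_SZ   256	/* power of two, like DMA_TX_RING_SZ */
#define MAX_FRAGS 17	/* stand-in for MAX_SKB_FRAGS */

static atomic_uint head, tail;
static atomic_bool queue_stopped;

static unsigned int space(void)
{
	return (atomic_load(&tail) - (atomic_load(&head) + 1)) & (RING_SZ - 1);
}

static void xmit_side(void)			/* cf. xgmac_xmit() */
{
	atomic_fetch_add(&head, 1);		/* publish descriptor */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() analogue */
	if (space() <= MAX_FRAGS) {
		atomic_store(&queue_stopped, true);	/* netif_stop_queue() */
		atomic_thread_fence(memory_order_seq_cst);
		if (space() > MAX_FRAGS)	/* completion raced with us */
			atomic_store(&queue_stopped, false);
	}
}

static void completion_side(void)		/* cf. xgmac_tx_complete() */
{
	atomic_fetch_add(&tail, 1);		/* retire descriptor */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() analogue */
	if (atomic_load(&queue_stopped) && space() > MAX_FRAGS)
		atomic_store(&queue_stopped, false);	/* netif_wake_queue() */
}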
@@ -1174,7 +1214,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
 
 		skb_put(skb, frame_len);
 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 frame_len, DMA_FROM_DEVICE);
+				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
 
 		skb->protocol = eth_type_trans(skb, priv->dev);
 		skb->ip_summed = ip_checksum;
@@ -1225,9 +1265,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
 static void xgmac_tx_timeout(struct net_device *dev)
 {
 	struct xgmac_priv *priv = netdev_priv(dev);
-
-	/* Clear Tx resources and restart transmitting again */
-	xgmac_tx_err(priv);
+	schedule_work(&priv->tx_timeout_work);
 }
 
 /**
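Replacing the inline xgmac_tx_err() call with schedule_work() matters because ndo_tx_timeout runs in atomic context (from the watchdog timer, with the tx lock held), while the new recovery path sleeps in napi_disable(). The general defer-to-workqueue pattern, sketched for a hypothetical "foo" driver (the xgmac version additionally resets the DMA engine between these steps):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct net_device *dev;
	struct napi_struct napi;
	struct work_struct tx_timeout_work;
};

static void foo_tx_timeout_work(struct work_struct *work)
{
	struct foo_priv *priv =
		container_of(work, struct foo_priv, tx_timeout_work);

	napi_disable(&priv->napi);	/* may sleep: process context only */
	/* ... stop DMA, reclaim tx descriptors, restart DMA ... */
	netif_wake_queue(priv->dev);
	napi_enable(&priv->napi);
}

static void foo_tx_timeout(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	schedule_work(&priv->tx_timeout_work);	/* safe in atomic context */
}

/* at probe time: INIT_WORK(&priv->tx_timeout_work, foo_tx_timeout_work); */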
@@ -1286,6 +1324,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
 	if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
 		use_hash = true;
 		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
+	} else {
+		use_hash = false;
 	}
 	netdev_for_each_mc_addr(ha, dev) {
 		if (use_hash) {
@@ -1302,6 +1342,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
 	}
 
 out:
+	for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
+		xgmac_set_mac_addr(ioaddr, NULL, reg);
 	for (i = 0; i < XGMAC_NUM_HASH; i++)
 		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
 
@@ -1366,7 +1408,6 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
 static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
 {
 	u32 intr_status;
-	bool tx_err = false;
 	struct net_device *dev = (struct net_device *)dev_id;
 	struct xgmac_priv *priv = netdev_priv(dev);
 	struct xgmac_extra_stats *x = &priv->xstats;
@@ -1396,16 +1437,12 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
 		if (intr_status & DMA_STATUS_TPS) {
 			netdev_err(priv->dev, "transmit process stopped\n");
 			x->tx_process_stopped++;
-			tx_err = true;
+			schedule_work(&priv->tx_timeout_work);
 		}
 		if (intr_status & DMA_STATUS_FBI) {
 			netdev_err(priv->dev, "fatal bus error\n");
 			x->fatal_bus_error++;
-			tx_err = true;
 		}
-
-		if (tx_err)
-			xgmac_tx_err(priv);
 	}
 
 	/* TX/RX NORMAL interrupts */
@@ -1569,7 +1606,6 @@ static const struct xgmac_stats xgmac_gstrings_stats[] = {
 	XGMAC_STAT(rx_payload_error),
 	XGMAC_STAT(rx_ip_header_error),
 	XGMAC_STAT(rx_da_filter_fail),
-	XGMAC_STAT(rx_sa_filter_fail),
 	XGMAC_STAT(fatal_bus_error),
 	XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
 	XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
@@ -1708,6 +1744,7 @@ static int xgmac_probe(struct platform_device *pdev)
 	ndev->netdev_ops = &xgmac_netdev_ops;
 	SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
 	spin_lock_init(&priv->stats_lock);
+	INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
 
 	priv->device = &pdev->dev;
 	priv->dev = ndev;
@@ -1759,7 +1796,7 @@ static int xgmac_probe(struct platform_device *pdev)
 	if (device_can_wakeup(priv->device))
 		priv->wolopts = WAKE_MAGIC;	/* Magic Frame as default */
 
-	ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+	ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
 	if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
 		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 				     NETIF_F_RXCSUM;