author	Claudiu Manoil <claudiu.manoil@freescale.com>	2015-07-13 09:22:04 -0400
committer	David S. Miller <davem@davemloft.net>	2015-07-15 20:13:24 -0400
commit	f966082e2065d223942cc40e0bc4841f84f0604d (patch)
tree	d747fd4bdc788e3dbd482b7697dc31ff7ae62df7
parent	76f31e8b0911e620ac9191c8d3775cc91ed65c4c (diff)
gianfar: Fix and cleanup rxbd status handling
There are several long-standing problems with how the status field of the rx buffer descriptor (rxbd) is currently handled on the error path:
- too many unnecessary 16-bit reads of the two halves of the 32-bit rxbd status field, also resulting in overuse of endianness conversion macros;
- "bdp->status = RXBD_LARGE" makes no sense, since the "large" flag is read-only (only eTSEC can write it), and trying to clear the other status bits is also error prone in this context (most of the rx status bits are read-only anyway).

This is fixed with a single 32-bit read of the "status" field, after which the appropriate 16-bit shifting is applied to access the various status bits or the rx frame length. Also corrected the use of the RXBD_LARGE flag.

Additional fix: the "rx_over_errors" stat is incremented instead of "rx_crc_errors" when an RXBD_OVERRUN occurs.

Signed-off-by: Claudiu Manoil <claudiu.manoil@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
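For context, below is a minimal standalone sketch of the access pattern the patch moves to: "lstatus" is read once as a 32-bit word, the status flags sit in the upper 16 bits (accessed via the BD_LFLAG() shift) and the frame length in the lower 16 bits (masked with BD_LENGTH_MASK). The macro bodies are assumed to match gianfar.h, and the RXBD_* values shown here are illustrative placeholders rather than the driver's real definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed helpers, modeled on gianfar.h: the upper 16 bits of the 32-bit
 * "lstatus" word hold the rxbd status flags, the lower 16 bits hold the
 * received frame length.
 */
#define BD_LFLAG(flags)		((uint32_t)(flags) << 16)
#define BD_LENGTH_MASK		0x0000ffff

/* Illustrative flag values only; the real ones are defined in gianfar.h. */
#define RXBD_EMPTY		0x8000
#define RXBD_LAST		0x0800
#define RXBD_ERR		0x003f	/* hypothetical aggregate error mask */

int main(void)
{
	/* In the driver this would be a single descriptor read:
	 *	lstatus = be32_to_cpu(bdp->lstatus);
	 */
	uint32_t lstatus = BD_LFLAG(RXBD_LAST) | 64;	/* "last" frame, 64 bytes */

	if (lstatus & BD_LFLAG(RXBD_EMPTY)) {
		puts("descriptor still owned by hardware");
		return 0;
	}

	if (!(lstatus & BD_LFLAG(RXBD_LAST)) || (lstatus & BD_LFLAG(RXBD_ERR)))
		puts("faulty frame: count_errors(lstatus, dev) would run");
	else
		printf("good frame, length = %u bytes\n",
		       (unsigned int)(lstatus & BD_LENGTH_MASK));

	return 0;
}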
-rw-r--r--	drivers/net/ethernet/freescale/gianfar.c	34
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index b35bf3de44e0..c839e7628181 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2756,14 +2756,14 @@ static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
 	rx_queue->next_to_use = i;
 }
 
-static inline void count_errors(unsigned short status, struct net_device *dev)
+static void count_errors(u32 lstatus, struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	struct net_device_stats *stats = &dev->stats;
 	struct gfar_extra_stats *estats = &priv->extra_stats;
 
 	/* If the packet was truncated, none of the other errors matter */
-	if (status & RXBD_TRUNCATED) {
+	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
 		stats->rx_length_errors++;
 
 		atomic64_inc(&estats->rx_trunc);
@@ -2771,25 +2771,25 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
 		return;
 	}
 	/* Count the errors, if there were any */
-	if (status & (RXBD_LARGE | RXBD_SHORT)) {
+	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
 		stats->rx_length_errors++;
 
-		if (status & RXBD_LARGE)
+		if (lstatus & BD_LFLAG(RXBD_LARGE))
 			atomic64_inc(&estats->rx_large);
 		else
 			atomic64_inc(&estats->rx_short);
 	}
-	if (status & RXBD_NONOCTET) {
+	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
 		stats->rx_frame_errors++;
 		atomic64_inc(&estats->rx_nonoctet);
 	}
-	if (status & RXBD_CRCERR) {
+	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
 		atomic64_inc(&estats->rx_crcerr);
 		stats->rx_crc_errors++;
 	}
-	if (status & RXBD_OVERRUN) {
+	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
 		atomic64_inc(&estats->rx_overrun);
-		stats->rx_crc_errors++;
+		stats->rx_over_errors++;
 	}
 }
 
@@ -2921,6 +2921,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 	i = rx_queue->next_to_clean;
 
 	while (rx_work_limit--) {
+		u32 lstatus;
 
 		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
 			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
@@ -2928,7 +2929,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 		}
 
 		bdp = &rx_queue->rx_bd_base[i];
-		if (be16_to_cpu(bdp->status) & RXBD_EMPTY)
+		lstatus = be32_to_cpu(bdp->lstatus);
+		if (lstatus & BD_LFLAG(RXBD_EMPTY))
 			break;
 
 		/* order rx buffer descriptor reads */
@@ -2940,13 +2942,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
 				 priv->rx_buffer_size, DMA_FROM_DEVICE);
 
-		if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
-			     be16_to_cpu(bdp->length) > priv->rx_buffer_size))
-			bdp->status = cpu_to_be16(RXBD_LARGE);
+		if (unlikely(!(lstatus & BD_LFLAG(RXBD_ERR)) &&
+			     (lstatus & BD_LENGTH_MASK) > priv->rx_buffer_size))
+			lstatus |= BD_LFLAG(RXBD_LARGE);
 
-		if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_LAST) ||
-			     be16_to_cpu(bdp->status) & RXBD_ERR)) {
-			count_errors(be16_to_cpu(bdp->status), dev);
+		if (unlikely(!(lstatus & BD_LFLAG(RXBD_LAST)) ||
+			     (lstatus & BD_LFLAG(RXBD_ERR)))) {
+			count_errors(lstatus, dev);
 
 			/* discard faulty buffer */
 			dev_kfree_skb(skb);
@@ -2957,7 +2959,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 		howmany++;
 
 		if (likely(skb)) {
-			int pkt_len = be16_to_cpu(bdp->length) -
+			int pkt_len = (lstatus & BD_LENGTH_MASK) -
 				      ETH_FCS_LEN;
 			/* Remove the FCS from the packet length */
 			skb_put(skb, pkt_len);