author		Kevin Hao <haokexin@gmail.com>		2014-12-11 01:08:41 -0500
committer	David S. Miller <davem@davemloft.net>	2014-12-11 14:27:14 -0500
commit		0a4b5a2488347a53ad695cf6a87e4b8fbede9eaa (patch)
tree		b6480cbc3aef12d0905490a0fc91ef64011b594a
parent		666224d4d582661e3c7d321c23e8c69e4068ee55 (diff)
gianfar: Fix dma check map error when DMA_API_DEBUG is enabled
We need to use dma_mapping_error() to check the dma address returned by
dma_map_single/page(). Otherwise we would get a warning like this:

WARNING: at lib/dma-debug.c:1140
Modules linked in:
CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.18.0-rc2-next-20141029 #196
task: c0834300 ti: effe6000 task.ti: c0874000
NIP: c02b2c98 LR: c02b2c98 CTR: c030abc4
REGS: effe7d70 TRAP: 0700   Not tainted  (3.18.0-rc2-next-20141029)
MSR: 00021000 <CE,ME>  CR: 22044022  XER: 20000000
GPR00: c02b2c98 effe7e20 c0834300 00000098 00021000 00000000 c030b898 00000003
GPR08: 00000001 00000000 00000001 749eec9d 22044022 1001abe0 00000020 ef278678
GPR16: ef278670 ef278668 ef278660 070a8040 c087f99c c08cdc60 00029000 c0840d44
GPR24: c08be6e8 c0840000 effe7e78 ef041340 00000600 ef114e10 00000000 c08be6e0
NIP [c02b2c98] check_unmap+0x51c/0x9e4
LR [c02b2c98] check_unmap+0x51c/0x9e4
Call Trace:
[effe7e20] [c02b2c98] check_unmap+0x51c/0x9e4 (unreliable)
[effe7e70] [c02b31d8] debug_dma_unmap_page+0x78/0x8c
[effe7ed0] [c03d1640] gfar_clean_rx_ring+0x208/0x488
[effe7f40] [c03d1a9c] gfar_poll_rx_sq+0x3c/0xa8
[effe7f60] [c04f8714] net_rx_action+0xc0/0x178
[effe7f90] [c00435a0] __do_softirq+0x100/0x1fc
[effe7fe0] [c0043958] irq_exit+0xa4/0xc8
[effe7ff0] [c000d14c] call_do_irq+0x24/0x3c
[c0875e90] [c00048a0] do_IRQ+0x8c/0xf8
[c0875eb0] [c000ed10] ret_from_except+0x0/0x18

For TX, we need to unmap the pages which have already been mapped and
free the skb before returning. For RX, move the dma mapping and error
check into gfar_new_skb(). We reuse the original skb in the rx ring
when either the skb allocation or the dma mapping fails.

Signed-off-by: Kevin Hao <haokexin@gmail.com>
Signed-off-by: Claudiu Manoil <claudiu.manoil@freescale.com>
Reviewed-by: Claudiu Manoil <claudiu.manoil@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
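In short, the fix applies the standard DMA API error-check pattern after
every mapping call. A minimal sketch, assuming a generic device and skb
("dev", "skb" and the dma_map_err label here are illustrative placeholders,
not the driver's exact code):

	dma_addr_t bufaddr;

	/* map the linear part of the skb for device DMA */
	bufaddr = dma_map_single(dev, skb->data, skb_headlen(skb),
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, bufaddr)))
		goto dma_map_err;	/* unwind earlier mappings, free the skb */

Skipping the dma_mapping_error() check is what trips the mapping-error
tracking in lib/dma-debug.c at unmap time, producing the warning quoted
above.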
-rw-r--r--	drivers/net/ethernet/freescale/gianfar.c	84
1 file changed, 56 insertions, 28 deletions
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 86dccb26fecc..7402ab12e46b 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -116,9 +116,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
-struct sk_buff *gfar_new_skb(struct net_device *dev);
-static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
-			   struct sk_buff *skb);
+struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -180,6 +178,7 @@ static int gfar_init_bds(struct net_device *ndev)
 	struct rxbd8 *rxbdp;
 	u32 *rfbptr;
 	int i, j;
+	dma_addr_t bufaddr;
 
 	for (i = 0; i < priv->num_tx_queues; i++) {
 		tx_queue = priv->tx_queue[i];
@@ -214,19 +213,17 @@ static int gfar_init_bds(struct net_device *ndev)
 			struct sk_buff *skb = rx_queue->rx_skbuff[j];
 
 			if (skb) {
-				gfar_init_rxbdp(rx_queue, rxbdp,
-						rxbdp->bufPtr);
+				bufaddr = rxbdp->bufPtr;
 			} else {
-				skb = gfar_new_skb(ndev);
+				skb = gfar_new_skb(ndev, &bufaddr);
 				if (!skb) {
 					netdev_err(ndev, "Can't allocate RX buffers\n");
 					return -ENOMEM;
 				}
 				rx_queue->rx_skbuff[j] = skb;
-
-				gfar_new_rxbdp(rx_queue, rxbdp, skb);
 			}
 
+			gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
 			rxbdp++;
 		}
 
@@ -2319,6 +2316,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 						   0,
 						   frag_len,
 						   DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+			goto dma_map_err;
 
 		/* set the TxBD length and buffer pointer */
 		txbdp->bufPtr = bufaddr;
@@ -2368,8 +2367,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			fcb->ptp = 1;
 	}
 
-	txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
-					     skb_headlen(skb), DMA_TO_DEVICE);
+	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
+				 DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+		goto dma_map_err;
+
+	txbdp_start->bufPtr = bufaddr;
 
 	/* If time stamping is requested one additional TxBD must be set up. The
 	 * first TxBD points to the FCB and must have a data length of
@@ -2435,6 +2438,25 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	return NETDEV_TX_OK;
+
+dma_map_err:
+	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
+	if (do_tstamp)
+		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+	for (i = 0; i < nr_frags; i++) {
+		lstatus = txbdp->lstatus;
+		if (!(lstatus & BD_LFLAG(TXBD_READY)))
+			break;
+
+		txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY);
+		bufaddr = txbdp->bufPtr;
+		dma_unmap_page(priv->dev, bufaddr, txbdp->length,
+			       DMA_TO_DEVICE);
+		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+	}
+	gfar_wmb();
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
 }
 
 /* Stops the kernel queue, and halts the controller */
@@ -2635,18 +2657,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
 }
 
-static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
-			   struct sk_buff *skb)
-{
-	struct net_device *dev = rx_queue->dev;
-	struct gfar_private *priv = netdev_priv(dev);
-	dma_addr_t buf;
-
-	buf = dma_map_single(priv->dev, skb->data,
-			     priv->rx_buffer_size, DMA_FROM_DEVICE);
-	gfar_init_rxbdp(rx_queue, bdp, buf);
-}
-
 static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
@@ -2661,9 +2671,25 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
 	return skb;
 }
 
-struct sk_buff *gfar_new_skb(struct net_device *dev)
+struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
 {
-	return gfar_alloc_skb(dev);
+	struct gfar_private *priv = netdev_priv(dev);
+	struct sk_buff *skb;
+	dma_addr_t addr;
+
+	skb = gfar_alloc_skb(dev);
+	if (!skb)
+		return NULL;
+
+	addr = dma_map_single(priv->dev, skb->data,
+			      priv->rx_buffer_size, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(priv->dev, addr))) {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+
+	*bufaddr = addr;
+	return skb;
 }
 
 static inline void count_errors(unsigned short status, struct net_device *dev)
@@ -2834,11 +2860,12 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 
 	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
 		struct sk_buff *newskb;
+		dma_addr_t bufaddr;
 
 		rmb();
 
 		/* Add another skb for the future */
-		newskb = gfar_new_skb(dev);
+		newskb = gfar_new_skb(dev, &bufaddr);
 
 		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
@@ -2854,9 +2881,10 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 			     bdp->status & RXBD_ERR)) {
 			count_errors(bdp->status, dev);
 
-			if (unlikely(!newskb))
+			if (unlikely(!newskb)) {
 				newskb = skb;
-			else if (skb)
+				bufaddr = bdp->bufPtr;
+			} else if (skb)
 				dev_kfree_skb(skb);
 		} else {
 			/* Increment the number of packets */
@@ -2883,7 +2911,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
 
 		/* Setup the new bdp */
-		gfar_new_rxbdp(rx_queue, bdp, newskb);
+		gfar_init_rxbdp(rx_queue, bdp, bufaddr);
 
 		/* Update Last Free RxBD pointer for LFC */
 		if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))