diff options
| -rw-r--r-- | drivers/net/ethernet/freescale/gianfar.c | 84 |
1 file changed, 56 insertions(+), 28 deletions(-)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 86dccb26fecc..7402ab12e46b 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
| @@ -116,9 +116,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); | |||
| 116 | static void gfar_reset_task(struct work_struct *work); | 116 | static void gfar_reset_task(struct work_struct *work); |
| 117 | static void gfar_timeout(struct net_device *dev); | 117 | static void gfar_timeout(struct net_device *dev); |
| 118 | static int gfar_close(struct net_device *dev); | 118 | static int gfar_close(struct net_device *dev); |
| 119 | struct sk_buff *gfar_new_skb(struct net_device *dev); | 119 | struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr); |
| 120 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, | ||
| 121 | struct sk_buff *skb); | ||
| 122 | static int gfar_set_mac_address(struct net_device *dev); | 120 | static int gfar_set_mac_address(struct net_device *dev); |
| 123 | static int gfar_change_mtu(struct net_device *dev, int new_mtu); | 121 | static int gfar_change_mtu(struct net_device *dev, int new_mtu); |
| 124 | static irqreturn_t gfar_error(int irq, void *dev_id); | 122 | static irqreturn_t gfar_error(int irq, void *dev_id); |
| @@ -180,6 +178,7 @@ static int gfar_init_bds(struct net_device *ndev) | |||
| 180 | struct rxbd8 *rxbdp; | 178 | struct rxbd8 *rxbdp; |
| 181 | u32 *rfbptr; | 179 | u32 *rfbptr; |
| 182 | int i, j; | 180 | int i, j; |
| 181 | dma_addr_t bufaddr; | ||
| 183 | 182 | ||
| 184 | for (i = 0; i < priv->num_tx_queues; i++) { | 183 | for (i = 0; i < priv->num_tx_queues; i++) { |
| 185 | tx_queue = priv->tx_queue[i]; | 184 | tx_queue = priv->tx_queue[i]; |
| @@ -214,19 +213,17 @@ static int gfar_init_bds(struct net_device *ndev) | |||
| 214 | struct sk_buff *skb = rx_queue->rx_skbuff[j]; | 213 | struct sk_buff *skb = rx_queue->rx_skbuff[j]; |
| 215 | 214 | ||
| 216 | if (skb) { | 215 | if (skb) { |
| 217 | gfar_init_rxbdp(rx_queue, rxbdp, | 216 | bufaddr = rxbdp->bufPtr; |
| 218 | rxbdp->bufPtr); | ||
| 219 | } else { | 217 | } else { |
| 220 | skb = gfar_new_skb(ndev); | 218 | skb = gfar_new_skb(ndev, &bufaddr); |
| 221 | if (!skb) { | 219 | if (!skb) { |
| 222 | netdev_err(ndev, "Can't allocate RX buffers\n"); | 220 | netdev_err(ndev, "Can't allocate RX buffers\n"); |
| 223 | return -ENOMEM; | 221 | return -ENOMEM; |
| 224 | } | 222 | } |
| 225 | rx_queue->rx_skbuff[j] = skb; | 223 | rx_queue->rx_skbuff[j] = skb; |
| 226 | |||
| 227 | gfar_new_rxbdp(rx_queue, rxbdp, skb); | ||
| 228 | } | 224 | } |
| 229 | 225 | ||
| 226 | gfar_init_rxbdp(rx_queue, rxbdp, bufaddr); | ||
| 230 | rxbdp++; | 227 | rxbdp++; |
| 231 | } | 228 | } |
| 232 | 229 | ||
| @@ -2319,6 +2316,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2319 | 0, | 2316 | 0, |
| 2320 | frag_len, | 2317 | frag_len, |
| 2321 | DMA_TO_DEVICE); | 2318 | DMA_TO_DEVICE); |
| 2319 | if (unlikely(dma_mapping_error(priv->dev, bufaddr))) | ||
| 2320 | goto dma_map_err; | ||
| 2322 | 2321 | ||
| 2323 | /* set the TxBD length and buffer pointer */ | 2322 | /* set the TxBD length and buffer pointer */ |
| 2324 | txbdp->bufPtr = bufaddr; | 2323 | txbdp->bufPtr = bufaddr; |
| @@ -2368,8 +2367,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2368 | fcb->ptp = 1; | 2367 | fcb->ptp = 1; |
| 2369 | } | 2368 | } |
| 2370 | 2369 | ||
| 2371 | txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data, | 2370 | bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), |
| 2372 | skb_headlen(skb), DMA_TO_DEVICE); | 2371 | DMA_TO_DEVICE); |
| 2372 | if (unlikely(dma_mapping_error(priv->dev, bufaddr))) | ||
| 2373 | goto dma_map_err; | ||
| 2374 | |||
| 2375 | txbdp_start->bufPtr = bufaddr; | ||
| 2373 | 2376 | ||
| 2374 | /* If time stamping is requested one additional TxBD must be set up. The | 2377 | /* If time stamping is requested one additional TxBD must be set up. The |
| 2375 | * first TxBD points to the FCB and must have a data length of | 2378 | * first TxBD points to the FCB and must have a data length of |
| @@ -2435,6 +2438,25 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2435 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | 2438 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
| 2436 | 2439 | ||
| 2437 | return NETDEV_TX_OK; | 2440 | return NETDEV_TX_OK; |
| 2441 | |||
| 2442 | dma_map_err: | ||
| 2443 | txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size); | ||
| 2444 | if (do_tstamp) | ||
| 2445 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); | ||
| 2446 | for (i = 0; i < nr_frags; i++) { | ||
| 2447 | lstatus = txbdp->lstatus; | ||
| 2448 | if (!(lstatus & BD_LFLAG(TXBD_READY))) | ||
| 2449 | break; | ||
| 2450 | |||
| 2451 | txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY); | ||
| 2452 | bufaddr = txbdp->bufPtr; | ||
| 2453 | dma_unmap_page(priv->dev, bufaddr, txbdp->length, | ||
| 2454 | DMA_TO_DEVICE); | ||
| 2455 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); | ||
| 2456 | } | ||
| 2457 | gfar_wmb(); | ||
| 2458 | dev_kfree_skb_any(skb); | ||
| 2459 | return NETDEV_TX_OK; | ||
| 2438 | } | 2460 | } |
| 2439 | 2461 | ||
| 2440 | /* Stops the kernel queue, and halts the controller */ | 2462 | /* Stops the kernel queue, and halts the controller */ |
| @@ -2635,18 +2657,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) | |||
| 2635 | netdev_tx_completed_queue(txq, howmany, bytes_sent); | 2657 | netdev_tx_completed_queue(txq, howmany, bytes_sent); |
| 2636 | } | 2658 | } |
| 2637 | 2659 | ||
| 2638 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, | ||
| 2639 | struct sk_buff *skb) | ||
| 2640 | { | ||
| 2641 | struct net_device *dev = rx_queue->dev; | ||
| 2642 | struct gfar_private *priv = netdev_priv(dev); | ||
| 2643 | dma_addr_t buf; | ||
| 2644 | |||
| 2645 | buf = dma_map_single(priv->dev, skb->data, | ||
| 2646 | priv->rx_buffer_size, DMA_FROM_DEVICE); | ||
| 2647 | gfar_init_rxbdp(rx_queue, bdp, buf); | ||
| 2648 | } | ||
| 2649 | |||
| 2650 | static struct sk_buff *gfar_alloc_skb(struct net_device *dev) | 2660 | static struct sk_buff *gfar_alloc_skb(struct net_device *dev) |
| 2651 | { | 2661 | { |
| 2652 | struct gfar_private *priv = netdev_priv(dev); | 2662 | struct gfar_private *priv = netdev_priv(dev); |
| @@ -2661,9 +2671,25 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev) | |||
| 2661 | return skb; | 2671 | return skb; |
| 2662 | } | 2672 | } |
| 2663 | 2673 | ||
| 2664 | struct sk_buff *gfar_new_skb(struct net_device *dev) | 2674 | struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr) |
| 2665 | { | 2675 | { |
| 2666 | return gfar_alloc_skb(dev); | 2676 | struct gfar_private *priv = netdev_priv(dev); |
| 2677 | struct sk_buff *skb; | ||
| 2678 | dma_addr_t addr; | ||
| 2679 | |||
| 2680 | skb = gfar_alloc_skb(dev); | ||
| 2681 | if (!skb) | ||
| 2682 | return NULL; | ||
| 2683 | |||
| 2684 | addr = dma_map_single(priv->dev, skb->data, | ||
| 2685 | priv->rx_buffer_size, DMA_FROM_DEVICE); | ||
| 2686 | if (unlikely(dma_mapping_error(priv->dev, addr))) { | ||
| 2687 | dev_kfree_skb_any(skb); | ||
| 2688 | return NULL; | ||
| 2689 | } | ||
| 2690 | |||
| 2691 | *bufaddr = addr; | ||
| 2692 | return skb; | ||
| 2667 | } | 2693 | } |
| 2668 | 2694 | ||
| 2669 | static inline void count_errors(unsigned short status, struct net_device *dev) | 2695 | static inline void count_errors(unsigned short status, struct net_device *dev) |
| @@ -2834,11 +2860,12 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) | |||
| 2834 | 2860 | ||
| 2835 | while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { | 2861 | while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { |
| 2836 | struct sk_buff *newskb; | 2862 | struct sk_buff *newskb; |
| 2863 | dma_addr_t bufaddr; | ||
| 2837 | 2864 | ||
| 2838 | rmb(); | 2865 | rmb(); |
| 2839 | 2866 | ||
| 2840 | /* Add another skb for the future */ | 2867 | /* Add another skb for the future */ |
| 2841 | newskb = gfar_new_skb(dev); | 2868 | newskb = gfar_new_skb(dev, &bufaddr); |
| 2842 | 2869 | ||
| 2843 | skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; | 2870 | skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; |
| 2844 | 2871 | ||
| @@ -2854,9 +2881,10 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) | |||
| 2854 | bdp->status & RXBD_ERR)) { | 2881 | bdp->status & RXBD_ERR)) { |
| 2855 | count_errors(bdp->status, dev); | 2882 | count_errors(bdp->status, dev); |
| 2856 | 2883 | ||
| 2857 | if (unlikely(!newskb)) | 2884 | if (unlikely(!newskb)) { |
| 2858 | newskb = skb; | 2885 | newskb = skb; |
| 2859 | else if (skb) | 2886 | bufaddr = bdp->bufPtr; |
| 2887 | } else if (skb) | ||
| 2860 | dev_kfree_skb(skb); | 2888 | dev_kfree_skb(skb); |
| 2861 | } else { | 2889 | } else { |
| 2862 | /* Increment the number of packets */ | 2890 | /* Increment the number of packets */ |
| @@ -2883,7 +2911,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) | |||
| 2883 | rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; | 2911 | rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; |
| 2884 | 2912 | ||
| 2885 | /* Setup the new bdp */ | 2913 | /* Setup the new bdp */ |
| 2886 | gfar_new_rxbdp(rx_queue, bdp, newskb); | 2914 | gfar_init_rxbdp(rx_queue, bdp, bufaddr); |
| 2887 | 2915 | ||
| 2888 | /* Update Last Free RxBD pointer for LFC */ | 2916 | /* Update Last Free RxBD pointer for LFC */ |
| 2889 | if (unlikely(rx_queue->rfbptr && priv->tx_actual_en)) | 2917 | if (unlikely(rx_queue->rfbptr && priv->tx_actual_en)) |
