aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFugang Duan <b38611@freescale.com>2014-06-04 04:58:18 -0400
committerFugang Duan <b38611@freescale.com>2014-06-13 01:59:27 -0400
commit67ed1a863390f2277e25aac8768580ed202dede4 (patch)
tree4ded9bf401d298dbb561efa507fb05913d269534
parent9d1bb4f767a25dbcc35d5c7aa7b92a364566eb53 (diff)
net: fec: Factorize the .xmit transmit function
Make the code more readable and easier to extend with other features like SG and TSO by moving the common transmit logic into one API. The patch also factors out the BD-index calculation into its own function. CC: David Laight <David.Laight@ACULAB.COM> Signed-off-by: Fugang Duan <B38611@freescale.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c102
1 file changed, 65 insertions(+), 37 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 63b4244df33b..fe5966b24374 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -272,6 +272,28 @@ struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
 	return (new_bd < base) ? (new_bd + ring_size) : new_bd;
 }
 
+static inline
+int fec_enet_get_bd_index(struct bufdesc *bdp,
+			  struct fec_enet_private *fep, int queue_id)
+{
+	struct fec_enet_priv_tx_q *tx_queue = fep->tx_queue[queue_id];
+	struct fec_enet_priv_rx_q *rx_queue = fep->rx_queue[queue_id];
+	struct bufdesc *base;
+	int index;
+
+	if (bdp >= tx_queue->tx_bd_base)
+		base = tx_queue->tx_bd_base;
+	else
+		base = rx_queue->rx_bd_base;
+
+	if (fep->bufdesc_ex)
+		index = (struct bufdesc_ex *)bdp - (struct bufdesc_ex *)base;
+	else
+		index = bdp - base;
+
+	return index;
+}
+
 static void *swap_buffer(void *bufaddr, int len)
 {
 	int i;
@@ -298,15 +320,13 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
 	return 0;
 }
 
-static netdev_tx_t
-fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+				   struct sk_buff *skb, struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
 	struct bufdesc *bdp, *bdp_pre;
-	struct fec_enet_priv_tx_q *txq;
-	struct netdev_queue *nq;
 	unsigned short queue;
 	void *bufaddr;
 	unsigned short status;
@@ -316,22 +336,12 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	unsigned int index;
 
 	queue = skb_get_queue_mapping(skb);
-	txq = fep->tx_queue[queue];
-	nq = netdev_get_tx_queue(ndev, queue);
 
 	/* Fill in a Tx ring entry */
 	bdp = txq->cur_tx;
 
 	status = bdp->cbd_sc;
 
-	if (status & BD_ENET_TX_READY) {
-		/* Ooops.  All transmit buffers are full.  Bail out.
-		 * This should not happen, since ndev->tbusy should be set.
-		 */
-		netdev_err(ndev, "tx queue full!\n");
-		return NETDEV_TX_BUSY;
-	}
-
 	/* Protocol checksum off-load for TCP and UDP. */
 	if (fec_enet_clear_csum(skb, ndev)) {
 		kfree_skb(skb);
@@ -345,16 +355,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	bufaddr = skb->data;
 	bdbuf_len = skb->len;
 
-	/*
-	 * On some FEC implementations data must be aligned on
-	 * 4-byte boundaries.  Use bounce buffers to copy data
-	 * and get it aligned. Ugh.
-	 */
-	if (fep->bufdesc_ex)
-		index = (struct bufdesc_ex *)bdp -
-			(struct bufdesc_ex *)txq->tx_bd_base;
-	else
-		index = bdp - txq->tx_bd_base;
+	index = fec_enet_get_bd_index(bdp, fep, queue);
 
 	if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB) &&
 	    ((unsigned long) bufaddr) & FEC_ALIGNMENT) {
@@ -437,9 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	txq->cur_tx = bdp;
 
-	if (txq->cur_tx == txq->dirty_tx)
-		netif_tx_stop_queue(nq);
-
 	/* Trigger transmission start */
 	if (!(id_entry->driver_data & FEC_QUIRK_TKT210582) ||
 	    !__raw_readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
@@ -450,6 +448,44 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		__raw_writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
 	}
 
+	return 0;
+}
+
+static netdev_tx_t
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_enet_priv_tx_q *txq;
+	struct netdev_queue *nq;
+	unsigned short queue;
+	struct bufdesc *bdp;
+	unsigned short status;
+	int ret;
+
+	queue = skb_get_queue_mapping(skb);
+	txq = fep->tx_queue[queue];
+	nq = netdev_get_tx_queue(ndev, queue);
+
+	/* Fill in a Tx ring entry */
+	bdp = txq->cur_tx;
+
+	status = bdp->cbd_sc;
+
+	if (status & BD_ENET_TX_READY) {
+		/* Ooops.  All transmit buffers are full.  Bail out.
+		 * This should not happen, since ndev->tbusy should be set.
+		 */
+		netdev_err(ndev, "tx queue full!\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	ret = fec_enet_txq_submit_skb(txq, skb, ndev);
+	if (ret == -EBUSY)
+		return NETDEV_TX_BUSY;
+
+	if (txq->cur_tx == txq->dirty_tx)
+		netif_tx_stop_queue(nq);
+
 	return NETDEV_TX_OK;
 }
 
@@ -889,11 +925,7 @@ fec_enet_tx(struct net_device *ndev)
 		if (bdp == txq->cur_tx)
 			break;
 
-		if (fep->bufdesc_ex)
-			index = (struct bufdesc_ex *)bdp -
-				(struct bufdesc_ex *)txq->tx_bd_base;
-		else
-			index = bdp - txq->tx_bd_base;
+		index = fec_enet_get_bd_index(bdp, fep, queue_id);
 
 		skb = txq->tx_skbuff[index];
 		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
@@ -1030,11 +1062,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
 			break;
 		pkt_received++;
 
-		if (fep->bufdesc_ex)
-			index = (struct bufdesc_ex *)bdp -
-				(struct bufdesc_ex *)rxq->rx_bd_base;
-		else
-			index = bdp - rxq->rx_bd_base;
+		index = fec_enet_get_bd_index(bdp, fep, queue_id);
 
 		/* Since we have allocated space to hold a complete frame,
 		 * the last indicator should be set.