author    Nimrod Andy <B38611@freescale.com>    2014-06-11 20:16:22 -0400
committer David S. Miller <davem@davemloft.net> 2014-06-12 14:01:57 -0400
commit    6e909283cb344e32aa8adb4a4c169512d8e5fd27 (patch)
tree      f13c260b9dd44bf6ebd4cfb39087715d54cb2189
parent    55d0218ae2e23eb8a4da1f277eba53ba4edb9a26 (diff)
net: fec: Add Scatter/gather support
Add Scatter/gather support for FEC.
This feature improves outbound throughput performance.
Tested on imx6dl sabresd board:
Running iperf tests shows a 55.4% improvement.
$ ethtool -K eth0 sg off
$ iperf -c 10.192.242.167 -t 3 &
[ 3] local 10.192.242.108 port 52618 connected with 10.192.242.167 port 5001
[ ID] Interval Transfer Bandwidth
[ 3] 0.0- 3.0 sec 99.5 MBytes 278 Mbits/sec
$ ethtool -K eth0 sg on
$ iperf -c 10.192.242.167 -t 3 &
[ 3] local 10.192.242.108 port 52617 connected with 10.192.242.167 port 5001
[ ID] Interval Transfer Bandwidth
[ 3] 0.0- 3.0 sec 154 MBytes 432 Mbits/sec
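(For the record, the quoted figure is just the ratio of the two runs above: (432 - 278) / 278 ≈ 55.4%.)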
CC: Li Frank <B20596@freescale.com>
Signed-off-by: Fugang Duan <B38611@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/freescale/fec.h      |   2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 238
2 files changed, 178 insertions(+), 62 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 5ffd32308b9c..e7ce14d8d3c3 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -221,7 +221,7 @@ struct bufdesc_ex {
 #define BD_ENET_TX_RCMASK       ((ushort)0x003c)
 #define BD_ENET_TX_UN           ((ushort)0x0002)
 #define BD_ENET_TX_CSL          ((ushort)0x0001)
-#define BD_ENET_TX_STATS        ((ushort)0x03ff)        /* All status bits */
+#define BD_ENET_TX_STATS        ((ushort)0x0fff)        /* All status bits */
 
 /*enhanced buffer descriptor control/status used by Ethernet transmit*/
 #define BD_ENET_TX_INT          0x40000000
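A note on this one-bit-mask change: widening BD_ENET_TX_STATS from 0x03ff to 0x0fff brings BD_ENET_TX_LAST (0x0800) and BD_ENET_TX_TC (0x0400) under the clear-mask, which matters once a frame can span several descriptors and only the final fragment's BD may carry LAST. A minimal standalone check of the bit arithmetic (the defines mirror fec.h; the scenario is illustrative):

#include <stdio.h>

#define BD_ENET_TX_LAST 0x0800  /* last BD of a frame */
#define BD_ENET_TX_TC   0x0400  /* transmit CRC */
#define OLD_TX_STATS    0x03ff
#define NEW_TX_STATS    0x0fff

int main(void)
{
        /* A recycled descriptor that ended the previous frame still has
         * LAST | TC in its status word. */
        unsigned short stale = BD_ENET_TX_LAST | BD_ENET_TX_TC;

        /* The old mask left those bits behind ... */
        printf("old clear: 0x%04x\n", (unsigned)(stale & ~OLD_TX_STATS)); /* 0x0c00 */
        /* ... the widened mask wipes them, so a middle fragment cannot
         * inherit a bogus LAST flag from the ring's previous lap. */
        printf("new clear: 0x%04x\n", (unsigned)(stale & ~NEW_TX_STATS)); /* 0x0000 */
        return 0;
}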
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index b27a729222b8..bea00a8d6c99 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -289,6 +289,16 @@ static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
 	return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
 }
 
+static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
+{
+	int entries;
+
+	entries = ((const char *)fep->dirty_tx -
+			(const char *)fep->cur_tx) / fep->bufdesc_size - 1;
+
+	return entries > 0 ? entries : entries + fep->tx_ring_size;
+}
+
 static void *swap_buffer(void *bufaddr, int len)
 {
 	int i;
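The new helper above is the heart of the reworked ring accounting: it converts the pointer distance from cur_tx (next BD to fill) to dirty_tx (last BD reclaimed) into a count of usable descriptors, keeping one slot in reserve. The same arithmetic on plain indices, as a runnable sketch (ring size and values are illustrative, not driver state):

#include <stdio.h>

#define TX_RING_SIZE 16

/* free = dirty - cur - 1, wrapped into the ring; this mirrors
 * fec_enet_get_free_txdesc_num() with indices instead of pointers. */
static int free_txdesc_num(int dirty_tx, int cur_tx)
{
        int entries = dirty_tx - cur_tx - 1;

        return entries > 0 ? entries : entries + TX_RING_SIZE;
}

int main(void)
{
        /* Reclaimed through slot 3, about to fill slot 5: 13 BDs usable. */
        printf("%d\n", free_txdesc_num(3, 5));
        /* cur_tx has nearly lapped dirty_tx: a single BD left. */
        printf("%d\n", free_txdesc_num(5, 3));
        return 0;
}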
@@ -316,20 +326,119 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
 	return 0;
 }
 
-static int txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
+static void
+fec_enet_submit_work(struct bufdesc *bdp, struct fec_enet_private *fep)
+{
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
+	struct bufdesc *bdp_pre;
+
+	bdp_pre = fec_enet_get_prevdesc(bdp, fep);
+	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
+	    !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
+		fep->delay_work.trig_tx = true;
+		schedule_delayed_work(&(fep->delay_work.delay_work),
+					msecs_to_jiffies(1));
+	}
+}
+
+static int
+fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 		platform_get_device_id(fep->pdev);
-	struct bufdesc *bdp, *bdp_pre;
-	void *bufaddr;
-	unsigned short status;
+	struct bufdesc *bdp = fep->cur_tx;
+	struct bufdesc_ex *ebdp;
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	int frag, frag_len;
+	unsigned short status;
+	unsigned int estatus = 0;
+	skb_frag_t *this_frag;
 	unsigned int index;
+	void *bufaddr;
+	int i;
 
-	/* Fill in a Tx ring entry */
+	for (frag = 0; frag < nr_frags; frag++) {
+		this_frag = &skb_shinfo(skb)->frags[frag];
+		bdp = fec_enet_get_nextdesc(bdp, fep);
+		ebdp = (struct bufdesc_ex *)bdp;
+
+		status = bdp->cbd_sc;
+		status &= ~BD_ENET_TX_STATS;
+		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+		frag_len = skb_shinfo(skb)->frags[frag].size;
+
+		/* Handle the last BD specially */
+		if (frag == nr_frags - 1) {
+			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+			if (fep->bufdesc_ex) {
+				estatus |= BD_ENET_TX_INT;
+				if (unlikely(skb_shinfo(skb)->tx_flags &
+					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
+					estatus |= BD_ENET_TX_TS;
+			}
+		}
+
+		if (fep->bufdesc_ex) {
+			if (skb->ip_summed == CHECKSUM_PARTIAL)
+				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+			ebdp->cbd_bdu = 0;
+			ebdp->cbd_esc = estatus;
+		}
+
+		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+
+		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+		if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+			id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+			memcpy(fep->tx_bounce[index], bufaddr, frag_len);
+			bufaddr = fep->tx_bounce[index];
+
+			if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+				swap_buffer(bufaddr, frag_len);
+		}
+
+		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
+						frag_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+			dev_kfree_skb_any(skb);
+			if (net_ratelimit())
+				netdev_err(ndev, "Tx DMA memory map failed\n");
+			goto dma_mapping_error;
+		}
+
+		bdp->cbd_datlen = frag_len;
+		bdp->cbd_sc = status;
+	}
+
+	fep->cur_tx = bdp;
+
+	return 0;
+
+dma_mapping_error:
 	bdp = fep->cur_tx;
+	for (i = 0; i < frag; i++) {
+		bdp = fec_enet_get_nextdesc(bdp, fep);
+		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+				bdp->cbd_datlen, DMA_TO_DEVICE);
+	}
+	return NETDEV_TX_OK;
+}
 
-	status = bdp->cbd_sc;
+static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+			platform_get_device_id(fep->pdev);
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	struct bufdesc *bdp, *last_bdp;
+	void *bufaddr;
+	unsigned short status;
+	unsigned short buflen;
+	unsigned int estatus = 0;
+	unsigned int index;
+	int ret;
 
 	/* Protocol checksum off-load for TCP and UDP. */
 	if (fec_enet_clear_csum(skb, ndev)) {
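Two things are worth noting in the new fragment path above: each fragment lands in its own descriptor and only the last one gets BD_ENET_TX_LAST plus the interrupt/timestamp bits, and fep->cur_tx is not advanced until every fragment has been DMA-mapped, so the dma_mapping_error path can rewind from the old cur_tx and unmap exactly the frag descriptors filled so far. A standalone sketch of that all-or-nothing unwind pattern (malloc stands in for dma_map_single; everything here is illustrative):

#include <stdio.h>
#include <stdlib.h>

#define NR_FRAGS 4

/* Stand-in for dma_map_single(): pretend the mapping fails at frag 2. */
static void *map_frag(int i)
{
        return i == 2 ? NULL : malloc(32);
}

int main(void)
{
        void *mapped[NR_FRAGS] = { NULL };
        int frag, i;

        for (frag = 0; frag < NR_FRAGS; frag++) {
                mapped[frag] = map_frag(frag);
                if (!mapped[frag])
                        goto dma_mapping_error;
        }
        puts("all fragments mapped");
        return 0;

dma_mapping_error:
        /* Walk back over only the descriptors filled so far, as the
         * driver does by stepping the ring from the saved cur_tx. */
        for (i = 0; i < frag; i++)
                free(mapped[i]);
        printf("unwound %d fragment(s) after failure\n", frag);
        return 1;
}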
@@ -337,82 +446,83 @@ static int txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_OK;
 	}
 
-	/* Clear all of the status flags */
+	/* Fill in a Tx ring entry */
+	bdp = fep->cur_tx;
+	status = bdp->cbd_sc;
 	status &= ~BD_ENET_TX_STATS;
 
 	/* Set buffer length and buffer pointer */
 	bufaddr = skb->data;
-	bdp->cbd_datlen = skb->len;
+	buflen = skb_headlen(skb);
 
 	index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
-
-	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
-		memcpy(fep->tx_bounce[index], skb->data, skb->len);
+	if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+		id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+		memcpy(fep->tx_bounce[index], skb->data, buflen);
 		bufaddr = fep->tx_bounce[index];
-	}
 
-	/*
-	 * Some design made an incorrect assumption on endian mode of
-	 * the system that it's running on. As the result, driver has to
-	 * swap every frame going to and coming from the controller.
-	 */
-	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
-		swap_buffer(bufaddr, skb->len);
-
-	/* Save skb pointer */
-	fep->tx_skbuff[index] = skb;
+		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+			swap_buffer(bufaddr, buflen);
+	}
 
 	/* Push the data cache so the CPM does not get stale memory
 	 * data.
 	 */
 	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
-					skb->len, DMA_TO_DEVICE);
+					buflen, DMA_TO_DEVICE);
 	if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
-		bdp->cbd_bufaddr = 0;
-		fep->tx_skbuff[index] = NULL;
 		dev_kfree_skb_any(skb);
 		if (net_ratelimit())
 			netdev_err(ndev, "Tx DMA memory map failed\n");
 		return NETDEV_TX_OK;
 	}
 
+	if (nr_frags) {
+		ret = fec_enet_txq_submit_frag_skb(skb, ndev);
+		if (ret)
+			return ret;
+	} else {
+		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+		if (fep->bufdesc_ex) {
+			estatus = BD_ENET_TX_INT;
+			if (unlikely(skb_shinfo(skb)->tx_flags &
+				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
+				estatus |= BD_ENET_TX_TS;
+		}
+	}
+
 	if (fep->bufdesc_ex) {
 
 		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-		ebdp->cbd_bdu = 0;
+
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
-			fep->hwts_tx_en)) {
-			ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+			fep->hwts_tx_en))
 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-		} else {
-			ebdp->cbd_esc = BD_ENET_TX_INT;
 
-			/* Enable protocol checksum flags
-			 * We do not bother with the IP Checksum bits as they
-			 * are done by the kernel
-			 */
-			if (skb->ip_summed == CHECKSUM_PARTIAL)
-				ebdp->cbd_esc |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
-		}
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+
+		ebdp->cbd_bdu = 0;
+		ebdp->cbd_esc = estatus;
 	}
 
+	last_bdp = fep->cur_tx;
+	index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
+	/* Save skb pointer */
+	fep->tx_skbuff[index] = skb;
+
+	bdp->cbd_datlen = buflen;
+
 	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
 	 * it's the last BD of the frame, and to put the CRC on the end.
 	 */
-	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
-			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
+	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
 	bdp->cbd_sc = status;
 
-	bdp_pre = fec_enet_get_prevdesc(bdp, fep);
-	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
-	    !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
-		fep->delay_work.trig_tx = true;
-		schedule_delayed_work(&(fep->delay_work.delay_work),
-					msecs_to_jiffies(1));
-	}
+	fec_enet_submit_work(bdp, fep);
 
 	/* If this was the last BD in the ring, start at the beginning again. */
-	bdp = fec_enet_get_nextdesc(bdp, fep);
+	bdp = fec_enet_get_nextdesc(last_bdp, fep);
 
 	skb_tx_timestamp(skb);
 
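Note the publish order in the rewritten submit path: the head buffer is mapped first, the fragment descriptors are filled and marked READY by fec_enet_txq_submit_frag_skb(), and only then does the head BD get BD_ENET_TX_READY, so the MAC never starts on a half-built chain. The skb pointer and the ring advance are now keyed off last_bdp, since the frame ends at the final fragment's descriptor rather than at the head.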
@@ -421,7 +531,7 @@ static int txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
 	/* Trigger transmission start */
 	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
-	return NETDEV_TX_OK;
+	return 0;
 }
 
 static netdev_tx_t
@@ -430,6 +540,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct bufdesc *bdp;
 	unsigned short status;
+	int entries_free;
 	int ret;
 
 	/* Fill in a Tx ring entry */
@@ -441,15 +552,17 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		/* Ooops.  All transmit buffers are full.  Bail out.
 		 * This should not happen, since ndev->tbusy should be set.
 		 */
-		netdev_err(ndev, "tx queue full!\n");
+		if (net_ratelimit())
+			netdev_err(ndev, "tx queue full!\n");
 		return NETDEV_TX_BUSY;
 	}
 
-	ret = txq_submit_skb(skb, ndev);
-	if (ret == -EBUSY)
-		return NETDEV_TX_BUSY;
+	ret = fec_enet_txq_submit_skb(skb, ndev);
+	if (ret)
+		return ret;
 
-	if (fep->cur_tx == fep->dirty_tx)
+	entries_free = fec_enet_get_free_txdesc_num(fep);
+	if (entries_free < MAX_SKB_FRAGS + 1)
 		netif_stop_queue(ndev);
 
 	return NETDEV_TX_OK;
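The stop condition also changes meaning here: instead of stopping only when the ring is completely full, the queue is stopped as soon as fewer than MAX_SKB_FRAGS + 1 descriptors remain free, i.e. whenever the next worst-case skb (a linear head plus MAX_SKB_FRAGS fragments) might not fit. The wake-up path in fec_enet_tx() below applies the same threshold, using the free-count helper sketched earlier.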
@@ -770,6 +883,7 @@ fec_enet_tx(struct net_device *ndev)
 	unsigned short status;
 	struct sk_buff *skb;
 	int index = 0;
+	int entries;
 
 	fep = netdev_priv(ndev);
 	bdp = fep->dirty_tx;
@@ -786,9 +900,13 @@ fec_enet_tx(struct net_device *ndev)
 		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
 
 		skb = fep->tx_skbuff[index];
-		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
+		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, bdp->cbd_datlen,
 				DMA_TO_DEVICE);
 		bdp->cbd_bufaddr = 0;
+		if (!skb) {
+			bdp = fec_enet_get_nextdesc(bdp, fep);
+			continue;
+		}
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -807,7 +925,7 @@ fec_enet_tx(struct net_device *ndev)
 			ndev->stats.tx_carrier_errors++;
 		} else {
 			ndev->stats.tx_packets++;
-			ndev->stats.tx_bytes += bdp->cbd_datlen;
+			ndev->stats.tx_bytes += skb->len;
 		}
 
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
@@ -844,15 +962,13 @@ fec_enet_tx(struct net_device *ndev)
 
 		/* Since we have freed up a buffer, the ring is no longer full
 		 */
-		if (fep->dirty_tx != fep->cur_tx) {
-			if (netif_queue_stopped(ndev))
-				netif_wake_queue(ndev);
-		}
+		entries = fec_enet_get_free_txdesc_num(fep);
+		if (entries >= MAX_SKB_FRAGS + 1 && netif_queue_stopped(ndev))
+			netif_wake_queue(ndev);
 	}
 	return;
 }
 
-
 /* During a receive, the cur_rx points to the current incoming buffer.
  * When we update through the ring, if the next incoming buffer has
  * not been given to the system, we just set the empty indicator,
@@ -2095,7 +2211,7 @@ static int fec_enet_init(struct net_device *ndev)
 	if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
 		/* enable hw accelerator */
 		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
-				| NETIF_F_RXCSUM);
+				| NETIF_F_RXCSUM | NETIF_F_SG);
 		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
 	}
 
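Finally, advertising NETIF_F_SG is what lets the stack hand the driver multi-fragment skbs in the first place; it is the feature toggled by "ethtool -K eth0 sg on|off" in the measurements above, and it is enabled only inside the FEC_QUIRK_HAS_CSUM block, alongside the checksum offloads.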