|           |                                                       |                           |
|-----------|-------------------------------------------------------|---------------------------|
| author    | Fugang Duan <b38611@freescale.com>                    | 2014-06-05 21:18:31 -0400 |
| committer | Fugang Duan <b38611@freescale.com>                    | 2014-06-13 02:00:53 -0400 |
| commit    | bd91b5f547197f38b4733dc27e6bccf586ab5cfc (patch)      |                           |
| tree      | 4443f586bec001b23b54e107f666011bcb5bf328 /drivers/net |                           |
| parent    | f9f9dd934002f5637f7d2c6120004092a3841fc2 (diff)       |                           |
net: fec: Add Scatter/gather support
Add Scatter/gather support for FEC.
This feature improves outbound throughput performance: a frame can now be handed to the MAC as a chain of buffer descriptors (the linear skb head plus its page fragments) instead of a single linear buffer.
Tested on imx6dl sabresd board:
Running iperf tests shows a 55.4% improvement.
$ ethtool -K eth0 sg off
$ iperf -c 10.192.242.167 -t 3 &
[ 3] local 10.192.242.108 port 52618 connected with 10.192.242.167 port 5001
[ ID] Interval Transfer Bandwidth
[ 3] 0.0- 3.0 sec 99.5 MBytes 278 Mbits/sec
$ ethtool -K eth0 sg on
$ iperf -c 10.192.242.167 -t 3 &
[ 3] local 10.192.242.108 port 52617 connected with 10.192.242.167 port 5001
[ ID] Interval Transfer Bandwidth
[ 3] 0.0- 3.0 sec 154 MBytes 432 Mbits/sec
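(432 Mbits/sec against 278 Mbits/sec is a factor of roughly 1.554, which is the 55.4% gain quoted above.)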
CC: Li Frank <B20596@freescale.com>
Signed-off-by: Fugang Duan <B38611@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry-pick and merge from linux-net: 6e909283cb3)
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/freescale/fec.h      |   2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 260
2 files changed, 191 insertions, 71 deletions
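Before reading the diff, the core mechanism is easiest to see stripped of driver detail: with NETIF_F_SG set, each frame arrives as a linear head plus zero or more page fragments, every piece gets its own buffer descriptor (BD) in the TX ring, only the last BD carries the LAST/interrupt bits, and the head BD is armed last so the MAC never sees a half-built chain. Below is a minimal, self-contained C sketch of that walk; the toy `struct bd`, the `TX_*` bit values and `submit_sg()` are illustrative stand-ins, not the driver's real definitions.

```c
#include <stdio.h>

#define TX_READY 0x8000		/* descriptor owned by the MAC */
#define TX_INTR  0x1000		/* raise an interrupt on completion */
#define TX_LAST  0x0800		/* final descriptor of the frame */

struct bd {			/* toy buffer descriptor */
	unsigned short sc;	/* control/status */
	unsigned short len;
	const void *buf;
};

static void submit_sg(struct bd *ring, int ring_size, int *cur,
		      const void *head, unsigned short head_len,
		      const void *const frags[],
		      const unsigned short frag_len[], int nr_frags)
{
	struct bd *first = &ring[*cur];
	int i;

	first->buf = head;
	first->len = head_len;

	/* Queue the fragments first; only the last BD ends the frame. */
	for (i = 0; i < nr_frags; i++) {
		*cur = (*cur + 1) % ring_size;
		ring[*cur].buf = frags[i];
		ring[*cur].len = frag_len[i];
		ring[*cur].sc = TX_READY;
		if (i == nr_frags - 1)
			ring[*cur].sc |= TX_LAST | TX_INTR;
	}

	/* Arm the head descriptor last, so the MAC cannot start on a
	 * half-built chain (the patch orders its cbd_sc writes the
	 * same way: fragments first, then the head BD). */
	first->sc = TX_READY | (nr_frags ? 0 : (TX_LAST | TX_INTR));
	*cur = (*cur + 1) % ring_size;
}

int main(void)
{
	struct bd ring[8] = { { 0 } };
	static const char head[64], f0[256], f1[128];
	const void *const frags[] = { f0, f1 };
	const unsigned short lens[] = { sizeof(f0), sizeof(f1) };
	int cur = 0, i;

	submit_sg(ring, 8, &cur, head, sizeof(head), frags, lens, 2);
	for (i = 0; i < 4; i++)
		printf("bd[%d]: len=%3u sc=0x%04x\n",
		       i, (unsigned)ring[i].len, (unsigned)ring[i].sc);
	return 0;
}
```

Running the sketch prints three armed descriptors, with only `bd[2]` carrying the LAST and interrupt bits, which is exactly the shape the patch builds in `fec_enet_txq_submit_frag_skb()` below.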
```diff
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 878d64efc178..fc558addb39f 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -243,7 +243,7 @@ struct bufdesc_ex {
 #define BD_ENET_TX_RCMASK	((ushort)0x003c)
 #define BD_ENET_TX_UN		((ushort)0x0002)
 #define BD_ENET_TX_CSL		((ushort)0x0001)
-#define BD_ENET_TX_STATS	((ushort)0x03ff)	/* All status bits */
+#define BD_ENET_TX_STATS	((ushort)0x0fff)	/* All status bits */
 
 /*enhanced buffer descriptor control/status used by Ethernet transmit*/
 #define BD_ENET_TX_INT		0x40000000
```
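A note on why the mask widens from 0x03ff to 0x0fff: the two extra bits are BD_ENET_TX_LAST (0x0800) and BD_ENET_TX_TC (0x0400), which the scatter/gather path now sets per descriptor, so `status &= ~BD_ENET_TX_STATS` must be able to clear them when a descriptor is reused for a middle fragment.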
```diff
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 591913dde10a..39ed10bf0009 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -294,6 +294,24 @@ int fec_enet_get_bd_index(struct bufdesc *bdp,
 	return index;
 }
 
+static inline
+int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
+				 struct fec_enet_priv_tx_q *txq)
+{
+	int entries;
+
+	if (fep->bufdesc_ex)
+		entries = (struct bufdesc_ex *)txq->dirty_tx -
+			  (struct bufdesc_ex *)txq->cur_tx;
+	else
+		entries = txq->dirty_tx - txq->cur_tx;
+
+	if (txq->cur_tx >= txq->dirty_tx)
+		entries += txq->tx_ring_size;
+
+	return entries;
+}
+
 static void *swap_buffer(void *bufaddr, int len)
 {
 	int i;
@@ -321,119 +339,214 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
 	return 0;
 }
 
-static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+static void fec_enet_submit_work(struct bufdesc *bdp,
+				 struct fec_enet_private *fep, int queue)
+{
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
+	struct bufdesc *bdp_pre;
+
+	bdp_pre = fec_enet_get_prevdesc(bdp, fep, queue);
+	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
+	    !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
+		fep->delay_work.trig_tx = queue + 1;
+		schedule_delayed_work(&(fep->delay_work.delay_work),
+				      msecs_to_jiffies(1));
+	}
+}
+
+static int fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
 				struct sk_buff *skb, struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
-	struct bufdesc *bdp, *bdp_pre;
-	unsigned short queue;
-	void *bufaddr;
-	unsigned short status;
-	unsigned int status_esc;
-	unsigned int bdbuf_len;
-	unsigned int bdbuf_addr;
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	unsigned short queue = skb_get_queue_mapping(skb);
+	struct bufdesc *bdp = txq->cur_tx;
+	struct bufdesc_ex *ebdp;
+	int frag, frag_len;
+	unsigned short status;
+	unsigned int estatus = 0;
+	skb_frag_t *this_frag;
 	unsigned int index;
+	void *bufaddr;
+	int i;
 
-	queue = skb_get_queue_mapping(skb);
+	for (frag = 0; frag < nr_frags; frag++) {
+		this_frag = &skb_shinfo(skb)->frags[frag];
+		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+		ebdp = (struct bufdesc_ex *)bdp;
 
-	/* Fill in a Tx ring entry */
+		status = bdp->cbd_sc;
+		status &= ~BD_ENET_TX_STATS;
+		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+		frag_len = skb_shinfo(skb)->frags[frag].size;
+
+		/* Handle the last BD specially */
+		if (frag == nr_frags - 1) {
+			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+			if (fep->bufdesc_ex) {
+				estatus |= BD_ENET_TX_INT;
+				if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+				    fep->hwts_tx_en) || unlikely(fep->hwts_tx_en_ioctl &&
+				    fec_ptp_do_txstamp(skb)))
+					estatus |= BD_ENET_TX_TS;
+			}
+		}
+
+		if (fep->bufdesc_ex) {
+			if (skb->ip_summed == CHECKSUM_PARTIAL)
+				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+			ebdp->cbd_bdu = 0;
+			ebdp->cbd_esc = estatus;
+		}
+
+		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+
+		index = fec_enet_get_bd_index(bdp, fep, queue);
+		if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB) &&
+		    (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+		    id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)) {
+			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
+			bufaddr = txq->tx_bounce[index];
+
+			if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+				swap_buffer(bufaddr, frag_len);
+		}
+
+		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
+						  frag_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+			dev_kfree_skb_any(skb);
+			if (net_ratelimit())
+				netdev_err(ndev, "Tx DMA memory map failed\n");
+			goto dma_mapping_error;
+		}
+
+		bdp->cbd_datlen = frag_len;
+		bdp->cbd_sc = status;
+	}
+
+	txq->cur_tx = bdp;
+
+	return 0;
+
+dma_mapping_error:
 	bdp = txq->cur_tx;
+	for (i = 0; i < frag; i++) {
+		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+				 bdp->cbd_datlen, DMA_TO_DEVICE);
+	}
+	return NETDEV_TX_OK;
+}
 
-	status = bdp->cbd_sc;
+static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+				   struct sk_buff *skb, struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	struct bufdesc *bdp, *last_bdp;
+	void *bufaddr;
+	unsigned short status;
+	unsigned short buflen;
+	unsigned short queue;
+	unsigned int estatus = 0;
+	unsigned int index;
+	int ret;
 
 	/* Protocol checksum off-load for TCP and UDP. */
 	if (fec_enet_clear_csum(skb, ndev)) {
-		kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 
-	/* Clear all of the status flags */
+	/* Fill in a Tx ring entry */
+	bdp = txq->cur_tx;
+	status = bdp->cbd_sc;
 	status &= ~BD_ENET_TX_STATS;
 
 	/* Set buffer length and buffer pointer */
 	bufaddr = skb->data;
-	bdbuf_len = skb->len;
+	buflen = skb_headlen(skb);
 
+	queue = skb_get_queue_mapping(skb);
 	index = fec_enet_get_bd_index(bdp, fep, queue);
 
 	if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB) &&
-		((unsigned long) bufaddr) & FEC_ALIGNMENT) {
-		memcpy(txq->tx_bounce[index], skb->data, skb->len);
+	    (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+	    id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)) {
+		memcpy(txq->tx_bounce[index], skb->data, buflen);
 		bufaddr = txq->tx_bounce[index];
-	}
 
-	/*
-	 * Some design made an incorrect assumption on endian mode of
-	 * the system that it's running on. As the result, driver has to
-	 * swap every frame going to and coming from the controller.
-	 */
-	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
-		swap_buffer(bufaddr, skb->len);
-
-	/* Save skb pointer */
-	txq->tx_skbuff[index] = skb;
+		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+			swap_buffer(bufaddr, buflen);
+	}
 
 	/* Push the data cache so the CPM does not get stale memory
 	 * data.
 	 */
-	bdbuf_addr = dma_map_single(&fep->pdev->dev, bufaddr,
-			skb->len, DMA_TO_DEVICE);
+	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
+					  buflen, DMA_TO_DEVICE);
 	if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
-		bdp->cbd_bufaddr = 0;
-		netdev_err(ndev, "Tx DMA memory map failed\n");
+		dev_kfree_skb_any(skb);
+		if (net_ratelimit())
+			netdev_err(ndev, "Tx DMA memory map failed\n");
 		return NETDEV_TX_OK;
 	}
 
+	if (nr_frags) {
+		ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
+		if (ret)
+			return ret;
+	} else {
+		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+		if (fep->bufdesc_ex) {
+			estatus = BD_ENET_TX_INT;
+			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+			    fep->hwts_tx_en) || unlikely(fep->hwts_tx_en_ioctl &&
+			    fec_ptp_do_txstamp(skb)))
+				estatus |= BD_ENET_TX_TS;
+		}
+	}
+
 	if (fep->bufdesc_ex) {
 
 		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
-			fep->hwts_tx_en) || unlikely(fep->hwts_tx_en_ioctl &&
-			fec_ptp_do_txstamp(skb))) {
-			status_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+		    fep->hwts_tx_en) || unlikely(fep->hwts_tx_en_ioctl &&
+		    fec_ptp_do_txstamp(skb)))
 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-		} else {
-			status_esc = BD_ENET_TX_INT;
-
-			/* Enable protocol checksum flags
-			 * We do not bother with the IP Checksum bits as they
-			 * are done by the kernel
-			 */
-			if (skb->ip_summed == CHECKSUM_PARTIAL)
-				status_esc |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
-		}
 
-		if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
-			status_esc |= FEC_TX_BD_FTYPE(queue);
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
 
 		ebdp->cbd_bdu = 0;
-		ebdp->cbd_esc = status_esc;
+		ebdp->cbd_esc = estatus;
 	}
 
-	bdp->cbd_bufaddr = bdbuf_addr;
-	bdp->cbd_datlen = bdbuf_len;
+	last_bdp = txq->cur_tx;
+	index = fec_enet_get_bd_index(last_bdp, fep, queue);
+	/* Save skb pointer */
+	txq->tx_skbuff[index] = skb;
+
+	bdp->cbd_datlen = buflen;
 	dmb();
 
 	/* Send it on its way. Tell FEC it's ready, interrupt when done,
 	 * it's the last BD of the frame, and to put the CRC on the end.
 	 */
-	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
-			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
+	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
 	bdp->cbd_sc = status;
 
-	bdp_pre = fec_enet_get_prevdesc(bdp, fep, queue);
-	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
-		!(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
-		fep->delay_work.trig_tx = queue + 1;
-		schedule_delayed_work(&(fep->delay_work.delay_work),
-					msecs_to_jiffies(1));
-	}
+	fec_enet_submit_work(bdp, fep, queue);
 
 	/* If this was the last BD in the ring, start at the beginning again. */
-	bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+	bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
 
 	skb_tx_timestamp(skb);
 
@@ -462,6 +575,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct bufdesc *bdp;
 	unsigned short status;
 	int ret;
+	int entries_free;
 
 	queue = skb_get_queue_mapping(skb);
 	txq = fep->tx_queue[queue];
@@ -476,15 +590,17 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		/* Ooops. All transmit buffers are full. Bail out.
 		 * This should not happen, since ndev->tbusy should be set.
 		 */
-		netdev_err(ndev, "tx queue full!\n");
+		if (net_ratelimit())
+			netdev_err(ndev, "tx queue full!\n");
 		return NETDEV_TX_BUSY;
 	}
 
 	ret = fec_enet_txq_submit_skb(txq, skb, ndev);
-	if (ret == -EBUSY)
-		return NETDEV_TX_BUSY;
+	if (ret)
+		return ret;
 
-	if (txq->cur_tx == txq->dirty_tx)
+	entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+	if (entries_free < MAX_SKB_FRAGS + 1)
 		netif_tx_stop_queue(nq);
 
 	return NETDEV_TX_OK;
@@ -905,6 +1021,7 @@ fec_enet_tx(struct net_device *ndev)
 	struct netdev_queue *nq;
 	int queue_id;
 	int index = 0;
+	int entries;
 
 	fep = netdev_priv(ndev);
 
@@ -929,9 +1046,13 @@ fec_enet_tx(struct net_device *ndev)
 		index = fec_enet_get_bd_index(bdp, fep, queue_id);
 
 		skb = txq->tx_skbuff[index];
-		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-				 skb->len, DMA_TO_DEVICE);
+		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, bdp->cbd_datlen,
+				 DMA_TO_DEVICE);
 		bdp->cbd_bufaddr = 0;
+		if (!skb) {
+			bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+			continue;
+		}
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -950,7 +1071,7 @@ fec_enet_tx(struct net_device *ndev)
 			ndev->stats.tx_carrier_errors++;
 		} else {
 			ndev->stats.tx_packets++;
-			ndev->stats.tx_bytes += bdp->cbd_datlen;
+			ndev->stats.tx_bytes += skb->len;
 		}
 
 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
@@ -988,11 +1109,10 @@ fec_enet_tx(struct net_device *ndev)
 
 		/* Since we have freed up a buffer, the ring is no longer full
 		 */
-		if (txq->dirty_tx != txq->cur_tx) {
-			if (netif_tx_queue_stopped(nq))
+		entries = fec_enet_get_free_txdesc_num(fep, txq);
+		if (entries >= MAX_SKB_FRAGS + 1 && netif_tx_queue_stopped(nq))
 			netif_tx_wake_queue(nq);
 		}
-	}
 	}
 
 	return;
@@ -2614,7 +2734,7 @@ static int fec_enet_init(struct net_device *ndev)
 	if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
 		/* enable hw accelerator */
 		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
-				| NETIF_F_RXCSUM);
+				| NETIF_F_RXCSUM | NETIF_F_SG);
 		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
 	}
 
```
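Both the stop threshold in `fec_enet_start_xmit()` and the wake threshold in `fec_enet_tx()` lean on the new `fec_enet_get_free_txdesc_num()` helper, whose pointer arithmetic deserves a worked example: `dirty_tx` trails at the last reclaimed descriptor, `cur_tx` leads at the next one to fill, and the raw difference goes non-positive once `cur_tx` catches up, so one `tx_ring_size` is added back. A minimal sketch of the same arithmetic, with plain ring indices standing in for the driver's descriptor pointers:

```c
#include <stdio.h>

/* Same arithmetic as fec_enet_get_free_txdesc_num(), using indices
 * instead of bufdesc pointers. */
static int free_txdesc(int dirty_tx, int cur_tx, int ring_size)
{
	int entries = dirty_tx - cur_tx;

	if (cur_tx >= dirty_tx)
		entries += ring_size;	/* difference wrapped non-positive */

	return entries;
}

int main(void)
{
	/* 16-entry ring, 4 descriptors in flight: 12 free. */
	printf("%d\n", free_txdesc(2, 6, 16));	/* prints 12 */

	/* cur_tx wrapped past the end of the ring while dirty_tx has
	 * not: still 4 in flight, still 12 free. */
	printf("%d\n", free_txdesc(14, 2, 16));	/* prints 12 */

	return 0;
}
```

The xmit path stops the queue whenever fewer than `MAX_SKB_FRAGS + 1` entries remain, so the next maximally fragmented skb (head plus every possible fragment) is always guaranteed to fit, and the completion path wakes it with the mirror-image test. Note also the `if (!skb)` skip in the completion loop: the skb pointer is saved on only one descriptor of the chain, so the fragment descriptors are simply unmapped and passed over.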
