-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h          |  11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c  | 207
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c  |  69
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c     | 442
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c     | 358
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h     |  35
6 files changed, 650 insertions(+), 472 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index b5252eb8a6c7..c06a76ca9aaa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
| @@ -347,9 +347,9 @@ struct i40e_vsi { | |||
| 347 | u32 rx_buf_failed; | 347 | u32 rx_buf_failed; |
| 348 | u32 rx_page_failed; | 348 | u32 rx_page_failed; |
| 349 | 349 | ||
| 350 | /* These are arrays of rings, allocated at run-time */ | 350 | /* These are containers of ring pointers, allocated at run-time */ |
| 351 | struct i40e_ring *rx_rings; | 351 | struct i40e_ring **rx_rings; |
| 352 | struct i40e_ring *tx_rings; | 352 | struct i40e_ring **tx_rings; |
| 353 | 353 | ||
| 354 | u16 work_limit; | 354 | u16 work_limit; |
| 355 | /* high bit set means dynamic, use accessor routines to read/write. | 355 | /* high bit set means dynamic, use accessor routines to read/write. |
| @@ -366,7 +366,7 @@ struct i40e_vsi { | |||
| 366 | u8 dtype; | 366 | u8 dtype; |
| 367 | 367 | ||
| 368 | /* List of q_vectors allocated to this VSI */ | 368 | /* List of q_vectors allocated to this VSI */ |
| 369 | struct i40e_q_vector *q_vectors; | 369 | struct i40e_q_vector **q_vectors; |
| 370 | int num_q_vectors; | 370 | int num_q_vectors; |
| 371 | int base_vector; | 371 | int base_vector; |
| 372 | 372 | ||
| @@ -422,8 +422,9 @@ struct i40e_q_vector { | |||
| 422 | 422 | ||
| 423 | u8 num_ringpairs; /* total number of ring pairs in vector */ | 423 | u8 num_ringpairs; /* total number of ring pairs in vector */ |
| 424 | 424 | ||
| 425 | char name[IFNAMSIZ + 9]; | ||
| 426 | cpumask_t affinity_mask; | 425 | cpumask_t affinity_mask; |
| 426 | struct rcu_head rcu; /* to avoid race with update stats on free */ | ||
| 427 | char name[IFNAMSIZ + 9]; | ||
| 427 | } ____cacheline_internodealigned_in_smp; | 428 | } ____cacheline_internodealigned_in_smp; |
| 428 | 429 | ||
| 429 | /* lan device */ | 430 | /* lan device */ |
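
The key change in this header is that the VSI no longer embeds its rings in two contiguous arrays; it only holds arrays of ring pointers, so an individual ring can be republished or freed under RCU without disturbing its neighbours. A simplified sketch of the before/after layout (illustrative declarations only, not the driver's full structs):

	/* before: one kcalloc() block of ring structs per VSI */
	struct i40e_ring *tx_rings;	/* tx_rings[i] is a struct */

	/* after: only pointers live in the VSI; each queue pair is its
	 * own allocation and can be swapped or kfree_rcu()'d on its own
	 */
	struct i40e_ring **tx_rings;	/* tx_rings[i] is a pointer */

The rcu_head added to struct i40e_q_vector exists for the same reason: i40e_free_q_vector() later in this patch releases the vector with kfree_rcu(), so stats readers holding rcu_read_lock() never see it disappear underneath them.
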
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 8dbd91f64b74..19e248ff6c77 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
| @@ -258,12 +258,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp, | |||
| 258 | 258 | ||
| 259 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 259 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
| 260 | len = sizeof(struct i40e_tx_buffer); | 260 | len = sizeof(struct i40e_tx_buffer); |
| 261 | memcpy(p, vsi->tx_rings[i].tx_bi, len); | 261 | memcpy(p, vsi->tx_rings[i]->tx_bi, len); |
| 262 | p += len; | 262 | p += len; |
| 263 | } | 263 | } |
| 264 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 264 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
| 265 | len = sizeof(struct i40e_rx_buffer); | 265 | len = sizeof(struct i40e_rx_buffer); |
| 266 | memcpy(p, vsi->rx_rings[i].rx_bi, len); | 266 | memcpy(p, vsi->rx_rings[i]->rx_bi, len); |
| 267 | p += len; | 267 | p += len; |
| 268 | } | 268 | } |
| 269 | 269 | ||
| @@ -484,100 +484,104 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) | |||
| 484 | " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n", | 484 | " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n", |
| 485 | vsi->tx_restart, vsi->tx_busy, | 485 | vsi->tx_restart, vsi->tx_busy, |
| 486 | vsi->rx_buf_failed, vsi->rx_page_failed); | 486 | vsi->rx_buf_failed, vsi->rx_page_failed); |
| 487 | if (vsi->rx_rings) { | 487 | rcu_read_lock(); |
| 488 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 488 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
| 489 | dev_info(&pf->pdev->dev, | 489 | struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]); |
| 490 | " rx_rings[%i]: desc = %p\n", | 490 | if (!rx_ring) |
| 491 | i, vsi->rx_rings[i].desc); | 491 | continue; |
| 492 | dev_info(&pf->pdev->dev, | 492 | |
| 493 | " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n", | 493 | dev_info(&pf->pdev->dev, |
| 494 | i, vsi->rx_rings[i].dev, | 494 | " rx_rings[%i]: desc = %p\n", |
| 495 | vsi->rx_rings[i].netdev, | 495 | i, rx_ring->desc); |
| 496 | vsi->rx_rings[i].rx_bi); | 496 | dev_info(&pf->pdev->dev, |
| 497 | dev_info(&pf->pdev->dev, | 497 | " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n", |
| 498 | " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", | 498 | i, rx_ring->dev, |
| 499 | i, vsi->rx_rings[i].state, | 499 | rx_ring->netdev, |
| 500 | vsi->rx_rings[i].queue_index, | 500 | rx_ring->rx_bi); |
| 501 | vsi->rx_rings[i].reg_idx); | 501 | dev_info(&pf->pdev->dev, |
| 502 | dev_info(&pf->pdev->dev, | 502 | " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", |
| 503 | " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n", | 503 | i, rx_ring->state, |
| 504 | i, vsi->rx_rings[i].rx_hdr_len, | 504 | rx_ring->queue_index, |
| 505 | vsi->rx_rings[i].rx_buf_len, | 505 | rx_ring->reg_idx); |
| 506 | vsi->rx_rings[i].dtype); | 506 | dev_info(&pf->pdev->dev, |
| 507 | dev_info(&pf->pdev->dev, | 507 | " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n", |
| 508 | " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", | 508 | i, rx_ring->rx_hdr_len, |
| 509 | i, vsi->rx_rings[i].hsplit, | 509 | rx_ring->rx_buf_len, |
| 510 | vsi->rx_rings[i].next_to_use, | 510 | rx_ring->dtype); |
| 511 | vsi->rx_rings[i].next_to_clean, | 511 | dev_info(&pf->pdev->dev, |
| 512 | vsi->rx_rings[i].ring_active); | 512 | " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", |
| 513 | dev_info(&pf->pdev->dev, | 513 | i, rx_ring->hsplit, |
| 514 | " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n", | 514 | rx_ring->next_to_use, |
| 515 | i, vsi->rx_rings[i].rx_stats.packets, | 515 | rx_ring->next_to_clean, |
| 516 | vsi->rx_rings[i].rx_stats.bytes, | 516 | rx_ring->ring_active); |
| 517 | vsi->rx_rings[i].rx_stats.non_eop_descs); | 517 | dev_info(&pf->pdev->dev, |
| 518 | dev_info(&pf->pdev->dev, | 518 | " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n", |
| 519 | " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n", | 519 | i, rx_ring->stats.packets, |
| 520 | i, | 520 | rx_ring->stats.bytes, |
| 521 | vsi->rx_rings[i].rx_stats.alloc_rx_page_failed, | 521 | rx_ring->rx_stats.non_eop_descs); |
| 522 | vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed); | 522 | dev_info(&pf->pdev->dev, |
| 523 | dev_info(&pf->pdev->dev, | 523 | " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n", |
| 524 | " rx_rings[%i]: size = %i, dma = 0x%08lx\n", | 524 | i, |
| 525 | i, vsi->rx_rings[i].size, | 525 | rx_ring->rx_stats.alloc_rx_page_failed, |
| 526 | (long unsigned int)vsi->rx_rings[i].dma); | 526 | rx_ring->rx_stats.alloc_rx_buff_failed); |
| 527 | dev_info(&pf->pdev->dev, | 527 | dev_info(&pf->pdev->dev, |
| 528 | " rx_rings[%i]: vsi = %p, q_vector = %p\n", | 528 | " rx_rings[%i]: size = %i, dma = 0x%08lx\n", |
| 529 | i, vsi->rx_rings[i].vsi, | 529 | i, rx_ring->size, |
| 530 | vsi->rx_rings[i].q_vector); | 530 | (long unsigned int)rx_ring->dma); |
| 531 | } | 531 | dev_info(&pf->pdev->dev, |
| 532 | " rx_rings[%i]: vsi = %p, q_vector = %p\n", | ||
| 533 | i, rx_ring->vsi, | ||
| 534 | rx_ring->q_vector); | ||
| 532 | } | 535 | } |
| 533 | if (vsi->tx_rings) { | 536 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
| 534 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 537 | struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); |
| 535 | dev_info(&pf->pdev->dev, | 538 | if (!tx_ring) |
| 536 | " tx_rings[%i]: desc = %p\n", | 539 | continue; |
| 537 | i, vsi->tx_rings[i].desc); | 540 | dev_info(&pf->pdev->dev, |
| 538 | dev_info(&pf->pdev->dev, | 541 | " tx_rings[%i]: desc = %p\n", |
| 539 | " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n", | 542 | i, tx_ring->desc); |
| 540 | i, vsi->tx_rings[i].dev, | 543 | dev_info(&pf->pdev->dev, |
| 541 | vsi->tx_rings[i].netdev, | 544 | " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n", |
| 542 | vsi->tx_rings[i].tx_bi); | 545 | i, tx_ring->dev, |
| 543 | dev_info(&pf->pdev->dev, | 546 | tx_ring->netdev, |
| 544 | " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", | 547 | tx_ring->tx_bi); |
| 545 | i, vsi->tx_rings[i].state, | 548 | dev_info(&pf->pdev->dev, |
| 546 | vsi->tx_rings[i].queue_index, | 549 | " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", |
| 547 | vsi->tx_rings[i].reg_idx); | 550 | i, tx_ring->state, |
| 548 | dev_info(&pf->pdev->dev, | 551 | tx_ring->queue_index, |
| 549 | " tx_rings[%i]: dtype = %d\n", | 552 | tx_ring->reg_idx); |
| 550 | i, vsi->tx_rings[i].dtype); | 553 | dev_info(&pf->pdev->dev, |
| 551 | dev_info(&pf->pdev->dev, | 554 | " tx_rings[%i]: dtype = %d\n", |
| 552 | " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", | 555 | i, tx_ring->dtype); |
| 553 | i, vsi->tx_rings[i].hsplit, | 556 | dev_info(&pf->pdev->dev, |
| 554 | vsi->tx_rings[i].next_to_use, | 557 | " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", |
| 555 | vsi->tx_rings[i].next_to_clean, | 558 | i, tx_ring->hsplit, |
| 556 | vsi->tx_rings[i].ring_active); | 559 | tx_ring->next_to_use, |
| 557 | dev_info(&pf->pdev->dev, | 560 | tx_ring->next_to_clean, |
| 558 | " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", | 561 | tx_ring->ring_active); |
| 559 | i, vsi->tx_rings[i].tx_stats.packets, | 562 | dev_info(&pf->pdev->dev, |
| 560 | vsi->tx_rings[i].tx_stats.bytes, | 563 | " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", |
| 561 | vsi->tx_rings[i].tx_stats.restart_queue); | 564 | i, tx_ring->stats.packets, |
| 562 | dev_info(&pf->pdev->dev, | 565 | tx_ring->stats.bytes, |
| 563 | " tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n", | 566 | tx_ring->tx_stats.restart_queue); |
| 564 | i, | 567 | dev_info(&pf->pdev->dev, |
| 565 | vsi->tx_rings[i].tx_stats.tx_busy, | 568 | " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n", |
| 566 | vsi->tx_rings[i].tx_stats.completed, | 569 | i, |
| 567 | vsi->tx_rings[i].tx_stats.tx_done_old); | 570 | tx_ring->tx_stats.tx_busy, |
| 568 | dev_info(&pf->pdev->dev, | 571 | tx_ring->tx_stats.tx_done_old); |
| 569 | " tx_rings[%i]: size = %i, dma = 0x%08lx\n", | 572 | dev_info(&pf->pdev->dev, |
| 570 | i, vsi->tx_rings[i].size, | 573 | " tx_rings[%i]: size = %i, dma = 0x%08lx\n", |
| 571 | (long unsigned int)vsi->tx_rings[i].dma); | 574 | i, tx_ring->size, |
| 572 | dev_info(&pf->pdev->dev, | 575 | (long unsigned int)tx_ring->dma); |
| 573 | " tx_rings[%i]: vsi = %p, q_vector = %p\n", | 576 | dev_info(&pf->pdev->dev, |
| 574 | i, vsi->tx_rings[i].vsi, | 577 | " tx_rings[%i]: vsi = %p, q_vector = %p\n", |
| 575 | vsi->tx_rings[i].q_vector); | 578 | i, tx_ring->vsi, |
| 576 | dev_info(&pf->pdev->dev, | 579 | tx_ring->q_vector); |
| 577 | " tx_rings[%i]: DCB tc = %d\n", | 580 | dev_info(&pf->pdev->dev, |
| 578 | i, vsi->tx_rings[i].dcb_tc); | 581 | " tx_rings[%i]: DCB tc = %d\n", |
| 579 | } | 582 | i, tx_ring->dcb_tc); |
| 580 | } | 583 | } |
| 584 | rcu_read_unlock(); | ||
| 581 | dev_info(&pf->pdev->dev, | 585 | dev_info(&pf->pdev->dev, |
| 582 | " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n", | 586 | " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n", |
| 583 | vsi->work_limit, vsi->rx_itr_setting, | 587 | vsi->work_limit, vsi->rx_itr_setting, |
| @@ -587,15 +591,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) | |||
| 587 | dev_info(&pf->pdev->dev, | 591 | dev_info(&pf->pdev->dev, |
| 588 | " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n", | 592 | " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n", |
| 589 | vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype); | 593 | vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype); |
| 590 | if (vsi->q_vectors) { | ||
| 591 | for (i = 0; i < vsi->num_q_vectors; i++) { | ||
| 592 | dev_info(&pf->pdev->dev, | ||
| 593 | " q_vectors[%i]: base index = %ld\n", | ||
| 594 | i, ((long int)*vsi->q_vectors[i].rx.ring- | ||
| 595 | (long int)*vsi->q_vectors[0].rx.ring)/ | ||
| 596 | sizeof(struct i40e_ring)); | ||
| 597 | } | ||
| 598 | } | ||
| 599 | dev_info(&pf->pdev->dev, | 594 | dev_info(&pf->pdev->dev, |
| 600 | " num_q_vectors = %i, base_vector = %i\n", | 595 | " num_q_vectors = %i, base_vector = %i\n", |
| 601 | vsi->num_q_vectors, vsi->base_vector); | 596 | vsi->num_q_vectors, vsi->base_vector); |
| @@ -792,9 +787,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, | |||
| 792 | return; | 787 | return; |
| 793 | } | 788 | } |
| 794 | if (is_rx_ring) | 789 | if (is_rx_ring) |
| 795 | ring = vsi->rx_rings[ring_id]; | 790 | ring = *vsi->rx_rings[ring_id]; |
| 796 | else | 791 | else |
| 797 | ring = vsi->tx_rings[ring_id]; | 792 | ring = *vsi->tx_rings[ring_id]; |
| 798 | if (cnt == 2) { | 793 | if (cnt == 2) { |
| 799 | dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n", | 794 | dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n", |
| 800 | vsi_seid, is_rx_ring ? "rx" : "tx", ring_id); | 795 | vsi_seid, is_rx_ring ? "rx" : "tx", ring_id); |
| @@ -1996,7 +1991,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, | |||
| 1996 | goto netdev_ops_write_done; | 1991 | goto netdev_ops_write_done; |
| 1997 | } | 1992 | } |
| 1998 | for (i = 0; i < vsi->num_q_vectors; i++) | 1993 | for (i = 0; i < vsi->num_q_vectors; i++) |
| 1999 | napi_schedule(&vsi->q_vectors[i].napi); | 1994 | napi_schedule(&vsi->q_vectors[i]->napi); |
| 2000 | dev_info(&pf->pdev->dev, "napi called\n"); | 1995 | dev_info(&pf->pdev->dev, "napi called\n"); |
| 2001 | } else { | 1996 | } else { |
| 2002 | dev_info(&pf->pdev->dev, "unknown command '%s'\n", | 1997 | dev_info(&pf->pdev->dev, "unknown command '%s'\n", |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9a76b8cec76c..1b86138fa9e1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
| @@ -399,8 +399,8 @@ static void i40e_get_ringparam(struct net_device *netdev, | |||
| 399 | ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS; | 399 | ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS; |
| 400 | ring->rx_mini_max_pending = 0; | 400 | ring->rx_mini_max_pending = 0; |
| 401 | ring->rx_jumbo_max_pending = 0; | 401 | ring->rx_jumbo_max_pending = 0; |
| 402 | ring->rx_pending = vsi->rx_rings[0].count; | 402 | ring->rx_pending = vsi->rx_rings[0]->count; |
| 403 | ring->tx_pending = vsi->tx_rings[0].count; | 403 | ring->tx_pending = vsi->tx_rings[0]->count; |
| 404 | ring->rx_mini_pending = 0; | 404 | ring->rx_mini_pending = 0; |
| 405 | ring->rx_jumbo_pending = 0; | 405 | ring->rx_jumbo_pending = 0; |
| 406 | } | 406 | } |
| @@ -429,8 +429,8 @@ static int i40e_set_ringparam(struct net_device *netdev, | |||
| 429 | new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE); | 429 | new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE); |
| 430 | 430 | ||
| 431 | /* if nothing to do return success */ | 431 | /* if nothing to do return success */ |
| 432 | if ((new_tx_count == vsi->tx_rings[0].count) && | 432 | if ((new_tx_count == vsi->tx_rings[0]->count) && |
| 433 | (new_rx_count == vsi->rx_rings[0].count)) | 433 | (new_rx_count == vsi->rx_rings[0]->count)) |
| 434 | return 0; | 434 | return 0; |
| 435 | 435 | ||
| 436 | while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) | 436 | while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) |
| @@ -439,8 +439,8 @@ static int i40e_set_ringparam(struct net_device *netdev, | |||
| 439 | if (!netif_running(vsi->netdev)) { | 439 | if (!netif_running(vsi->netdev)) { |
| 440 | /* simple case - set for the next time the netdev is started */ | 440 | /* simple case - set for the next time the netdev is started */ |
| 441 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 441 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
| 442 | vsi->tx_rings[i].count = new_tx_count; | 442 | vsi->tx_rings[i]->count = new_tx_count; |
| 443 | vsi->rx_rings[i].count = new_rx_count; | 443 | vsi->rx_rings[i]->count = new_rx_count; |
| 444 | } | 444 | } |
| 445 | goto done; | 445 | goto done; |
| 446 | } | 446 | } |
| @@ -451,10 +451,10 @@ static int i40e_set_ringparam(struct net_device *netdev, | |||
| 451 | */ | 451 | */ |
| 452 | 452 | ||
| 453 | /* alloc updated Tx resources */ | 453 | /* alloc updated Tx resources */ |
| 454 | if (new_tx_count != vsi->tx_rings[0].count) { | 454 | if (new_tx_count != vsi->tx_rings[0]->count) { |
| 455 | netdev_info(netdev, | 455 | netdev_info(netdev, |
| 456 | "Changing Tx descriptor count from %d to %d.\n", | 456 | "Changing Tx descriptor count from %d to %d.\n", |
| 457 | vsi->tx_rings[0].count, new_tx_count); | 457 | vsi->tx_rings[0]->count, new_tx_count); |
| 458 | tx_rings = kcalloc(vsi->alloc_queue_pairs, | 458 | tx_rings = kcalloc(vsi->alloc_queue_pairs, |
| 459 | sizeof(struct i40e_ring), GFP_KERNEL); | 459 | sizeof(struct i40e_ring), GFP_KERNEL); |
| 460 | if (!tx_rings) { | 460 | if (!tx_rings) { |
| @@ -464,7 +464,7 @@ static int i40e_set_ringparam(struct net_device *netdev, | |||
| 464 | 464 | ||
| 465 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 465 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
| 466 | /* clone ring and setup updated count */ | 466 | /* clone ring and setup updated count */ |
| 467 | tx_rings[i] = vsi->tx_rings[i]; | 467 | tx_rings[i] = *vsi->tx_rings[i]; |
| 468 | tx_rings[i].count = new_tx_count; | 468 | tx_rings[i].count = new_tx_count; |
| 469 | err = i40e_setup_tx_descriptors(&tx_rings[i]); | 469 | err = i40e_setup_tx_descriptors(&tx_rings[i]); |
| 470 | if (err) { | 470 | if (err) { |
| @@ -481,10 +481,10 @@ static int i40e_set_ringparam(struct net_device *netdev, | |||
| 481 | } | 481 | } |
| 482 | 482 | ||
| 483 | /* alloc updated Rx resources */ | 483 | /* alloc updated Rx resources */ |
| 484 | if (new_rx_count != vsi->rx_rings[0].count) { | 484 | if (new_rx_count != vsi->rx_rings[0]->count) { |
| 485 | netdev_info(netdev, | 485 | netdev_info(netdev, |
| 486 | "Changing Rx descriptor count from %d to %d\n", | 486 | "Changing Rx descriptor count from %d to %d\n", |
| 487 | vsi->rx_rings[0].count, new_rx_count); | 487 | vsi->rx_rings[0]->count, new_rx_count); |
| 488 | rx_rings = kcalloc(vsi->alloc_queue_pairs, | 488 | rx_rings = kcalloc(vsi->alloc_queue_pairs, |
| 489 | sizeof(struct i40e_ring), GFP_KERNEL); | 489 | sizeof(struct i40e_ring), GFP_KERNEL); |
| 490 | if (!rx_rings) { | 490 | if (!rx_rings) { |
| @@ -494,7 +494,7 @@ static int i40e_set_ringparam(struct net_device *netdev, | |||
| 494 | 494 | ||
| 495 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 495 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
| 496 | /* clone ring and setup updated count */ | 496 | /* clone ring and setup updated count */ |
| 497 | rx_rings[i] = vsi->rx_rings[i]; | 497 | rx_rings[i] = *vsi->rx_rings[i]; |
| 498 | rx_rings[i].count = new_rx_count; | 498 | rx_rings[i].count = new_rx_count; |
| 499 | err = i40e_setup_rx_descriptors(&rx_rings[i]); | 499 | err = i40e_setup_rx_descriptors(&rx_rings[i]); |
| 500 | if (err) { | 500 | if (err) { |
| @@ -517,8 +517,8 @@ static int i40e_set_ringparam(struct net_device *netdev, | |||
| 517 | 517 | ||
| 518 | if (tx_rings) { | 518 | if (tx_rings) { |
| 519 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 519 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
| 520 | i40e_free_tx_resources(&vsi->tx_rings[i]); | 520 | i40e_free_tx_resources(vsi->tx_rings[i]); |
| 521 | vsi->tx_rings[i] = tx_rings[i]; | 521 | *vsi->tx_rings[i] = tx_rings[i]; |
| 522 | } | 522 | } |
| 523 | kfree(tx_rings); | 523 | kfree(tx_rings); |
| 524 | tx_rings = NULL; | 524 | tx_rings = NULL; |
| @@ -526,8 +526,8 @@ static int i40e_set_ringparam(struct net_device *netdev, | |||
| 526 | 526 | ||
| 527 | if (rx_rings) { | 527 | if (rx_rings) { |
| 528 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 528 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
| 529 | i40e_free_rx_resources(&vsi->rx_rings[i]); | 529 | i40e_free_rx_resources(vsi->rx_rings[i]); |
| 530 | vsi->rx_rings[i] = rx_rings[i]; | 530 | *vsi->rx_rings[i] = rx_rings[i]; |
| 531 | } | 531 | } |
| 532 | kfree(rx_rings); | 532 | kfree(rx_rings); |
| 533 | rx_rings = NULL; | 533 | rx_rings = NULL; |
| @@ -579,6 +579,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, | |||
| 579 | char *p; | 579 | char *p; |
| 580 | int j; | 580 | int j; |
| 581 | struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); | 581 | struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); |
| 582 | unsigned int start; | ||
| 582 | 583 | ||
| 583 | i40e_update_stats(vsi); | 584 | i40e_update_stats(vsi); |
| 584 | 585 | ||
| @@ -587,14 +588,30 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, | |||
| 587 | data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat == | 588 | data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat == |
| 588 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; | 589 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; |
| 589 | } | 590 | } |
| 590 | for (j = 0; j < vsi->num_queue_pairs; j++) { | 591 | rcu_read_lock(); |
| 591 | data[i++] = vsi->tx_rings[j].tx_stats.packets; | 592 | for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) { |
| 592 | data[i++] = vsi->tx_rings[j].tx_stats.bytes; | 593 | struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]); |
| 593 | } | 594 | struct i40e_ring *rx_ring; |
| 594 | for (j = 0; j < vsi->num_queue_pairs; j++) { | 595 | |
| 595 | data[i++] = vsi->rx_rings[j].rx_stats.packets; | 596 | if (!tx_ring) |
| 596 | data[i++] = vsi->rx_rings[j].rx_stats.bytes; | 597 | continue; |
| 598 | |||
| 599 | /* process Tx ring statistics */ | ||
| 600 | do { | ||
| 601 | start = u64_stats_fetch_begin_bh(&tx_ring->syncp); | ||
| 602 | data[i] = tx_ring->stats.packets; | ||
| 603 | data[i + 1] = tx_ring->stats.bytes; | ||
| 604 | } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start)); | ||
| 605 | |||
| 606 | /* Rx ring is the 2nd half of the queue pair */ | ||
| 607 | rx_ring = &tx_ring[1]; | ||
| 608 | do { | ||
| 609 | start = u64_stats_fetch_begin_bh(&rx_ring->syncp); | ||
| 610 | data[i + 2] = rx_ring->stats.packets; | ||
| 611 | data[i + 3] = rx_ring->stats.bytes; | ||
| 612 | } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start)); | ||
| 597 | } | 613 | } |
| 614 | rcu_read_unlock(); | ||
| 598 | if (vsi == pf->vsi[pf->lan_vsi]) { | 615 | if (vsi == pf->vsi[pf->lan_vsi]) { |
| 599 | for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { | 616 | for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { |
| 600 | p = (char *)pf + i40e_gstrings_stats[j].stat_offset; | 617 | p = (char *)pf + i40e_gstrings_stats[j].stat_offset; |
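
The stats loop above is one half of a u64_stats_sync pair: the reader spins on the begin/retry sequence so 32-bit machines get a consistent 64-bit snapshot. The matching writer side is expected to live in the i40e_txrx.c changes of this series (not shown in this excerpt); a hedged sketch of what that update normally looks like:

	/* in the Tx/Rx clean routines (assumed, per the usual
	 * u64_stats_sync usage; field names follow this patch)
	 */
	u64_stats_update_begin(&ring->syncp);
	ring->stats.packets += total_packets;
	ring->stats.bytes += total_bytes;
	u64_stats_update_end(&ring->syncp);

On 64-bit kernels both sides compile down to plain loads and stores; the sequence counter only does real work where 64-bit counters cannot be read atomically.
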
| @@ -641,8 +658,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, | |||
| 641 | p += ETH_GSTRING_LEN; | 658 | p += ETH_GSTRING_LEN; |
| 642 | snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); | 659 | snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); |
| 643 | p += ETH_GSTRING_LEN; | 660 | p += ETH_GSTRING_LEN; |
| 644 | } | ||
| 645 | for (i = 0; i < vsi->num_queue_pairs; i++) { | ||
| 646 | snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); | 661 | snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); |
| 647 | p += ETH_GSTRING_LEN; | 662 | p += ETH_GSTRING_LEN; |
| 648 | snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); | 663 | snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); |
| @@ -910,8 +925,8 @@ static int i40e_set_coalesce(struct net_device *netdev, | |||
| 910 | } | 925 | } |
| 911 | 926 | ||
| 912 | vector = vsi->base_vector; | 927 | vector = vsi->base_vector; |
| 913 | q_vector = vsi->q_vectors; | 928 | for (i = 0; i < vsi->num_q_vectors; i++, vector++) { |
| 914 | for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) { | 929 | q_vector = vsi->q_vectors[i]; |
| 915 | q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); | 930 | q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); |
| 916 | wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr); | 931 | wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr); |
| 917 | q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); | 932 | q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 221aa4795017..fbe7fe2914a9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
| @@ -36,7 +36,7 @@ static const char i40e_driver_string[] = | |||
| 36 | 36 | ||
| 37 | #define DRV_VERSION_MAJOR 0 | 37 | #define DRV_VERSION_MAJOR 0 |
| 38 | #define DRV_VERSION_MINOR 3 | 38 | #define DRV_VERSION_MINOR 3 |
| 39 | #define DRV_VERSION_BUILD 9 | 39 | #define DRV_VERSION_BUILD 10 |
| 40 | #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ | 40 | #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ |
| 41 | __stringify(DRV_VERSION_MINOR) "." \ | 41 | __stringify(DRV_VERSION_MINOR) "." \ |
| 42 | __stringify(DRV_VERSION_BUILD) DRV_KERN | 42 | __stringify(DRV_VERSION_BUILD) DRV_KERN |
| @@ -347,14 +347,53 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) | |||
| 347 | **/ | 347 | **/ |
| 348 | static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( | 348 | static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( |
| 349 | struct net_device *netdev, | 349 | struct net_device *netdev, |
| 350 | struct rtnl_link_stats64 *storage) | 350 | struct rtnl_link_stats64 *stats) |
| 351 | { | 351 | { |
| 352 | struct i40e_netdev_priv *np = netdev_priv(netdev); | 352 | struct i40e_netdev_priv *np = netdev_priv(netdev); |
| 353 | struct i40e_vsi *vsi = np->vsi; | 353 | struct i40e_vsi *vsi = np->vsi; |
| 354 | struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); | ||
| 355 | int i; | ||
| 356 | |||
| 357 | rcu_read_lock(); | ||
| 358 | for (i = 0; i < vsi->num_queue_pairs; i++) { | ||
| 359 | struct i40e_ring *tx_ring, *rx_ring; | ||
| 360 | u64 bytes, packets; | ||
| 361 | unsigned int start; | ||
| 362 | |||
| 363 | tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); | ||
| 364 | if (!tx_ring) | ||
| 365 | continue; | ||
| 354 | 366 | ||
| 355 | *storage = *i40e_get_vsi_stats_struct(vsi); | 367 | do { |
| 368 | start = u64_stats_fetch_begin_bh(&tx_ring->syncp); | ||
| 369 | packets = tx_ring->stats.packets; | ||
| 370 | bytes = tx_ring->stats.bytes; | ||
| 371 | } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start)); | ||
| 372 | |||
| 373 | stats->tx_packets += packets; | ||
| 374 | stats->tx_bytes += bytes; | ||
| 375 | rx_ring = &tx_ring[1]; | ||
| 376 | |||
| 377 | do { | ||
| 378 | start = u64_stats_fetch_begin_bh(&rx_ring->syncp); | ||
| 379 | packets = rx_ring->stats.packets; | ||
| 380 | bytes = rx_ring->stats.bytes; | ||
| 381 | } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start)); | ||
| 356 | 382 | ||
| 357 | return storage; | 383 | stats->rx_packets += packets; |
| 384 | stats->rx_bytes += bytes; | ||
| 385 | } | ||
| 386 | rcu_read_unlock(); | ||
| 387 | |||
| 388 | /* following stats updated by ixgbe_watchdog_task() */ | ||
| 389 | stats->multicast = vsi_stats->multicast; | ||
| 390 | stats->tx_errors = vsi_stats->tx_errors; | ||
| 391 | stats->tx_dropped = vsi_stats->tx_dropped; | ||
| 392 | stats->rx_errors = vsi_stats->rx_errors; | ||
| 393 | stats->rx_crc_errors = vsi_stats->rx_crc_errors; | ||
| 394 | stats->rx_length_errors = vsi_stats->rx_length_errors; | ||
| 395 | |||
| 396 | return stats; | ||
| 358 | } | 397 | } |
| 359 | 398 | ||
| 360 | /** | 399 | /** |
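
Note that both this function and i40e_update_stats() below reach the Rx ring as tx_ring[1] instead of looking it up in vsi->rx_rings. That only works because i40e_alloc_rings() (at the end of this excerpt) allocates each queue pair as a single two-ring block; a condensed sketch of the assumed layout:

	/* one allocation per queue pair: [0] = Tx ring, [1] = Rx ring
	 * (error handling omitted)
	 */
	struct i40e_ring *tx_ring = kzalloc(sizeof(struct i40e_ring) * 2,
					    GFP_KERNEL);
	struct i40e_ring *rx_ring = &tx_ring[1];

	vsi->tx_rings[i] = tx_ring;
	vsi->rx_rings[i] = rx_ring;
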
| @@ -376,10 +415,14 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi) | |||
| 376 | memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); | 415 | memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); |
| 377 | if (vsi->rx_rings) | 416 | if (vsi->rx_rings) |
| 378 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 417 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
| 379 | memset(&vsi->rx_rings[i].rx_stats, 0 , | 418 | memset(&vsi->rx_rings[i]->stats, 0 , |
| 380 | sizeof(vsi->rx_rings[i].rx_stats)); | 419 | sizeof(vsi->rx_rings[i]->stats)); |
| 381 | memset(&vsi->tx_rings[i].tx_stats, 0, | 420 | memset(&vsi->rx_rings[i]->rx_stats, 0 , |
| 382 | sizeof(vsi->tx_rings[i].tx_stats)); | 421 | sizeof(vsi->rx_rings[i]->rx_stats)); |
| 422 | memset(&vsi->tx_rings[i]->stats, 0 , | ||
| 423 | sizeof(vsi->tx_rings[i]->stats)); | ||
| 424 | memset(&vsi->tx_rings[i]->tx_stats, 0, | ||
| 425 | sizeof(vsi->tx_rings[i]->tx_stats)); | ||
| 383 | } | 426 | } |
| 384 | vsi->stat_offsets_loaded = false; | 427 | vsi->stat_offsets_loaded = false; |
| 385 | } | 428 | } |
| @@ -598,7 +641,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf) | |||
| 598 | continue; | 641 | continue; |
| 599 | 642 | ||
| 600 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 643 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
| 601 | struct i40e_ring *ring = &vsi->tx_rings[i]; | 644 | struct i40e_ring *ring = vsi->tx_rings[i]; |
| 602 | clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); | 645 | clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); |
| 603 | } | 646 | } |
| 604 | } | 647 | } |
| @@ -652,7 +695,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) | |||
| 652 | continue; | 695 | continue; |
| 653 | 696 | ||
| 654 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 697 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
| 655 | struct i40e_ring *ring = &vsi->tx_rings[i]; | 698 | struct i40e_ring *ring = vsi->tx_rings[i]; |
| 656 | 699 | ||
| 657 | tc = ring->dcb_tc; | 700 | tc = ring->dcb_tc; |
| 658 | if (xoff[tc]) | 701 | if (xoff[tc]) |
| @@ -704,21 +747,38 @@ void i40e_update_stats(struct i40e_vsi *vsi) | |||
| 704 | tx_restart = tx_busy = 0; | 747 | tx_restart = tx_busy = 0; |
| 705 | rx_page = 0; | 748 | rx_page = 0; |
| 706 | rx_buf = 0; | 749 | rx_buf = 0; |
| 750 | rcu_read_lock(); | ||
| 707 | for (q = 0; q < vsi->num_queue_pairs; q++) { | 751 | for (q = 0; q < vsi->num_queue_pairs; q++) { |
| 708 | struct i40e_ring *p; | 752 | struct i40e_ring *p; |
| 753 | u64 bytes, packets; | ||
| 754 | unsigned int start; | ||
| 709 | 755 | ||
| 710 | p = &vsi->rx_rings[q]; | 756 | /* locate Tx ring */ |
| 711 | rx_b += p->rx_stats.bytes; | 757 | p = ACCESS_ONCE(vsi->tx_rings[q]); |
| 712 | rx_p += p->rx_stats.packets; | ||
| 713 | rx_buf += p->rx_stats.alloc_rx_buff_failed; | ||
| 714 | rx_page += p->rx_stats.alloc_rx_page_failed; | ||
| 715 | 758 | ||
| 716 | p = &vsi->tx_rings[q]; | 759 | do { |
| 717 | tx_b += p->tx_stats.bytes; | 760 | start = u64_stats_fetch_begin_bh(&p->syncp); |
| 718 | tx_p += p->tx_stats.packets; | 761 | packets = p->stats.packets; |
| 762 | bytes = p->stats.bytes; | ||
| 763 | } while (u64_stats_fetch_retry_bh(&p->syncp, start)); | ||
| 764 | tx_b += bytes; | ||
| 765 | tx_p += packets; | ||
| 719 | tx_restart += p->tx_stats.restart_queue; | 766 | tx_restart += p->tx_stats.restart_queue; |
| 720 | tx_busy += p->tx_stats.tx_busy; | 767 | tx_busy += p->tx_stats.tx_busy; |
| 768 | |||
| 769 | /* Rx queue is part of the same block as Tx queue */ | ||
| 770 | p = &p[1]; | ||
| 771 | do { | ||
| 772 | start = u64_stats_fetch_begin_bh(&p->syncp); | ||
| 773 | packets = p->stats.packets; | ||
| 774 | bytes = p->stats.bytes; | ||
| 775 | } while (u64_stats_fetch_retry_bh(&p->syncp, start)); | ||
| 776 | rx_b += bytes; | ||
| 777 | rx_p += packets; | ||
| 778 | rx_buf += p->rx_stats.alloc_rx_buff_failed; | ||
| 779 | rx_page += p->rx_stats.alloc_rx_page_failed; | ||
| 721 | } | 780 | } |
| 781 | rcu_read_unlock(); | ||
| 722 | vsi->tx_restart = tx_restart; | 782 | vsi->tx_restart = tx_restart; |
| 723 | vsi->tx_busy = tx_busy; | 783 | vsi->tx_busy = tx_busy; |
| 724 | vsi->rx_page_failed = rx_page; | 784 | vsi->rx_page_failed = rx_page; |
| @@ -1988,7 +2048,7 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) | |||
| 1988 | int i, err = 0; | 2048 | int i, err = 0; |
| 1989 | 2049 | ||
| 1990 | for (i = 0; i < vsi->num_queue_pairs && !err; i++) | 2050 | for (i = 0; i < vsi->num_queue_pairs && !err; i++) |
| 1991 | err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]); | 2051 | err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); |
| 1992 | 2052 | ||
| 1993 | return err; | 2053 | return err; |
| 1994 | } | 2054 | } |
| @@ -2004,8 +2064,8 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) | |||
| 2004 | int i; | 2064 | int i; |
| 2005 | 2065 | ||
| 2006 | for (i = 0; i < vsi->num_queue_pairs; i++) | 2066 | for (i = 0; i < vsi->num_queue_pairs; i++) |
| 2007 | if (vsi->tx_rings[i].desc) | 2067 | if (vsi->tx_rings[i]->desc) |
| 2008 | i40e_free_tx_resources(&vsi->tx_rings[i]); | 2068 | i40e_free_tx_resources(vsi->tx_rings[i]); |
| 2009 | } | 2069 | } |
| 2010 | 2070 | ||
| 2011 | /** | 2071 | /** |
| @@ -2023,7 +2083,7 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) | |||
| 2023 | int i, err = 0; | 2083 | int i, err = 0; |
| 2024 | 2084 | ||
| 2025 | for (i = 0; i < vsi->num_queue_pairs && !err; i++) | 2085 | for (i = 0; i < vsi->num_queue_pairs && !err; i++) |
| 2026 | err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]); | 2086 | err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); |
| 2027 | return err; | 2087 | return err; |
| 2028 | } | 2088 | } |
| 2029 | 2089 | ||
| @@ -2038,8 +2098,8 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) | |||
| 2038 | int i; | 2098 | int i; |
| 2039 | 2099 | ||
| 2040 | for (i = 0; i < vsi->num_queue_pairs; i++) | 2100 | for (i = 0; i < vsi->num_queue_pairs; i++) |
| 2041 | if (vsi->rx_rings[i].desc) | 2101 | if (vsi->rx_rings[i]->desc) |
| 2042 | i40e_free_rx_resources(&vsi->rx_rings[i]); | 2102 | i40e_free_rx_resources(vsi->rx_rings[i]); |
| 2043 | } | 2103 | } |
| 2044 | 2104 | ||
| 2045 | /** | 2105 | /** |
| @@ -2223,8 +2283,8 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) | |||
| 2223 | int err = 0; | 2283 | int err = 0; |
| 2224 | u16 i; | 2284 | u16 i; |
| 2225 | 2285 | ||
| 2226 | for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++) | 2286 | for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) |
| 2227 | err = i40e_configure_tx_ring(&vsi->tx_rings[i]); | 2287 | err = i40e_configure_tx_ring(vsi->tx_rings[i]); |
| 2228 | 2288 | ||
| 2229 | return err; | 2289 | return err; |
| 2230 | } | 2290 | } |
| @@ -2274,7 +2334,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) | |||
| 2274 | 2334 | ||
| 2275 | /* set up individual rings */ | 2335 | /* set up individual rings */ |
| 2276 | for (i = 0; i < vsi->num_queue_pairs && !err; i++) | 2336 | for (i = 0; i < vsi->num_queue_pairs && !err; i++) |
| 2277 | err = i40e_configure_rx_ring(&vsi->rx_rings[i]); | 2337 | err = i40e_configure_rx_ring(vsi->rx_rings[i]); |
| 2278 | 2338 | ||
| 2279 | return err; | 2339 | return err; |
| 2280 | } | 2340 | } |
| @@ -2298,8 +2358,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) | |||
| 2298 | qoffset = vsi->tc_config.tc_info[n].qoffset; | 2358 | qoffset = vsi->tc_config.tc_info[n].qoffset; |
| 2299 | qcount = vsi->tc_config.tc_info[n].qcount; | 2359 | qcount = vsi->tc_config.tc_info[n].qcount; |
| 2300 | for (i = qoffset; i < (qoffset + qcount); i++) { | 2360 | for (i = qoffset; i < (qoffset + qcount); i++) { |
| 2301 | struct i40e_ring *rx_ring = &vsi->rx_rings[i]; | 2361 | struct i40e_ring *rx_ring = vsi->rx_rings[i]; |
| 2302 | struct i40e_ring *tx_ring = &vsi->tx_rings[i]; | 2362 | struct i40e_ring *tx_ring = vsi->tx_rings[i]; |
| 2303 | rx_ring->dcb_tc = n; | 2363 | rx_ring->dcb_tc = n; |
| 2304 | tx_ring->dcb_tc = n; | 2364 | tx_ring->dcb_tc = n; |
| 2305 | } | 2365 | } |
| @@ -2354,8 +2414,8 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) | |||
| 2354 | */ | 2414 | */ |
| 2355 | qp = vsi->base_queue; | 2415 | qp = vsi->base_queue; |
| 2356 | vector = vsi->base_vector; | 2416 | vector = vsi->base_vector; |
| 2357 | q_vector = vsi->q_vectors; | 2417 | for (i = 0; i < vsi->num_q_vectors; i++, vector++) { |
| 2358 | for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) { | 2418 | q_vector = vsi->q_vectors[i]; |
| 2359 | q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); | 2419 | q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); |
| 2360 | q_vector->rx.latency_range = I40E_LOW_LATENCY; | 2420 | q_vector->rx.latency_range = I40E_LOW_LATENCY; |
| 2361 | wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), | 2421 | wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), |
| @@ -2435,7 +2495,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw) | |||
| 2435 | **/ | 2495 | **/ |
| 2436 | static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) | 2496 | static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) |
| 2437 | { | 2497 | { |
| 2438 | struct i40e_q_vector *q_vector = vsi->q_vectors; | 2498 | struct i40e_q_vector *q_vector = vsi->q_vectors[0]; |
| 2439 | struct i40e_pf *pf = vsi->back; | 2499 | struct i40e_pf *pf = vsi->back; |
| 2440 | struct i40e_hw *hw = &pf->hw; | 2500 | struct i40e_hw *hw = &pf->hw; |
| 2441 | u32 val; | 2501 | u32 val; |
| @@ -2512,7 +2572,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data) | |||
| 2512 | { | 2572 | { |
| 2513 | struct i40e_q_vector *q_vector = data; | 2573 | struct i40e_q_vector *q_vector = data; |
| 2514 | 2574 | ||
| 2515 | if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0]) | 2575 | if (!q_vector->tx.ring && !q_vector->rx.ring) |
| 2516 | return IRQ_HANDLED; | 2576 | return IRQ_HANDLED; |
| 2517 | 2577 | ||
| 2518 | napi_schedule(&q_vector->napi); | 2578 | napi_schedule(&q_vector->napi); |
| @@ -2529,7 +2589,7 @@ static irqreturn_t i40e_fdir_clean_rings(int irq, void *data) | |||
| 2529 | { | 2589 | { |
| 2530 | struct i40e_q_vector *q_vector = data; | 2590 | struct i40e_q_vector *q_vector = data; |
| 2531 | 2591 | ||
| 2532 | if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0]) | 2592 | if (!q_vector->tx.ring && !q_vector->rx.ring) |
| 2533 | return IRQ_HANDLED; | 2593 | return IRQ_HANDLED; |
| 2534 | 2594 | ||
| 2535 | pr_info("fdir ring cleaning needed\n"); | 2595 | pr_info("fdir ring cleaning needed\n"); |
| @@ -2554,16 +2614,16 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) | |||
| 2554 | int vector, err; | 2614 | int vector, err; |
| 2555 | 2615 | ||
| 2556 | for (vector = 0; vector < q_vectors; vector++) { | 2616 | for (vector = 0; vector < q_vectors; vector++) { |
| 2557 | struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]); | 2617 | struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; |
| 2558 | 2618 | ||
| 2559 | if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) { | 2619 | if (q_vector->tx.ring && q_vector->rx.ring) { |
| 2560 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, | 2620 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, |
| 2561 | "%s-%s-%d", basename, "TxRx", rx_int_idx++); | 2621 | "%s-%s-%d", basename, "TxRx", rx_int_idx++); |
| 2562 | tx_int_idx++; | 2622 | tx_int_idx++; |
| 2563 | } else if (q_vector->rx.ring[0]) { | 2623 | } else if (q_vector->rx.ring) { |
| 2564 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, | 2624 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, |
| 2565 | "%s-%s-%d", basename, "rx", rx_int_idx++); | 2625 | "%s-%s-%d", basename, "rx", rx_int_idx++); |
| 2566 | } else if (q_vector->tx.ring[0]) { | 2626 | } else if (q_vector->tx.ring) { |
| 2567 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, | 2627 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, |
| 2568 | "%s-%s-%d", basename, "tx", tx_int_idx++); | 2628 | "%s-%s-%d", basename, "tx", tx_int_idx++); |
| 2569 | } else { | 2629 | } else { |
| @@ -2611,8 +2671,8 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) | |||
| 2611 | int i; | 2671 | int i; |
| 2612 | 2672 | ||
| 2613 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 2673 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
| 2614 | wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0); | 2674 | wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0); |
| 2615 | wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0); | 2675 | wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0); |
| 2616 | } | 2676 | } |
| 2617 | 2677 | ||
| 2618 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { | 2678 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { |
| @@ -2705,7 +2765,7 @@ static irqreturn_t i40e_intr(int irq, void *data) | |||
| 2705 | i40e_flush(hw); | 2765 | i40e_flush(hw); |
| 2706 | 2766 | ||
| 2707 | if (!test_bit(__I40E_DOWN, &pf->state)) | 2767 | if (!test_bit(__I40E_DOWN, &pf->state)) |
| 2708 | napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi); | 2768 | napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); |
| 2709 | } | 2769 | } |
| 2710 | 2770 | ||
| 2711 | if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { | 2771 | if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { |
| @@ -2774,40 +2834,26 @@ static irqreturn_t i40e_intr(int irq, void *data) | |||
| 2774 | } | 2834 | } |
| 2775 | 2835 | ||
| 2776 | /** | 2836 | /** |
| 2777 | * i40e_map_vector_to_rxq - Assigns the Rx queue to the vector | 2837 | * i40e_map_vector_to_qp - Assigns the queue pair to the vector |
| 2778 | * @vsi: the VSI being configured | 2838 | * @vsi: the VSI being configured |
| 2779 | * @v_idx: vector index | 2839 | * @v_idx: vector index |
| 2780 | * @r_idx: rx queue index | 2840 | * @qp_idx: queue pair index |
| 2781 | **/ | 2841 | **/ |
| 2782 | static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx) | 2842 | static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) |
| 2783 | { | 2843 | { |
| 2784 | struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]); | 2844 | struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; |
| 2785 | struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]); | 2845 | struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; |
| 2786 | 2846 | struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; | |
| 2787 | rx_ring->q_vector = q_vector; | ||
| 2788 | q_vector->rx.ring[q_vector->rx.count] = rx_ring; | ||
| 2789 | q_vector->rx.count++; | ||
| 2790 | q_vector->rx.latency_range = I40E_LOW_LATENCY; | ||
| 2791 | q_vector->vsi = vsi; | ||
| 2792 | } | ||
| 2793 | |||
| 2794 | /** | ||
| 2795 | * i40e_map_vector_to_txq - Assigns the Tx queue to the vector | ||
| 2796 | * @vsi: the VSI being configured | ||
| 2797 | * @v_idx: vector index | ||
| 2798 | * @t_idx: tx queue index | ||
| 2799 | **/ | ||
| 2800 | static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx) | ||
| 2801 | { | ||
| 2802 | struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]); | ||
| 2803 | struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]); | ||
| 2804 | 2847 | ||
| 2805 | tx_ring->q_vector = q_vector; | 2848 | tx_ring->q_vector = q_vector; |
| 2806 | q_vector->tx.ring[q_vector->tx.count] = tx_ring; | 2849 | tx_ring->next = q_vector->tx.ring; |
| 2850 | q_vector->tx.ring = tx_ring; | ||
| 2807 | q_vector->tx.count++; | 2851 | q_vector->tx.count++; |
| 2808 | q_vector->tx.latency_range = I40E_LOW_LATENCY; | 2852 | |
| 2809 | q_vector->num_ringpairs++; | 2853 | rx_ring->q_vector = q_vector; |
| 2810 | q_vector->vsi = vsi; | 2854 | rx_ring->next = q_vector->rx.ring; |
| 2855 | q_vector->rx.ring = rx_ring; | ||
| 2856 | q_vector->rx.count++; | ||
| 2811 | } | 2857 | } |
| 2812 | 2858 | ||
| 2813 | /** | 2859 | /** |
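
With the fixed per-vector ring arrays gone, each vector now keeps its Tx and Rx rings on singly linked lists headed in the ring containers (q_vector->tx.ring / q_vector->rx.ring), and map_vector_to_qp() pushes new rings onto the front of those lists. The i40e_for_each_ring() helper used by i40e_free_q_vector() further down is presumably defined in the i40e_txrx.h part of this series (not shown here); a hypothetical equivalent that walks the same chain:

	/* walk the ->next chain built up by map_vector_to_qp() */
	struct i40e_ring *ring;

	for (ring = q_vector->tx.ring; ring; ring = ring->next)
		ring->q_vector = NULL;
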
| @@ -2823,7 +2869,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) | |||
| 2823 | { | 2869 | { |
| 2824 | int qp_remaining = vsi->num_queue_pairs; | 2870 | int qp_remaining = vsi->num_queue_pairs; |
| 2825 | int q_vectors = vsi->num_q_vectors; | 2871 | int q_vectors = vsi->num_q_vectors; |
| 2826 | int qp_per_vector; | 2872 | int num_ringpairs; |
| 2827 | int v_start = 0; | 2873 | int v_start = 0; |
| 2828 | int qp_idx = 0; | 2874 | int qp_idx = 0; |
| 2829 | 2875 | ||
| @@ -2831,11 +2877,21 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) | |||
| 2831 | * group them so there are multiple queues per vector. | 2877 | * group them so there are multiple queues per vector. |
| 2832 | */ | 2878 | */ |
| 2833 | for (; v_start < q_vectors && qp_remaining; v_start++) { | 2879 | for (; v_start < q_vectors && qp_remaining; v_start++) { |
| 2834 | qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); | 2880 | struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; |
| 2835 | for (; qp_per_vector; | 2881 | |
| 2836 | qp_per_vector--, qp_idx++, qp_remaining--) { | 2882 | num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); |
| 2837 | map_vector_to_rxq(vsi, v_start, qp_idx); | 2883 | |
| 2838 | map_vector_to_txq(vsi, v_start, qp_idx); | 2884 | q_vector->num_ringpairs = num_ringpairs; |
| 2885 | |||
| 2886 | q_vector->rx.count = 0; | ||
| 2887 | q_vector->tx.count = 0; | ||
| 2888 | q_vector->rx.ring = NULL; | ||
| 2889 | q_vector->tx.ring = NULL; | ||
| 2890 | |||
| 2891 | while (num_ringpairs--) { | ||
| 2892 | map_vector_to_qp(vsi, v_start, qp_idx); | ||
| 2893 | qp_idx++; | ||
| 2894 | qp_remaining--; | ||
| 2839 | } | 2895 | } |
| 2840 | } | 2896 | } |
| 2841 | } | 2897 | } |
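
A quick worked example of the spread computed above: the ceiling is recalculated against the vectors that are still unassigned, so the earlier vectors absorb any remainder.

	/* e.g. 14 queue pairs over 4 vectors:
	 *   v0: DIV_ROUND_UP(14, 4) = 4   (10 pairs left)
	 *   v1: DIV_ROUND_UP(10, 3) = 4   ( 6 pairs left)
	 *   v2: DIV_ROUND_UP( 6, 2) = 3   ( 3 pairs left)
	 *   v3: DIV_ROUND_UP( 3, 1) = 3   ( 0 pairs left)
	 */
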
| @@ -2887,7 +2943,7 @@ static void i40e_netpoll(struct net_device *netdev) | |||
| 2887 | pf->flags |= I40E_FLAG_IN_NETPOLL; | 2943 | pf->flags |= I40E_FLAG_IN_NETPOLL; |
| 2888 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { | 2944 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { |
| 2889 | for (i = 0; i < vsi->num_q_vectors; i++) | 2945 | for (i = 0; i < vsi->num_q_vectors; i++) |
| 2890 | i40e_msix_clean_rings(0, &vsi->q_vectors[i]); | 2946 | i40e_msix_clean_rings(0, vsi->q_vectors[i]); |
| 2891 | } else { | 2947 | } else { |
| 2892 | i40e_intr(pf->pdev->irq, netdev); | 2948 | i40e_intr(pf->pdev->irq, netdev); |
| 2893 | } | 2949 | } |
| @@ -3073,14 +3129,14 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) | |||
| 3073 | u16 vector = i + base; | 3129 | u16 vector = i + base; |
| 3074 | 3130 | ||
| 3075 | /* free only the irqs that were actually requested */ | 3131 | /* free only the irqs that were actually requested */ |
| 3076 | if (vsi->q_vectors[i].num_ringpairs == 0) | 3132 | if (vsi->q_vectors[i]->num_ringpairs == 0) |
| 3077 | continue; | 3133 | continue; |
| 3078 | 3134 | ||
| 3079 | /* clear the affinity_mask in the IRQ descriptor */ | 3135 | /* clear the affinity_mask in the IRQ descriptor */ |
| 3080 | irq_set_affinity_hint(pf->msix_entries[vector].vector, | 3136 | irq_set_affinity_hint(pf->msix_entries[vector].vector, |
| 3081 | NULL); | 3137 | NULL); |
| 3082 | free_irq(pf->msix_entries[vector].vector, | 3138 | free_irq(pf->msix_entries[vector].vector, |
| 3083 | &vsi->q_vectors[i]); | 3139 | vsi->q_vectors[i]); |
| 3084 | 3140 | ||
| 3085 | /* Tear down the interrupt queue link list | 3141 | /* Tear down the interrupt queue link list |
| 3086 | * | 3142 | * |
| @@ -3164,6 +3220,39 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) | |||
| 3164 | } | 3220 | } |
| 3165 | 3221 | ||
| 3166 | /** | 3222 | /** |
| 3223 | * i40e_free_q_vector - Free memory allocated for specific interrupt vector | ||
| 3224 | * @vsi: the VSI being configured | ||
| 3225 | * @v_idx: Index of vector to be freed | ||
| 3226 | * | ||
| 3227 | * This function frees the memory allocated to the q_vector. In addition if | ||
| 3228 | * NAPI is enabled it will delete any references to the NAPI struct prior | ||
| 3229 | * to freeing the q_vector. | ||
| 3230 | **/ | ||
| 3231 | static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) | ||
| 3232 | { | ||
| 3233 | struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; | ||
| 3234 | struct i40e_ring *ring; | ||
| 3235 | |||
| 3236 | if (!q_vector) | ||
| 3237 | return; | ||
| 3238 | |||
| 3239 | /* disassociate q_vector from rings */ | ||
| 3240 | i40e_for_each_ring(ring, q_vector->tx) | ||
| 3241 | ring->q_vector = NULL; | ||
| 3242 | |||
| 3243 | i40e_for_each_ring(ring, q_vector->rx) | ||
| 3244 | ring->q_vector = NULL; | ||
| 3245 | |||
| 3246 | /* only VSI w/ an associated netdev is set up w/ NAPI */ | ||
| 3247 | if (vsi->netdev) | ||
| 3248 | netif_napi_del(&q_vector->napi); | ||
| 3249 | |||
| 3250 | vsi->q_vectors[v_idx] = NULL; | ||
| 3251 | |||
| 3252 | kfree_rcu(q_vector, rcu); | ||
| 3253 | } | ||
| 3254 | |||
| 3255 | /** | ||
| 3167 | * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors | 3256 | * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors |
| 3168 | * @vsi: the VSI being un-configured | 3257 | * @vsi: the VSI being un-configured |
| 3169 | * | 3258 | * |
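
This free path is where the rcu_head added to struct i40e_q_vector (and used again for the rings in i40e_vsi_clear_rings() below) pays off: stats readers only take rcu_read_lock() and re-load the pointer with ACCESS_ONCE(), while teardown defers the real kfree() until every reader still inside its critical section has finished. A condensed sketch of the two sides (simplified, not line-for-line from the patch):

	/* reader (stats paths) */
	rcu_read_lock();
	ring = ACCESS_ONCE(vsi->tx_rings[i]);
	if (ring) {
		/* read ring->stats under the u64_stats sequence loop */
	}
	rcu_read_unlock();

	/* writer (teardown): unpublish the pointer, then let the memory
	 * be returned only after a grace period
	 */
	ring = vsi->tx_rings[i];
	vsi->tx_rings[i] = NULL;
	kfree_rcu(ring, rcu);
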
| @@ -3174,24 +3263,8 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) | |||
| 3174 | { | 3263 | { |
| 3175 | int v_idx; | 3264 | int v_idx; |
| 3176 | 3265 | ||
| 3177 | for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) { | 3266 | for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) |
| 3178 | struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx]; | 3267 | i40e_free_q_vector(vsi, v_idx); |
| 3179 | int r_idx; | ||
| 3180 | |||
| 3181 | if (!q_vector) | ||
| 3182 | continue; | ||
| 3183 | |||
| 3184 | /* disassociate q_vector from rings */ | ||
| 3185 | for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++) | ||
| 3186 | q_vector->tx.ring[r_idx]->q_vector = NULL; | ||
| 3187 | for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++) | ||
| 3188 | q_vector->rx.ring[r_idx]->q_vector = NULL; | ||
| 3189 | |||
| 3190 | /* only VSI w/ an associated netdev is set up w/ NAPI */ | ||
| 3191 | if (vsi->netdev) | ||
| 3192 | netif_napi_del(&q_vector->napi); | ||
| 3193 | } | ||
| 3194 | kfree(vsi->q_vectors); | ||
| 3195 | } | 3268 | } |
| 3196 | 3269 | ||
| 3197 | /** | 3270 | /** |
| @@ -3241,7 +3314,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi) | |||
| 3241 | return; | 3314 | return; |
| 3242 | 3315 | ||
| 3243 | for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) | 3316 | for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) |
| 3244 | napi_enable(&vsi->q_vectors[q_idx].napi); | 3317 | napi_enable(&vsi->q_vectors[q_idx]->napi); |
| 3245 | } | 3318 | } |
| 3246 | 3319 | ||
| 3247 | /** | 3320 | /** |
| @@ -3256,7 +3329,7 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi) | |||
| 3256 | return; | 3329 | return; |
| 3257 | 3330 | ||
| 3258 | for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) | 3331 | for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) |
| 3259 | napi_disable(&vsi->q_vectors[q_idx].napi); | 3332 | napi_disable(&vsi->q_vectors[q_idx]->napi); |
| 3260 | } | 3333 | } |
| 3261 | 3334 | ||
| 3262 | /** | 3335 | /** |
| @@ -3703,8 +3776,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi) | |||
| 3703 | 3776 | ||
| 3704 | if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && | 3777 | if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && |
| 3705 | (vsi->netdev)) { | 3778 | (vsi->netdev)) { |
| 3779 | netdev_info(vsi->netdev, "NIC Link is Up\n"); | ||
| 3706 | netif_tx_start_all_queues(vsi->netdev); | 3780 | netif_tx_start_all_queues(vsi->netdev); |
| 3707 | netif_carrier_on(vsi->netdev); | 3781 | netif_carrier_on(vsi->netdev); |
| 3782 | } else if (vsi->netdev) { | ||
| 3783 | netdev_info(vsi->netdev, "NIC Link is Down\n"); | ||
| 3708 | } | 3784 | } |
| 3709 | i40e_service_event_schedule(pf); | 3785 | i40e_service_event_schedule(pf); |
| 3710 | 3786 | ||
| @@ -3772,8 +3848,8 @@ void i40e_down(struct i40e_vsi *vsi) | |||
| 3772 | i40e_napi_disable_all(vsi); | 3848 | i40e_napi_disable_all(vsi); |
| 3773 | 3849 | ||
| 3774 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 3850 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
| 3775 | i40e_clean_tx_ring(&vsi->tx_rings[i]); | 3851 | i40e_clean_tx_ring(vsi->tx_rings[i]); |
| 3776 | i40e_clean_rx_ring(&vsi->rx_rings[i]); | 3852 | i40e_clean_rx_ring(vsi->rx_rings[i]); |
| 3777 | } | 3853 | } |
| 3778 | } | 3854 | } |
| 3779 | 3855 | ||
| @@ -4153,8 +4229,9 @@ static void i40e_link_event(struct i40e_pf *pf) | |||
| 4153 | if (new_link == old_link) | 4229 | if (new_link == old_link) |
| 4154 | return; | 4230 | return; |
| 4155 | 4231 | ||
| 4156 | netdev_info(pf->vsi[pf->lan_vsi]->netdev, | 4232 | if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) |
| 4157 | "NIC Link is %s\n", (new_link ? "Up" : "Down")); | 4233 | netdev_info(pf->vsi[pf->lan_vsi]->netdev, |
| 4234 | "NIC Link is %s\n", (new_link ? "Up" : "Down")); | ||
| 4158 | 4235 | ||
| 4159 | /* Notify the base of the switch tree connected to | 4236 | /* Notify the base of the switch tree connected to |
| 4160 | * the link. Floating VEBs are not notified. | 4237 | * the link. Floating VEBs are not notified. |
| @@ -4199,9 +4276,9 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf) | |||
| 4199 | continue; | 4276 | continue; |
| 4200 | 4277 | ||
| 4201 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 4278 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
| 4202 | set_check_for_tx_hang(&vsi->tx_rings[i]); | 4279 | set_check_for_tx_hang(vsi->tx_rings[i]); |
| 4203 | if (test_bit(__I40E_HANG_CHECK_ARMED, | 4280 | if (test_bit(__I40E_HANG_CHECK_ARMED, |
| 4204 | &vsi->tx_rings[i].state)) | 4281 | &vsi->tx_rings[i]->state)) |
| 4205 | armed++; | 4282 | armed++; |
| 4206 | } | 4283 | } |
| 4207 | 4284 | ||
| @@ -4937,6 +5014,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) | |||
| 4937 | { | 5014 | { |
| 4938 | int ret = -ENODEV; | 5015 | int ret = -ENODEV; |
| 4939 | struct i40e_vsi *vsi; | 5016 | struct i40e_vsi *vsi; |
| 5017 | int sz_vectors; | ||
| 5018 | int sz_rings; | ||
| 4940 | int vsi_idx; | 5019 | int vsi_idx; |
| 4941 | int i; | 5020 | int i; |
| 4942 | 5021 | ||
| @@ -4962,14 +5041,14 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) | |||
| 4962 | vsi_idx = i; /* Found one! */ | 5041 | vsi_idx = i; /* Found one! */ |
| 4963 | } else { | 5042 | } else { |
| 4964 | ret = -ENODEV; | 5043 | ret = -ENODEV; |
| 4965 | goto err_alloc_vsi; /* out of VSI slots! */ | 5044 | goto unlock_pf; /* out of VSI slots! */ |
| 4966 | } | 5045 | } |
| 4967 | pf->next_vsi = ++i; | 5046 | pf->next_vsi = ++i; |
| 4968 | 5047 | ||
| 4969 | vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); | 5048 | vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); |
| 4970 | if (!vsi) { | 5049 | if (!vsi) { |
| 4971 | ret = -ENOMEM; | 5050 | ret = -ENOMEM; |
| 4972 | goto err_alloc_vsi; | 5051 | goto unlock_pf; |
| 4973 | } | 5052 | } |
| 4974 | vsi->type = type; | 5053 | vsi->type = type; |
| 4975 | vsi->back = pf; | 5054 | vsi->back = pf; |
| @@ -4982,14 +5061,40 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) | |||
| 4982 | vsi->work_limit = I40E_DEFAULT_IRQ_WORK; | 5061 | vsi->work_limit = I40E_DEFAULT_IRQ_WORK; |
| 4983 | INIT_LIST_HEAD(&vsi->mac_filter_list); | 5062 | INIT_LIST_HEAD(&vsi->mac_filter_list); |
| 4984 | 5063 | ||
| 4985 | i40e_set_num_rings_in_vsi(vsi); | 5064 | ret = i40e_set_num_rings_in_vsi(vsi); |
| 5065 | if (ret) | ||
| 5066 | goto err_rings; | ||
| 5067 | |||
| 5068 | /* allocate memory for ring pointers */ | ||
| 5069 | sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2; | ||
| 5070 | vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL); | ||
| 5071 | if (!vsi->tx_rings) { | ||
| 5072 | ret = -ENOMEM; | ||
| 5073 | goto err_rings; | ||
| 5074 | } | ||
| 5075 | vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs]; | ||
| 5076 | |||
| 5077 | /* allocate memory for q_vector pointers */ | ||
| 5078 | sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors; | ||
| 5079 | vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL); | ||
| 5080 | if (!vsi->q_vectors) { | ||
| 5081 | ret = -ENOMEM; | ||
| 5082 | goto err_vectors; | ||
| 5083 | } | ||
| 4986 | 5084 | ||
| 4987 | /* Setup default MSIX irq handler for VSI */ | 5085 | /* Setup default MSIX irq handler for VSI */ |
| 4988 | i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); | 5086 | i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); |
| 4989 | 5087 | ||
| 4990 | pf->vsi[vsi_idx] = vsi; | 5088 | pf->vsi[vsi_idx] = vsi; |
| 4991 | ret = vsi_idx; | 5089 | ret = vsi_idx; |
| 4992 | err_alloc_vsi: | 5090 | goto unlock_pf; |
| 5091 | |||
| 5092 | err_vectors: | ||
| 5093 | kfree(vsi->tx_rings); | ||
| 5094 | err_rings: | ||
| 5095 | pf->next_vsi = i - 1; | ||
| 5096 | kfree(vsi); | ||
| 5097 | unlock_pf: | ||
| 4993 | mutex_unlock(&pf->switch_mutex); | 5098 | mutex_unlock(&pf->switch_mutex); |
| 4994 | return ret; | 5099 | return ret; |
| 4995 | } | 5100 | } |
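The reworked i40e_vsi_mem_alloc() sizes one pointer block for both queue directions: tx_rings gets a kzalloc of 2 * alloc_queue_pairs ring pointers and rx_rings simply aliases the second half, which is why the error path and i40e_vsi_clear() free only tx_rings. A minimal sketch of that container layout, with illustrative struct and field names rather than the driver's own:

    #include <linux/slab.h>

    struct demo_ring {
            int queue_index;
    };

    struct demo_vsi {
            struct demo_ring **tx_rings;    /* first half of one allocation */
            struct demo_ring **rx_rings;    /* aliases the second half */
            int alloc_queue_pairs;
    };

    static int demo_alloc_ring_containers(struct demo_vsi *vsi)
    {
            size_t sz = sizeof(struct demo_ring *) * vsi->alloc_queue_pairs * 2;

            vsi->tx_rings = kzalloc(sz, GFP_KERNEL);
            if (!vsi->tx_rings)
                    return -ENOMEM;

            /* Rx pointers live directly after the Tx pointers */
            vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
            return 0;
    }

    static void demo_free_ring_containers(struct demo_vsi *vsi)
    {
            /* one kfree releases both halves; rx_rings was never its own allocation */
            kfree(vsi->tx_rings);
            vsi->tx_rings = NULL;
            vsi->rx_rings = NULL;
    }

Keeping both halves in one allocation gives a single failure point and a single kfree(), at the cost of remembering that rx_rings must never be freed on its own.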
| @@ -5030,6 +5135,10 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi) | |||
| 5030 | i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); | 5135 | i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); |
| 5031 | i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); | 5136 | i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); |
| 5032 | 5137 | ||
| 5138 | /* free the ring and vector containers */ | ||
| 5139 | kfree(vsi->q_vectors); | ||
| 5140 | kfree(vsi->tx_rings); | ||
| 5141 | |||
| 5033 | pf->vsi[vsi->idx] = NULL; | 5142 | pf->vsi[vsi->idx] = NULL; |
| 5034 | if (vsi->idx < pf->next_vsi) | 5143 | if (vsi->idx < pf->next_vsi) |
| 5035 | pf->next_vsi = vsi->idx; | 5144 | pf->next_vsi = vsi->idx; |
| @@ -5043,34 +5152,39 @@ free_vsi: | |||
| 5043 | } | 5152 | } |
| 5044 | 5153 | ||
| 5045 | /** | 5154 | /** |
| 5155 | * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI | ||
| 5156 | * @vsi: the VSI being cleaned | ||
| 5157 | **/ | ||
| 5158 | static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi) | ||
| 5159 | { | ||
| 5160 | int i; | ||
| 5161 | |||
| 5162 | for (i = 0; i < vsi->alloc_queue_pairs; i++) { | ||
| 5163 | kfree_rcu(vsi->tx_rings[i], rcu); | ||
| 5164 | vsi->tx_rings[i] = NULL; | ||
| 5165 | vsi->rx_rings[i] = NULL; | ||
| 5166 | } | ||
| 5167 | |||
| 5168 | return 0; | ||
| 5169 | } | ||
| 5170 | |||
| 5171 | /** | ||
| 5046 | * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI | 5172 | * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI |
| 5047 | * @vsi: the VSI being configured | 5173 | * @vsi: the VSI being configured |
| 5048 | **/ | 5174 | **/ |
| 5049 | static int i40e_alloc_rings(struct i40e_vsi *vsi) | 5175 | static int i40e_alloc_rings(struct i40e_vsi *vsi) |
| 5050 | { | 5176 | { |
| 5051 | struct i40e_pf *pf = vsi->back; | 5177 | struct i40e_pf *pf = vsi->back; |
| 5052 | int ret = 0; | ||
| 5053 | int i; | 5178 | int i; |
| 5054 | 5179 | ||
| 5055 | vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs, | ||
| 5056 | sizeof(struct i40e_ring), GFP_KERNEL); | ||
| 5057 | if (!vsi->rx_rings) { | ||
| 5058 | ret = -ENOMEM; | ||
| 5059 | goto err_alloc_rings; | ||
| 5060 | } | ||
| 5061 | |||
| 5062 | vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs, | ||
| 5063 | sizeof(struct i40e_ring), GFP_KERNEL); | ||
| 5064 | if (!vsi->tx_rings) { | ||
| 5065 | ret = -ENOMEM; | ||
| 5066 | kfree(vsi->rx_rings); | ||
| 5067 | goto err_alloc_rings; | ||
| 5068 | } | ||
| 5069 | |||
| 5070 | /* Set basic values in the rings to be used later during open() */ | 5180 | /* Set basic values in the rings to be used later during open() */ |
| 5071 | for (i = 0; i < vsi->alloc_queue_pairs; i++) { | 5181 | for (i = 0; i < vsi->alloc_queue_pairs; i++) { |
| 5072 | struct i40e_ring *rx_ring = &vsi->rx_rings[i]; | 5182 | struct i40e_ring *tx_ring; |
| 5073 | struct i40e_ring *tx_ring = &vsi->tx_rings[i]; | 5183 | struct i40e_ring *rx_ring; |
| 5184 | |||
| 5185 | tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); | ||
| 5186 | if (!tx_ring) | ||
| 5187 | goto err_out; | ||
| 5074 | 5188 | ||
| 5075 | tx_ring->queue_index = i; | 5189 | tx_ring->queue_index = i; |
| 5076 | tx_ring->reg_idx = vsi->base_queue + i; | 5190 | tx_ring->reg_idx = vsi->base_queue + i; |
| @@ -5081,7 +5195,9 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) | |||
| 5081 | tx_ring->count = vsi->num_desc; | 5195 | tx_ring->count = vsi->num_desc; |
| 5082 | tx_ring->size = 0; | 5196 | tx_ring->size = 0; |
| 5083 | tx_ring->dcb_tc = 0; | 5197 | tx_ring->dcb_tc = 0; |
| 5198 | vsi->tx_rings[i] = tx_ring; | ||
| 5084 | 5199 | ||
| 5200 | rx_ring = &tx_ring[1]; | ||
| 5085 | rx_ring->queue_index = i; | 5201 | rx_ring->queue_index = i; |
| 5086 | rx_ring->reg_idx = vsi->base_queue + i; | 5202 | rx_ring->reg_idx = vsi->base_queue + i; |
| 5087 | rx_ring->ring_active = false; | 5203 | rx_ring->ring_active = false; |
| @@ -5095,24 +5211,14 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) | |||
| 5095 | set_ring_16byte_desc_enabled(rx_ring); | 5211 | set_ring_16byte_desc_enabled(rx_ring); |
| 5096 | else | 5212 | else |
| 5097 | clear_ring_16byte_desc_enabled(rx_ring); | 5213 | clear_ring_16byte_desc_enabled(rx_ring); |
| 5098 | } | 5214 | vsi->rx_rings[i] = rx_ring; |
| 5099 | |||
| 5100 | err_alloc_rings: | ||
| 5101 | return ret; | ||
| 5102 | } | ||
| 5103 | |||
| 5104 | /** | ||
| 5105 | * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI | ||
| 5106 | * @vsi: the VSI being cleaned | ||
| 5107 | **/ | ||
| 5108 | static int i40e_vsi_clear_rings(struct i40e_vsi *vsi) | ||
| 5109 | { | ||
| 5110 | if (vsi) { | ||
| 5111 | kfree(vsi->rx_rings); | ||
| 5112 | kfree(vsi->tx_rings); | ||
| 5113 | } | 5215 | } |
| 5114 | 5216 | ||
| 5115 | return 0; | 5217 | return 0; |
| 5218 | |||
| 5219 | err_out: | ||
| 5220 | i40e_vsi_clear_rings(vsi); | ||
| 5221 | return -ENOMEM; | ||
| 5116 | } | 5222 | } |
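i40e_alloc_rings() now allocates each queue pair as one kzalloc of two ring structures and takes the Rx ring as the second element, so vsi->tx_rings[i] is the only pointer that owns memory; i40e_vsi_clear_rings() above frees it with kfree_rcu() because the rcu head added to struct i40e_ring lets a concurrent stats reader finish before the memory disappears. A hedged sketch of the same pairing, with made-up names:

    #include <linux/slab.h>
    #include <linux/rcupdate.h>

    struct demo_ring {
            int queue_index;
            struct rcu_head rcu;    /* allows deferred free under RCU */
    };

    /* allocate one Tx/Rx pair as a single block; Rx is the second element */
    static int demo_alloc_pair(struct demo_ring **tx, struct demo_ring **rx, int idx)
    {
            struct demo_ring *pair;

            pair = kzalloc(sizeof(struct demo_ring) * 2, GFP_KERNEL);
            if (!pair)
                    return -ENOMEM;

            pair[0].queue_index = idx;      /* Tx half owns the allocation */
            pair[1].queue_index = idx;      /* Rx half shares it */
            *tx = &pair[0];
            *rx = &pair[1];
            return 0;
    }

    static void demo_free_pair(struct demo_ring **tx, struct demo_ring **rx)
    {
            /* free through the Tx pointer only; kfree_rcu() defers the actual
             * kfree until an RCU grace period has passed, so readers that still
             * hold the old pointer remain safe.
             */
            kfree_rcu(*tx, rcu);
            *tx = NULL;
            *rx = NULL;
    }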
| 5117 | 5223 | ||
| 5118 | /** | 5224 | /** |
| @@ -5249,6 +5355,38 @@ static int i40e_init_msix(struct i40e_pf *pf) | |||
| 5249 | } | 5355 | } |
| 5250 | 5356 | ||
| 5251 | /** | 5357 | /** |
| 5358 | * i40e_alloc_q_vector - Allocate memory for a single interrupt vector | ||
| 5359 | * @vsi: the VSI being configured | ||
| 5360 | * @v_idx: index of the vector in the vsi struct | ||
| 5361 | * | ||
| 5362 | * We allocate one q_vector. If allocation fails we return -ENOMEM. | ||
| 5363 | **/ | ||
| 5364 | static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) | ||
| 5365 | { | ||
| 5366 | struct i40e_q_vector *q_vector; | ||
| 5367 | |||
| 5368 | /* allocate q_vector */ | ||
| 5369 | q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); | ||
| 5370 | if (!q_vector) | ||
| 5371 | return -ENOMEM; | ||
| 5372 | |||
| 5373 | q_vector->vsi = vsi; | ||
| 5374 | q_vector->v_idx = v_idx; | ||
| 5375 | cpumask_set_cpu(v_idx, &q_vector->affinity_mask); | ||
| 5376 | if (vsi->netdev) | ||
| 5377 | netif_napi_add(vsi->netdev, &q_vector->napi, | ||
| 5378 | i40e_napi_poll, vsi->work_limit); | ||
| 5379 | |||
| 5380 | q_vector->rx.latency_range = I40E_LOW_LATENCY; | ||
| 5381 | q_vector->tx.latency_range = I40E_LOW_LATENCY; | ||
| 5382 | |||
| 5383 | /* tie q_vector and vsi together */ | ||
| 5384 | vsi->q_vectors[v_idx] = q_vector; | ||
| 5385 | |||
| 5386 | return 0; | ||
| 5387 | } | ||
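Each vector is now allocated individually and registered with NAPI, replacing the single kcalloc'd array of q_vectors. A small sketch of the per-vector setup, assuming the caller supplies the poll callback and weight; the names are illustrative and netif_napi_add() is shown in the older four-argument form this patch uses:

    #include <linux/slab.h>
    #include <linux/netdevice.h>

    struct demo_vector {
            struct napi_struct napi;
            int v_idx;
    };

    static struct demo_vector *demo_alloc_vector(struct net_device *netdev, int v_idx,
                                                 int (*poll)(struct napi_struct *, int),
                                                 int weight)
    {
            struct demo_vector *vec;

            vec = kzalloc(sizeof(*vec), GFP_KERNEL);
            if (!vec)
                    return NULL;

            vec->v_idx = v_idx;
            /* register this vector's NAPI context with the stack */
            netif_napi_add(netdev, &vec->napi, poll, weight);
            return vec;
    }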
| 5388 | |||
| 5389 | /** | ||
| 5252 | * i40e_alloc_q_vectors - Allocate memory for interrupt vectors | 5390 | * i40e_alloc_q_vectors - Allocate memory for interrupt vectors |
| 5253 | * @vsi: the VSI being configured | 5391 | * @vsi: the VSI being configured |
| 5254 | * | 5392 | * |
| @@ -5259,6 +5397,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi) | |||
| 5259 | { | 5397 | { |
| 5260 | struct i40e_pf *pf = vsi->back; | 5398 | struct i40e_pf *pf = vsi->back; |
| 5261 | int v_idx, num_q_vectors; | 5399 | int v_idx, num_q_vectors; |
| 5400 | int err; | ||
| 5262 | 5401 | ||
| 5263 | /* if not MSIX, give the one vector only to the LAN VSI */ | 5402 | /* if not MSIX, give the one vector only to the LAN VSI */ |
| 5264 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) | 5403 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) |
| @@ -5268,22 +5407,19 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi) | |||
| 5268 | else | 5407 | else |
| 5269 | return -EINVAL; | 5408 | return -EINVAL; |
| 5270 | 5409 | ||
| 5271 | vsi->q_vectors = kcalloc(num_q_vectors, | ||
| 5272 | sizeof(struct i40e_q_vector), | ||
| 5273 | GFP_KERNEL); | ||
| 5274 | if (!vsi->q_vectors) | ||
| 5275 | return -ENOMEM; | ||
| 5276 | |||
| 5277 | for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { | 5410 | for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { |
| 5278 | vsi->q_vectors[v_idx].vsi = vsi; | 5411 | err = i40e_alloc_q_vector(vsi, v_idx); |
| 5279 | vsi->q_vectors[v_idx].v_idx = v_idx; | 5412 | if (err) |
| 5280 | cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask); | 5413 | goto err_out; |
| 5281 | if (vsi->netdev) | ||
| 5282 | netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi, | ||
| 5283 | i40e_napi_poll, vsi->work_limit); | ||
| 5284 | } | 5414 | } |
| 5285 | 5415 | ||
| 5286 | return 0; | 5416 | return 0; |
| 5417 | |||
| 5418 | err_out: | ||
| 5419 | while (v_idx--) | ||
| 5420 | i40e_free_q_vector(vsi, v_idx); | ||
| 5421 | |||
| 5422 | return err; | ||
| 5287 | } | 5423 | } |
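When one of the per-vector allocations fails, the new error path releases only the vectors that were already set up: while (v_idx--) walks back from the failing index toward zero. A generic sketch of the unwind idiom, reusing the hypothetical demo_alloc_vector() from the sketch above; the free steps stand in for i40e_free_q_vector(), which is not shown in this hunk:

    /* allocate count vectors; on failure free only the ones that succeeded */
    static int demo_alloc_all(struct demo_vector **vecs, int count,
                              struct net_device *netdev,
                              int (*poll)(struct napi_struct *, int), int weight)
    {
            int v_idx;

            for (v_idx = 0; v_idx < count; v_idx++) {
                    vecs[v_idx] = demo_alloc_vector(netdev, v_idx, poll, weight);
                    if (!vecs[v_idx])
                            goto err_out;
            }
            return 0;

    err_out:
            /* v_idx is the index that failed; everything below it is live */
            while (v_idx--) {
                    netif_napi_del(&vecs[v_idx]->napi);
                    kfree(vecs[v_idx]);
                    vecs[v_idx] = NULL;
            }
            return -ENOMEM;
    }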
| 5288 | 5424 | ||
| 5289 | /** | 5425 | /** |
| @@ -5950,7 +6086,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) | |||
| 5950 | int ret = -ENOENT; | 6086 | int ret = -ENOENT; |
| 5951 | struct i40e_pf *pf = vsi->back; | 6087 | struct i40e_pf *pf = vsi->back; |
| 5952 | 6088 | ||
| 5953 | if (vsi->q_vectors) { | 6089 | if (vsi->q_vectors[0]) { |
| 5954 | dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", | 6090 | dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", |
| 5955 | vsi->seid); | 6091 | vsi->seid); |
| 5956 | return -EEXIST; | 6092 | return -EEXIST; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 49d2cfa9b0cc..dc89e72fd0f4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
| @@ -64,7 +64,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, | |||
| 64 | if (!vsi) | 64 | if (!vsi) |
| 65 | return -ENOENT; | 65 | return -ENOENT; |
| 66 | 66 | ||
| 67 | tx_ring = &vsi->tx_rings[0]; | 67 | tx_ring = vsi->tx_rings[0]; |
| 68 | dev = tx_ring->dev; | 68 | dev = tx_ring->dev; |
| 69 | 69 | ||
| 70 | dma = dma_map_single(dev, fdir_data->raw_packet, | 70 | dma = dma_map_single(dev, fdir_data->raw_packet, |
| @@ -73,11 +73,12 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, | |||
| 73 | goto dma_fail; | 73 | goto dma_fail; |
| 74 | 74 | ||
| 75 | /* grab the next descriptor */ | 75 | /* grab the next descriptor */ |
| 76 | fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use); | 76 | i = tx_ring->next_to_use; |
| 77 | tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use]; | 77 | fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); |
| 78 | tx_ring->next_to_use++; | 78 | tx_buf = &tx_ring->tx_bi[i]; |
| 79 | if (tx_ring->next_to_use == tx_ring->count) | 79 | |
| 80 | tx_ring->next_to_use = 0; | 80 | i++; |
| 81 | tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; | ||
| 81 | 82 | ||
| 82 | fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index | 83 | fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index |
| 83 | << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) | 84 | << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) |
| @@ -134,11 +135,11 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, | |||
| 134 | fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id); | 135 | fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id); |
| 135 | 136 | ||
| 136 | /* Now program a dummy descriptor */ | 137 | /* Now program a dummy descriptor */ |
| 137 | tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use); | 138 | i = tx_ring->next_to_use; |
| 138 | tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use]; | 139 | tx_desc = I40E_TX_DESC(tx_ring, i); |
| 139 | tx_ring->next_to_use++; | 140 | |
| 140 | if (tx_ring->next_to_use == tx_ring->count) | 141 | i++; |
| 141 | tx_ring->next_to_use = 0; | 142 | tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; |
| 142 | 143 | ||
| 143 | tx_desc->buffer_addr = cpu_to_le64(dma); | 144 | tx_desc->buffer_addr = cpu_to_le64(dma); |
| 144 | td_cmd = I40E_TX_DESC_CMD_EOP | | 145 | td_cmd = I40E_TX_DESC_CMD_EOP | |
| @@ -148,9 +149,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, | |||
| 148 | tx_desc->cmd_type_offset_bsz = | 149 | tx_desc->cmd_type_offset_bsz = |
| 149 | build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0); | 150 | build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0); |
| 150 | 151 | ||
| 151 | /* Mark the data descriptor to be watched */ | ||
| 152 | tx_buf->next_to_watch = tx_desc; | ||
| 153 | |||
| 154 | /* Force memory writes to complete before letting h/w | 152 | /* Force memory writes to complete before letting h/w |
| 155 | * know there are new descriptors to fetch. (Only | 153 | * know there are new descriptors to fetch. (Only |
| 156 | * applicable for weak-ordered memory model archs, | 154 | * applicable for weak-ordered memory model archs, |
| @@ -158,6 +156,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, | |||
| 158 | */ | 156 | */ |
| 159 | wmb(); | 157 | wmb(); |
| 160 | 158 | ||
| 159 | /* Mark the data descriptor to be watched */ | ||
| 160 | tx_buf->next_to_watch = tx_desc; | ||
| 161 | |||
| 161 | writel(tx_ring->next_to_use, tx_ring->tail); | 162 | writel(tx_ring->next_to_use, tx_ring->tail); |
| 162 | return 0; | 163 | return 0; |
| 163 | 164 | ||
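Two patterns recur in this hunk: next_to_use is advanced through a local copy and wrapped with a single conditional assignment, and wmb() is issued before next_to_watch is written, so the cleanup side should never observe a watched descriptor whose contents are not yet visible. A simplified sketch of the ordering, with an invented one-slot ring; the real driver keeps next_to_watch per Tx buffer:

    #include <linux/types.h>
    #include <linux/io.h>
    #include <asm/barrier.h>

    struct demo_desc {
            u64 addr;
            u64 cmd;
    };

    struct demo_txring {
            struct demo_desc *desc;
            struct demo_desc *next_to_watch;        /* what the cleanup side polls */
            u16 next_to_use;
            u16 count;
            void __iomem *tail;
    };

    static void demo_post_descriptor(struct demo_txring *ring, u64 addr, u64 cmd)
    {
            u16 i = ring->next_to_use;
            struct demo_desc *desc = &ring->desc[i];

            desc->addr = addr;
            desc->cmd = cmd;

            /* advance through a local copy, then wrap in one assignment */
            i++;
            ring->next_to_use = (i < ring->count) ? i : 0;

            /* make the descriptor contents globally visible first ... */
            wmb();

            /* ... and only then publish it to the cleanup path and to hardware */
            ring->next_to_watch = desc;
            writel(ring->next_to_use, ring->tail);
    }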
| @@ -188,27 +189,30 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id) | |||
| 188 | } | 189 | } |
| 189 | 190 | ||
| 190 | /** | 191 | /** |
| 191 | * i40e_unmap_tx_resource - Release a Tx buffer | 192 | * i40e_unmap_and_free_tx_resource - Release a Tx buffer |
| 192 | * @ring: the ring that owns the buffer | 193 | * @ring: the ring that owns the buffer |
| 193 | * @tx_buffer: the buffer to free | 194 | * @tx_buffer: the buffer to free |
| 194 | **/ | 195 | **/ |
| 195 | static inline void i40e_unmap_tx_resource(struct i40e_ring *ring, | 196 | static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, |
| 196 | struct i40e_tx_buffer *tx_buffer) | 197 | struct i40e_tx_buffer *tx_buffer) |
| 197 | { | 198 | { |
| 198 | if (tx_buffer->dma) { | 199 | if (tx_buffer->skb) { |
| 199 | if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE) | 200 | dev_kfree_skb_any(tx_buffer->skb); |
| 200 | dma_unmap_page(ring->dev, | 201 | if (dma_unmap_len(tx_buffer, len)) |
| 201 | tx_buffer->dma, | ||
| 202 | tx_buffer->length, | ||
| 203 | DMA_TO_DEVICE); | ||
| 204 | else | ||
| 205 | dma_unmap_single(ring->dev, | 202 | dma_unmap_single(ring->dev, |
| 206 | tx_buffer->dma, | 203 | dma_unmap_addr(tx_buffer, dma), |
| 207 | tx_buffer->length, | 204 | dma_unmap_len(tx_buffer, len), |
| 208 | DMA_TO_DEVICE); | 205 | DMA_TO_DEVICE); |
| 206 | } else if (dma_unmap_len(tx_buffer, len)) { | ||
| 207 | dma_unmap_page(ring->dev, | ||
| 208 | dma_unmap_addr(tx_buffer, dma), | ||
| 209 | dma_unmap_len(tx_buffer, len), | ||
| 210 | DMA_TO_DEVICE); | ||
| 209 | } | 211 | } |
| 210 | tx_buffer->dma = 0; | 212 | tx_buffer->next_to_watch = NULL; |
| 211 | tx_buffer->time_stamp = 0; | 213 | tx_buffer->skb = NULL; |
| 214 | dma_unmap_len_set(tx_buffer, len, 0); | ||
| 215 | /* tx_buffer must be completely set up in the transmit path */ | ||
| 212 | } | 216 | } |
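The rewritten free helper keys everything off the recorded unmap length: a non-zero length means a mapping exists, the buffer that owns the skb was mapped with dma_map_single(), and everything else came from skb_frag_dma_map() and is unmapped as a page. A reduced sketch of a buffer-info structure built on the DMA unmap helper macros, with hypothetical names:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    struct demo_tx_buffer {
            struct sk_buff *skb;            /* set only on a packet's first buffer */
            DEFINE_DMA_UNMAP_ADDR(dma);     /* compiled out when unmap state is unneeded */
            DEFINE_DMA_UNMAP_LEN(len);
    };

    static void demo_unmap_and_free(struct device *dev, struct demo_tx_buffer *bi)
    {
            if (bi->skb) {
                    dev_kfree_skb_any(bi->skb);
                    /* head data was mapped with dma_map_single() */
                    if (dma_unmap_len(bi, len))
                            dma_unmap_single(dev, dma_unmap_addr(bi, dma),
                                             dma_unmap_len(bi, len), DMA_TO_DEVICE);
            } else if (dma_unmap_len(bi, len)) {
                    /* fragments were mapped as pages via skb_frag_dma_map() */
                    dma_unmap_page(dev, dma_unmap_addr(bi, dma),
                                   dma_unmap_len(bi, len), DMA_TO_DEVICE);
            }

            bi->skb = NULL;
            dma_unmap_len_set(bi, len, 0);  /* zero length doubles as "not mapped" */
    }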
| 213 | 217 | ||
| 214 | /** | 218 | /** |
| @@ -217,7 +221,6 @@ static inline void i40e_unmap_tx_resource(struct i40e_ring *ring, | |||
| 217 | **/ | 221 | **/ |
| 218 | void i40e_clean_tx_ring(struct i40e_ring *tx_ring) | 222 | void i40e_clean_tx_ring(struct i40e_ring *tx_ring) |
| 219 | { | 223 | { |
| 220 | struct i40e_tx_buffer *tx_buffer; | ||
| 221 | unsigned long bi_size; | 224 | unsigned long bi_size; |
| 222 | u16 i; | 225 | u16 i; |
| 223 | 226 | ||
| @@ -226,13 +229,8 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring) | |||
| 226 | return; | 229 | return; |
| 227 | 230 | ||
| 228 | /* Free all the Tx ring sk_buffs */ | 231 | /* Free all the Tx ring sk_buffs */ |
| 229 | for (i = 0; i < tx_ring->count; i++) { | 232 | for (i = 0; i < tx_ring->count; i++) |
| 230 | tx_buffer = &tx_ring->tx_bi[i]; | 233 | i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); |
| 231 | i40e_unmap_tx_resource(tx_ring, tx_buffer); | ||
| 232 | if (tx_buffer->skb) | ||
| 233 | dev_kfree_skb_any(tx_buffer->skb); | ||
| 234 | tx_buffer->skb = NULL; | ||
| 235 | } | ||
| 236 | 234 | ||
| 237 | bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; | 235 | bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; |
| 238 | memset(tx_ring->tx_bi, 0, bi_size); | 236 | memset(tx_ring->tx_bi, 0, bi_size); |
| @@ -242,6 +240,13 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring) | |||
| 242 | 240 | ||
| 243 | tx_ring->next_to_use = 0; | 241 | tx_ring->next_to_use = 0; |
| 244 | tx_ring->next_to_clean = 0; | 242 | tx_ring->next_to_clean = 0; |
| 243 | |||
| 244 | if (!tx_ring->netdev) | ||
| 245 | return; | ||
| 246 | |||
| 247 | /* cleanup Tx queue statistics */ | ||
| 248 | netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, | ||
| 249 | tx_ring->queue_index)); | ||
| 245 | } | 250 | } |
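With netdev_tx_reset_queue() added here, the ring now participates fully in byte queue limits: the transmit path reports bytes handed to hardware, the cleanup path reports bytes completed, and a ring reset clears any outstanding credit so the estimator does not start from stale state. A hedged sketch of how the three calls pair up; queue selection and helper names are illustrative:

    #include <linux/netdevice.h>

    /* transmit side: after the descriptors for one packet are written */
    static void demo_bql_sent(struct net_device *netdev, u16 queue, unsigned int bytes)
    {
            netdev_tx_sent_queue(netdev_get_tx_queue(netdev, queue), bytes);
    }

    /* completion side: after a batch of Tx descriptors has been cleaned */
    static void demo_bql_completed(struct net_device *netdev, u16 queue,
                                   unsigned int pkts, unsigned int bytes)
    {
            netdev_tx_completed_queue(netdev_get_tx_queue(netdev, queue),
                                      pkts, bytes);
    }

    /* ring teardown or reset: forget whatever was still outstanding */
    static void demo_bql_reset(struct net_device *netdev, u16 queue)
    {
            netdev_tx_reset_queue(netdev_get_tx_queue(netdev, queue));
    }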
| 246 | 251 | ||
| 247 | /** | 252 | /** |
| @@ -300,14 +305,14 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) | |||
| 300 | * run the check_tx_hang logic with a transmit completion | 305 | * run the check_tx_hang logic with a transmit completion |
| 301 | * pending but without time to complete it yet. | 306 | * pending but without time to complete it yet. |
| 302 | */ | 307 | */ |
| 303 | if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) && | 308 | if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && |
| 304 | tx_pending) { | 309 | tx_pending) { |
| 305 | /* make sure it is true for two checks in a row */ | 310 | /* make sure it is true for two checks in a row */ |
| 306 | ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, | 311 | ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, |
| 307 | &tx_ring->state); | 312 | &tx_ring->state); |
| 308 | } else { | 313 | } else { |
| 309 | /* update completed stats and disarm the hang check */ | 314 | /* update completed stats and disarm the hang check */ |
| 310 | tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets; | 315 | tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; |
| 311 | clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); | 316 | clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); |
| 312 | } | 317 | } |
| 313 | 318 | ||
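The hang detector is a two-sample scheme: a ring is reported hung only if the completed-packet counter is unchanged across two consecutive checks while work is still pending, and any progress disarms it again; this hunk repoints it at the new shared stats.packets counter. A simplified sketch of the arm/disarm logic with hypothetical names:

    #include <linux/types.h>
    #include <linux/bitops.h>

    enum { DEMO_HANG_CHECK_ARMED = 0 };

    struct demo_hang_state {
            unsigned long state;
            u64 packets_completed;          /* running completion counter */
            u64 packets_at_last_check;      /* snapshot from the previous check */
    };

    /* returns true only when the same stall is seen on two checks in a row */
    static bool demo_check_tx_hang(struct demo_hang_state *r, bool work_pending)
    {
            bool hung = false;

            if (work_pending && r->packets_completed == r->packets_at_last_check) {
                    /* no progress since last time: arm now, report a hang only
                     * if the previous check had already armed us
                     */
                    hung = test_and_set_bit(DEMO_HANG_CHECK_ARMED, &r->state);
            } else {
                    /* progress was made: remember the new count and disarm */
                    r->packets_at_last_check = r->packets_completed;
                    clear_bit(DEMO_HANG_CHECK_ARMED, &r->state);
            }

            return hung;
    }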
| @@ -331,62 +336,88 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) | |||
| 331 | 336 | ||
| 332 | tx_buf = &tx_ring->tx_bi[i]; | 337 | tx_buf = &tx_ring->tx_bi[i]; |
| 333 | tx_desc = I40E_TX_DESC(tx_ring, i); | 338 | tx_desc = I40E_TX_DESC(tx_ring, i); |
| 339 | i -= tx_ring->count; | ||
| 334 | 340 | ||
| 335 | for (; budget; budget--) { | 341 | do { |
| 336 | struct i40e_tx_desc *eop_desc; | 342 | struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; |
| 337 | |||
| 338 | eop_desc = tx_buf->next_to_watch; | ||
| 339 | 343 | ||
| 340 | /* if next_to_watch is not set then there is no work pending */ | 344 | /* if next_to_watch is not set then there is no work pending */ |
| 341 | if (!eop_desc) | 345 | if (!eop_desc) |
| 342 | break; | 346 | break; |
| 343 | 347 | ||
| 348 | /* prevent any other reads prior to eop_desc */ | ||
| 349 | read_barrier_depends(); | ||
| 350 | |||
| 344 | /* if the descriptor isn't done, no work yet to do */ | 351 | /* if the descriptor isn't done, no work yet to do */ |
| 345 | if (!(eop_desc->cmd_type_offset_bsz & | 352 | if (!(eop_desc->cmd_type_offset_bsz & |
| 346 | cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) | 353 | cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) |
| 347 | break; | 354 | break; |
| 348 | 355 | ||
| 349 | /* count the packet as being completed */ | 356 | /* clear next_to_watch to prevent false hangs */ |
| 350 | tx_ring->tx_stats.completed++; | ||
| 351 | tx_buf->next_to_watch = NULL; | 357 | tx_buf->next_to_watch = NULL; |
| 352 | tx_buf->time_stamp = 0; | ||
| 353 | |||
| 354 | /* set memory barrier before eop_desc is verified */ | ||
| 355 | rmb(); | ||
| 356 | 358 | ||
| 357 | do { | 359 | /* update the statistics for this packet */ |
| 358 | i40e_unmap_tx_resource(tx_ring, tx_buf); | 360 | total_bytes += tx_buf->bytecount; |
| 361 | total_packets += tx_buf->gso_segs; | ||
| 359 | 362 | ||
| 360 | /* clear dtype status */ | 363 | /* free the skb */ |
| 361 | tx_desc->cmd_type_offset_bsz &= | 364 | dev_kfree_skb_any(tx_buf->skb); |
| 362 | ~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK); | ||
| 363 | 365 | ||
| 364 | if (likely(tx_desc == eop_desc)) { | 366 | /* unmap skb header data */ |
| 365 | eop_desc = NULL; | 367 | dma_unmap_single(tx_ring->dev, |
| 368 | dma_unmap_addr(tx_buf, dma), | ||
| 369 | dma_unmap_len(tx_buf, len), | ||
| 370 | DMA_TO_DEVICE); | ||
| 366 | 371 | ||
| 367 | dev_kfree_skb_any(tx_buf->skb); | 372 | /* clear tx_buffer data */ |
| 368 | tx_buf->skb = NULL; | 373 | tx_buf->skb = NULL; |
| 374 | dma_unmap_len_set(tx_buf, len, 0); | ||
| 369 | 375 | ||
| 370 | total_bytes += tx_buf->bytecount; | 376 | /* unmap remaining buffers */ |
| 371 | total_packets += tx_buf->gso_segs; | 377 | while (tx_desc != eop_desc) { |
| 372 | } | ||
| 373 | 378 | ||
| 374 | tx_buf++; | 379 | tx_buf++; |
| 375 | tx_desc++; | 380 | tx_desc++; |
| 376 | i++; | 381 | i++; |
| 377 | if (unlikely(i == tx_ring->count)) { | 382 | if (unlikely(!i)) { |
| 378 | i = 0; | 383 | i -= tx_ring->count; |
| 379 | tx_buf = tx_ring->tx_bi; | 384 | tx_buf = tx_ring->tx_bi; |
| 380 | tx_desc = I40E_TX_DESC(tx_ring, 0); | 385 | tx_desc = I40E_TX_DESC(tx_ring, 0); |
| 381 | } | 386 | } |
| 382 | } while (eop_desc); | ||
| 383 | } | ||
| 384 | 387 | ||
| 388 | /* unmap any remaining paged data */ | ||
| 389 | if (dma_unmap_len(tx_buf, len)) { | ||
| 390 | dma_unmap_page(tx_ring->dev, | ||
| 391 | dma_unmap_addr(tx_buf, dma), | ||
| 392 | dma_unmap_len(tx_buf, len), | ||
| 393 | DMA_TO_DEVICE); | ||
| 394 | dma_unmap_len_set(tx_buf, len, 0); | ||
| 395 | } | ||
| 396 | } | ||
| 397 | |||
| 398 | /* move us one more past the eop_desc for start of next pkt */ | ||
| 399 | tx_buf++; | ||
| 400 | tx_desc++; | ||
| 401 | i++; | ||
| 402 | if (unlikely(!i)) { | ||
| 403 | i -= tx_ring->count; | ||
| 404 | tx_buf = tx_ring->tx_bi; | ||
| 405 | tx_desc = I40E_TX_DESC(tx_ring, 0); | ||
| 406 | } | ||
| 407 | |||
| 408 | /* update budget accounting */ | ||
| 409 | budget--; | ||
| 410 | } while (likely(budget)); | ||
| 411 | |||
| 412 | i += tx_ring->count; | ||
| 385 | tx_ring->next_to_clean = i; | 413 | tx_ring->next_to_clean = i; |
| 386 | tx_ring->tx_stats.bytes += total_bytes; | 414 | u64_stats_update_begin(&tx_ring->syncp); |
| 387 | tx_ring->tx_stats.packets += total_packets; | 415 | tx_ring->stats.bytes += total_bytes; |
| 416 | tx_ring->stats.packets += total_packets; | ||
| 417 | u64_stats_update_end(&tx_ring->syncp); | ||
| 388 | tx_ring->q_vector->tx.total_bytes += total_bytes; | 418 | tx_ring->q_vector->tx.total_bytes += total_bytes; |
| 389 | tx_ring->q_vector->tx.total_packets += total_packets; | 419 | tx_ring->q_vector->tx.total_packets += total_packets; |
| 420 | |||
| 390 | if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { | 421 | if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { |
| 391 | /* schedule immediate reset if we believe we hung */ | 422 | /* schedule immediate reset if we believe we hung */ |
| 392 | dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" | 423 | dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" |
| @@ -414,6 +445,10 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) | |||
| 414 | return true; | 445 | return true; |
| 415 | } | 446 | } |
| 416 | 447 | ||
| 448 | netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, | ||
| 449 | tx_ring->queue_index), | ||
| 450 | total_packets, total_bytes); | ||
| 451 | |||
| 417 | #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) | 452 | #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) |
| 418 | if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && | 453 | if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && |
| 419 | (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { | 454 | (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { |
| @@ -1042,8 +1077,10 @@ next_desc: | |||
| 1042 | } | 1077 | } |
| 1043 | 1078 | ||
| 1044 | rx_ring->next_to_clean = i; | 1079 | rx_ring->next_to_clean = i; |
| 1045 | rx_ring->rx_stats.packets += total_rx_packets; | 1080 | u64_stats_update_begin(&rx_ring->syncp); |
| 1046 | rx_ring->rx_stats.bytes += total_rx_bytes; | 1081 | rx_ring->stats.packets += total_rx_packets; |
| 1082 | rx_ring->stats.bytes += total_rx_bytes; | ||
| 1083 | u64_stats_update_end(&rx_ring->syncp); | ||
| 1047 | rx_ring->q_vector->rx.total_packets += total_rx_packets; | 1084 | rx_ring->q_vector->rx.total_packets += total_rx_packets; |
| 1048 | rx_ring->q_vector->rx.total_bytes += total_rx_bytes; | 1085 | rx_ring->q_vector->rx.total_bytes += total_rx_bytes; |
| 1049 | 1086 | ||
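Packet and byte counters for both ring types now sit behind a u64_stats_sync, so a 64-bit pair can be read consistently on 32-bit hosts: the writer brackets its updates and a reader retries if it raced with one. A sketch of both sides, with an illustrative stats struct:

    #include <linux/types.h>
    #include <linux/u64_stats_sync.h>

    struct demo_ring_stats {
            u64 packets;
            u64 bytes;
            struct u64_stats_sync syncp;
    };

    /* writer side: runs in the ring's own cleanup context */
    static void demo_stats_add(struct demo_ring_stats *s, u64 pkts, u64 bytes)
    {
            u64_stats_update_begin(&s->syncp);
            s->packets += pkts;
            s->bytes += bytes;
            u64_stats_update_end(&s->syncp);
    }

    /* reader side: loop until a consistent snapshot of both values is seen */
    static void demo_stats_read(struct demo_ring_stats *s, u64 *pkts, u64 *bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin(&s->syncp);
                    *pkts = s->packets;
                    *bytes = s->bytes;
            } while (u64_stats_fetch_retry(&s->syncp, start));
    }

This is also where the rcu head added to the ring structure matters: a reader in the middle of this fetch loop must not have the ring freed underneath it, hence the kfree_rcu() in i40e_vsi_clear_rings().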
| @@ -1067,27 +1104,28 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) | |||
| 1067 | struct i40e_q_vector *q_vector = | 1104 | struct i40e_q_vector *q_vector = |
| 1068 | container_of(napi, struct i40e_q_vector, napi); | 1105 | container_of(napi, struct i40e_q_vector, napi); |
| 1069 | struct i40e_vsi *vsi = q_vector->vsi; | 1106 | struct i40e_vsi *vsi = q_vector->vsi; |
| 1107 | struct i40e_ring *ring; | ||
| 1070 | bool clean_complete = true; | 1108 | bool clean_complete = true; |
| 1071 | int budget_per_ring; | 1109 | int budget_per_ring; |
| 1072 | int i; | ||
| 1073 | 1110 | ||
| 1074 | if (test_bit(__I40E_DOWN, &vsi->state)) { | 1111 | if (test_bit(__I40E_DOWN, &vsi->state)) { |
| 1075 | napi_complete(napi); | 1112 | napi_complete(napi); |
| 1076 | return 0; | 1113 | return 0; |
| 1077 | } | 1114 | } |
| 1078 | 1115 | ||
| 1116 | /* Since the actual Tx work is minimal, we can give the Tx a larger | ||
| 1117 | * budget and be more aggressive about cleaning up the Tx descriptors. | ||
| 1118 | */ | ||
| 1119 | i40e_for_each_ring(ring, q_vector->tx) | ||
| 1120 | clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); | ||
| 1121 | |||
| 1079 | /* We attempt to distribute budget to each Rx queue fairly, but don't | 1122 | /* We attempt to distribute budget to each Rx queue fairly, but don't |
| 1080 | * allow the budget to go below 1 because that would exit polling early. | 1123 | * allow the budget to go below 1 because that would exit polling early. |
| 1081 | * Since the actual Tx work is minimal, we can give the Tx a larger | ||
| 1082 | * budget and be more aggressive about cleaning up the Tx descriptors. | ||
| 1083 | */ | 1124 | */ |
| 1084 | budget_per_ring = max(budget/q_vector->num_ringpairs, 1); | 1125 | budget_per_ring = max(budget/q_vector->num_ringpairs, 1); |
| 1085 | for (i = 0; i < q_vector->num_ringpairs; i++) { | 1126 | |
| 1086 | clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i], | 1127 | i40e_for_each_ring(ring, q_vector->rx) |
| 1087 | vsi->work_limit); | 1128 | clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); |
| 1088 | clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i], | ||
| 1089 | budget_per_ring); | ||
| 1090 | } | ||
| 1091 | 1129 | ||
| 1092 | /* If work not completed, return budget and polling will return */ | 1130 | /* If work not completed, return budget and polling will return */ |
| 1093 | if (!clean_complete) | 1131 | if (!clean_complete) |
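The poll loop now walks each vector's rings through the new i40e_for_each_ring() helper (defined near the end of this patch in i40e_txrx.h) instead of indexing a fixed-size array, since the ring container holds a singly linked list chained through ring->next. A stand-alone sketch of the same list-plus-iterator shape, with hypothetical names:

    #include <linux/stddef.h>

    struct demo_ring {
            struct demo_ring *next;         /* next ring owned by the same vector */
            unsigned int queue_index;
    };

    struct demo_ring_container {
            struct demo_ring *ring;         /* head of the list, NULL when empty */
    };

    /* mirrors the i40e_for_each_ring() idea: follow ->next until NULL */
    #define demo_for_each_ring(pos, head) \
            for (pos = (head).ring; pos != NULL; pos = pos->next)

    static unsigned int demo_count_rings(struct demo_ring_container *rc)
    {
            struct demo_ring *ring;
            unsigned int n = 0;

            demo_for_each_ring(ring, *rc)
                    n++;
            return n;
    }

A list removes the old I40E_MAX_RINGPAIR_PER_VECTOR ceiling and lets a vector own any number of rings without resizing anything.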
| @@ -1144,6 +1182,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, | |||
| 1144 | struct tcphdr *th; | 1182 | struct tcphdr *th; |
| 1145 | unsigned int hlen; | 1183 | unsigned int hlen; |
| 1146 | u32 flex_ptype, dtype_cmd; | 1184 | u32 flex_ptype, dtype_cmd; |
| 1185 | u16 i; | ||
| 1147 | 1186 | ||
| 1148 | /* make sure ATR is enabled */ | 1187 | /* make sure ATR is enabled */ |
| 1149 | if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED)) | 1188 | if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED)) |
| @@ -1183,10 +1222,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, | |||
| 1183 | tx_ring->atr_count = 0; | 1222 | tx_ring->atr_count = 0; |
| 1184 | 1223 | ||
| 1185 | /* grab the next descriptor */ | 1224 | /* grab the next descriptor */ |
| 1186 | fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use); | 1225 | i = tx_ring->next_to_use; |
| 1187 | tx_ring->next_to_use++; | 1226 | fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); |
| 1188 | if (tx_ring->next_to_use == tx_ring->count) | 1227 | |
| 1189 | tx_ring->next_to_use = 0; | 1228 | i++; |
| 1229 | tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; | ||
| 1190 | 1230 | ||
| 1191 | flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & | 1231 | flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & |
| 1192 | I40E_TXD_FLTR_QW0_QINDEX_MASK; | 1232 | I40E_TXD_FLTR_QW0_QINDEX_MASK; |
| @@ -1276,27 +1316,6 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, | |||
| 1276 | } | 1316 | } |
| 1277 | 1317 | ||
| 1278 | /** | 1318 | /** |
| 1279 | * i40e_tx_csum - is checksum offload requested | ||
| 1280 | * @tx_ring: ptr to the ring to send | ||
| 1281 | * @skb: ptr to the skb we're sending | ||
| 1282 | * @tx_flags: the collected send information | ||
| 1283 | * @protocol: the send protocol | ||
| 1284 | * | ||
| 1285 | * Returns true if checksum offload is requested | ||
| 1286 | **/ | ||
| 1287 | static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb, | ||
| 1288 | u32 tx_flags, __be16 protocol) | ||
| 1289 | { | ||
| 1290 | if ((skb->ip_summed != CHECKSUM_PARTIAL) && | ||
| 1291 | !(tx_flags & I40E_TX_FLAGS_TXSW)) { | ||
| 1292 | if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN)) | ||
| 1293 | return false; | ||
| 1294 | } | ||
| 1295 | |||
| 1296 | return skb->ip_summed == CHECKSUM_PARTIAL; | ||
| 1297 | } | ||
| 1298 | |||
| 1299 | /** | ||
| 1300 | * i40e_tso - set up the tso context descriptor | 1319 | * i40e_tso - set up the tso context descriptor |
| 1301 | * @tx_ring: ptr to the ring to send | 1320 | * @tx_ring: ptr to the ring to send |
| 1302 | * @skb: ptr to the skb we're sending | 1321 | * @skb: ptr to the skb we're sending |
| @@ -1482,15 +1501,16 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, | |||
| 1482 | const u32 cd_tunneling, const u32 cd_l2tag2) | 1501 | const u32 cd_tunneling, const u32 cd_l2tag2) |
| 1483 | { | 1502 | { |
| 1484 | struct i40e_tx_context_desc *context_desc; | 1503 | struct i40e_tx_context_desc *context_desc; |
| 1504 | int i = tx_ring->next_to_use; | ||
| 1485 | 1505 | ||
| 1486 | if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) | 1506 | if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) |
| 1487 | return; | 1507 | return; |
| 1488 | 1508 | ||
| 1489 | /* grab the next descriptor */ | 1509 | /* grab the next descriptor */ |
| 1490 | context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use); | 1510 | context_desc = I40E_TX_CTXTDESC(tx_ring, i); |
| 1491 | tx_ring->next_to_use++; | 1511 | |
| 1492 | if (tx_ring->next_to_use == tx_ring->count) | 1512 | i++; |
| 1493 | tx_ring->next_to_use = 0; | 1513 | tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; |
| 1494 | 1514 | ||
| 1495 | /* cpu_to_le32 and assign to struct fields */ | 1515 | /* cpu_to_le32 and assign to struct fields */ |
| 1496 | context_desc->tunneling_params = cpu_to_le32(cd_tunneling); | 1516 | context_desc->tunneling_params = cpu_to_le32(cd_tunneling); |
| @@ -1512,68 +1532,71 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, | |||
| 1512 | struct i40e_tx_buffer *first, u32 tx_flags, | 1532 | struct i40e_tx_buffer *first, u32 tx_flags, |
| 1513 | const u8 hdr_len, u32 td_cmd, u32 td_offset) | 1533 | const u8 hdr_len, u32 td_cmd, u32 td_offset) |
| 1514 | { | 1534 | { |
| 1515 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; | ||
| 1516 | unsigned int data_len = skb->data_len; | 1535 | unsigned int data_len = skb->data_len; |
| 1517 | unsigned int size = skb_headlen(skb); | 1536 | unsigned int size = skb_headlen(skb); |
| 1518 | struct device *dev = tx_ring->dev; | 1537 | struct skb_frag_struct *frag; |
| 1519 | u32 paylen = skb->len - hdr_len; | ||
| 1520 | u16 i = tx_ring->next_to_use; | ||
| 1521 | struct i40e_tx_buffer *tx_bi; | 1538 | struct i40e_tx_buffer *tx_bi; |
| 1522 | struct i40e_tx_desc *tx_desc; | 1539 | struct i40e_tx_desc *tx_desc; |
| 1523 | u32 buf_offset = 0; | 1540 | u16 i = tx_ring->next_to_use; |
| 1524 | u32 td_tag = 0; | 1541 | u32 td_tag = 0; |
| 1525 | dma_addr_t dma; | 1542 | dma_addr_t dma; |
| 1526 | u16 gso_segs; | 1543 | u16 gso_segs; |
| 1527 | 1544 | ||
| 1528 | dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); | ||
| 1529 | if (dma_mapping_error(dev, dma)) | ||
| 1530 | goto dma_error; | ||
| 1531 | |||
| 1532 | if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { | 1545 | if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { |
| 1533 | td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; | 1546 | td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; |
| 1534 | td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> | 1547 | td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> |
| 1535 | I40E_TX_FLAGS_VLAN_SHIFT; | 1548 | I40E_TX_FLAGS_VLAN_SHIFT; |
| 1536 | } | 1549 | } |
| 1537 | 1550 | ||
| 1551 | if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) | ||
| 1552 | gso_segs = skb_shinfo(skb)->gso_segs; | ||
| 1553 | else | ||
| 1554 | gso_segs = 1; | ||
| 1555 | |||
| 1556 | /* multiply data chunks by size of headers */ | ||
| 1557 | first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); | ||
| 1558 | first->gso_segs = gso_segs; | ||
| 1559 | first->skb = skb; | ||
| 1560 | first->tx_flags = tx_flags; | ||
| 1561 | |||
| 1562 | dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); | ||
| 1563 | |||
| 1538 | tx_desc = I40E_TX_DESC(tx_ring, i); | 1564 | tx_desc = I40E_TX_DESC(tx_ring, i); |
| 1539 | for (;;) { | 1565 | tx_bi = first; |
| 1540 | while (size > I40E_MAX_DATA_PER_TXD) { | 1566 | |
| 1541 | tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset); | 1567 | for (frag = &skb_shinfo(skb)->frags[0];; frag++) { |
| 1568 | if (dma_mapping_error(tx_ring->dev, dma)) | ||
| 1569 | goto dma_error; | ||
| 1570 | |||
| 1571 | /* record length, and DMA address */ | ||
| 1572 | dma_unmap_len_set(tx_bi, len, size); | ||
| 1573 | dma_unmap_addr_set(tx_bi, dma, dma); | ||
| 1574 | |||
| 1575 | tx_desc->buffer_addr = cpu_to_le64(dma); | ||
| 1576 | |||
| 1577 | while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { | ||
| 1542 | tx_desc->cmd_type_offset_bsz = | 1578 | tx_desc->cmd_type_offset_bsz = |
| 1543 | build_ctob(td_cmd, td_offset, | 1579 | build_ctob(td_cmd, td_offset, |
| 1544 | I40E_MAX_DATA_PER_TXD, td_tag); | 1580 | I40E_MAX_DATA_PER_TXD, td_tag); |
| 1545 | 1581 | ||
| 1546 | buf_offset += I40E_MAX_DATA_PER_TXD; | ||
| 1547 | size -= I40E_MAX_DATA_PER_TXD; | ||
| 1548 | |||
| 1549 | tx_desc++; | 1582 | tx_desc++; |
| 1550 | i++; | 1583 | i++; |
| 1551 | if (i == tx_ring->count) { | 1584 | if (i == tx_ring->count) { |
| 1552 | tx_desc = I40E_TX_DESC(tx_ring, 0); | 1585 | tx_desc = I40E_TX_DESC(tx_ring, 0); |
| 1553 | i = 0; | 1586 | i = 0; |
| 1554 | } | 1587 | } |
| 1555 | } | ||
| 1556 | 1588 | ||
| 1557 | tx_bi = &tx_ring->tx_bi[i]; | 1589 | dma += I40E_MAX_DATA_PER_TXD; |
| 1558 | tx_bi->length = buf_offset + size; | 1590 | size -= I40E_MAX_DATA_PER_TXD; |
| 1559 | tx_bi->tx_flags = tx_flags; | ||
| 1560 | tx_bi->dma = dma; | ||
| 1561 | 1591 | ||
| 1562 | tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset); | 1592 | tx_desc->buffer_addr = cpu_to_le64(dma); |
| 1563 | tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, | 1593 | } |
| 1564 | size, td_tag); | ||
| 1565 | 1594 | ||
| 1566 | if (likely(!data_len)) | 1595 | if (likely(!data_len)) |
| 1567 | break; | 1596 | break; |
| 1568 | 1597 | ||
| 1569 | size = skb_frag_size(frag); | 1598 | tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, |
| 1570 | data_len -= size; | 1599 | size, td_tag); |
| 1571 | buf_offset = 0; | ||
| 1572 | tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE; | ||
| 1573 | |||
| 1574 | dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); | ||
| 1575 | if (dma_mapping_error(dev, dma)) | ||
| 1576 | goto dma_error; | ||
| 1577 | 1600 | ||
| 1578 | tx_desc++; | 1601 | tx_desc++; |
| 1579 | i++; | 1602 | i++; |
| @@ -1582,31 +1605,25 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, | |||
| 1582 | i = 0; | 1605 | i = 0; |
| 1583 | } | 1606 | } |
| 1584 | 1607 | ||
| 1585 | frag++; | 1608 | size = skb_frag_size(frag); |
| 1586 | } | 1609 | data_len -= size; |
| 1587 | |||
| 1588 | tx_desc->cmd_type_offset_bsz |= | ||
| 1589 | cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); | ||
| 1590 | 1610 | ||
| 1591 | i++; | 1611 | dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, |
| 1592 | if (i == tx_ring->count) | 1612 | DMA_TO_DEVICE); |
| 1593 | i = 0; | ||
| 1594 | 1613 | ||
| 1595 | tx_ring->next_to_use = i; | 1614 | tx_bi = &tx_ring->tx_bi[i]; |
| 1615 | } | ||
| 1596 | 1616 | ||
| 1597 | if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) | 1617 | tx_desc->cmd_type_offset_bsz = |
| 1598 | gso_segs = skb_shinfo(skb)->gso_segs; | 1618 | build_ctob(td_cmd, td_offset, size, td_tag) | |
| 1599 | else | 1619 | cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); |
| 1600 | gso_segs = 1; | ||
| 1601 | 1620 | ||
| 1602 | /* multiply data chunks by size of headers */ | 1621 | netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, |
| 1603 | tx_bi->bytecount = paylen + (gso_segs * hdr_len); | 1622 | tx_ring->queue_index), |
| 1604 | tx_bi->gso_segs = gso_segs; | 1623 | first->bytecount); |
| 1605 | tx_bi->skb = skb; | ||
| 1606 | 1624 | ||
| 1607 | /* set the timestamp and next to watch values */ | 1625 | /* set the timestamp */ |
| 1608 | first->time_stamp = jiffies; | 1626 | first->time_stamp = jiffies; |
| 1609 | first->next_to_watch = tx_desc; | ||
| 1610 | 1627 | ||
| 1611 | /* Force memory writes to complete before letting h/w | 1628 | /* Force memory writes to complete before letting h/w |
| 1612 | * know there are new descriptors to fetch. (Only | 1629 | * know there are new descriptors to fetch. (Only |
| @@ -1615,16 +1632,27 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, | |||
| 1615 | */ | 1632 | */ |
| 1616 | wmb(); | 1633 | wmb(); |
| 1617 | 1634 | ||
| 1635 | /* set next_to_watch value indicating a packet is present */ | ||
| 1636 | first->next_to_watch = tx_desc; | ||
| 1637 | |||
| 1638 | i++; | ||
| 1639 | if (i == tx_ring->count) | ||
| 1640 | i = 0; | ||
| 1641 | |||
| 1642 | tx_ring->next_to_use = i; | ||
| 1643 | |||
| 1644 | /* notify HW of packet */ | ||
| 1618 | writel(i, tx_ring->tail); | 1645 | writel(i, tx_ring->tail); |
| 1646 | |||
| 1619 | return; | 1647 | return; |
| 1620 | 1648 | ||
| 1621 | dma_error: | 1649 | dma_error: |
| 1622 | dev_info(dev, "TX DMA map failed\n"); | 1650 | dev_info(tx_ring->dev, "TX DMA map failed\n"); |
| 1623 | 1651 | ||
| 1624 | /* clear dma mappings for failed tx_bi map */ | 1652 | /* clear dma mappings for failed tx_bi map */ |
| 1625 | for (;;) { | 1653 | for (;;) { |
| 1626 | tx_bi = &tx_ring->tx_bi[i]; | 1654 | tx_bi = &tx_ring->tx_bi[i]; |
| 1627 | i40e_unmap_tx_resource(tx_ring, tx_bi); | 1655 | i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); |
| 1628 | if (tx_bi == first) | 1656 | if (tx_bi == first) |
| 1629 | break; | 1657 | break; |
| 1630 | if (i == 0) | 1658 | if (i == 0) |
| @@ -1632,8 +1660,6 @@ dma_error: | |||
| 1632 | i--; | 1660 | i--; |
| 1633 | } | 1661 | } |
| 1634 | 1662 | ||
| 1635 | dev_kfree_skb_any(skb); | ||
| 1636 | |||
| 1637 | tx_ring->next_to_use = i; | 1663 | tx_ring->next_to_use = i; |
| 1638 | } | 1664 | } |
| 1639 | 1665 | ||
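The rewritten mapping loop records each buffer's DMA address and length with the dma_unmap_*() setters as it goes, so the dma_error path can simply walk backwards from the failing slot to the packet's first buffer and undo whatever was mapped. A reduced sketch of that unwind, reusing the hypothetical demo_tx_buffer and demo_unmap_and_free() helpers from the earlier sketch:

    /* undo a partially mapped packet: 'i' is the slot that failed,
     * 'first' is the packet's first buffer-info slot in the ring
     */
    static void demo_unwind_tx_map(struct device *dev,
                                   struct demo_tx_buffer *ring_bi, u16 ring_count,
                                   u16 i, struct demo_tx_buffer *first)
    {
            struct demo_tx_buffer *bi;

            for (;;) {
                    bi = &ring_bi[i];
                    demo_unmap_and_free(dev, bi);   /* unmaps if mapped; frees the skb on the first buffer */
                    if (bi == first)
                            break;                  /* reached the start of the packet */
                    if (i == 0)
                            i = ring_count;         /* wrap backwards over the ring end */
                    i--;
            }
    }

Because the skb is attached only to the first buffer, it is freed exactly once, when the walk reaches 'first'; that is why the separate dev_kfree_skb_any() after the loop could be dropped in this hunk.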
| @@ -1758,16 +1784,16 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, | |||
| 1758 | 1784 | ||
| 1759 | skb_tx_timestamp(skb); | 1785 | skb_tx_timestamp(skb); |
| 1760 | 1786 | ||
| 1787 | /* always enable CRC insertion offload */ | ||
| 1788 | td_cmd |= I40E_TX_DESC_CMD_ICRC; | ||
| 1789 | |||
| 1761 | /* Always offload the checksum, since it's in the data descriptor */ | 1790 | /* Always offload the checksum, since it's in the data descriptor */ |
| 1762 | if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol)) | 1791 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
| 1763 | tx_flags |= I40E_TX_FLAGS_CSUM; | 1792 | tx_flags |= I40E_TX_FLAGS_CSUM; |
| 1764 | 1793 | ||
| 1765 | /* always enable offload insertion */ | ||
| 1766 | td_cmd |= I40E_TX_DESC_CMD_ICRC; | ||
| 1767 | |||
| 1768 | if (tx_flags & I40E_TX_FLAGS_CSUM) | ||
| 1769 | i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, | 1794 | i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, |
| 1770 | tx_ring, &cd_tunneling); | 1795 | tx_ring, &cd_tunneling); |
| 1796 | } | ||
| 1771 | 1797 | ||
| 1772 | i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, | 1798 | i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, |
| 1773 | cd_tunneling, cd_l2tag2); | 1799 | cd_tunneling, cd_l2tag2); |
| @@ -1801,7 +1827,7 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
| 1801 | { | 1827 | { |
| 1802 | struct i40e_netdev_priv *np = netdev_priv(netdev); | 1828 | struct i40e_netdev_priv *np = netdev_priv(netdev); |
| 1803 | struct i40e_vsi *vsi = np->vsi; | 1829 | struct i40e_vsi *vsi = np->vsi; |
| 1804 | struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping]; | 1830 | struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; |
| 1805 | 1831 | ||
| 1806 | /* hardware can't handle really short frames, hardware padding works | 1832 | /* hardware can't handle really short frames, hardware padding works |
| 1807 | * beyond this point | 1833 | * beyond this point |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index b1d7722d98a7..db55d9947f15 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h | |||
| @@ -102,23 +102,20 @@ | |||
| 102 | #define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) | 102 | #define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) |
| 103 | #define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) | 103 | #define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) |
| 104 | #define I40E_TX_FLAGS_FSO (u32)(1 << 7) | 104 | #define I40E_TX_FLAGS_FSO (u32)(1 << 7) |
| 105 | #define I40E_TX_FLAGS_TXSW (u32)(1 << 8) | ||
| 106 | #define I40E_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 9) | ||
| 107 | #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 | 105 | #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 |
| 108 | #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 | 106 | #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 |
| 109 | #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 | 107 | #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 |
| 110 | #define I40E_TX_FLAGS_VLAN_SHIFT 16 | 108 | #define I40E_TX_FLAGS_VLAN_SHIFT 16 |
| 111 | 109 | ||
| 112 | struct i40e_tx_buffer { | 110 | struct i40e_tx_buffer { |
| 113 | struct sk_buff *skb; | ||
| 114 | dma_addr_t dma; | ||
| 115 | unsigned long time_stamp; | ||
| 116 | u16 length; | ||
| 117 | u32 tx_flags; | ||
| 118 | struct i40e_tx_desc *next_to_watch; | 111 | struct i40e_tx_desc *next_to_watch; |
| 112 | unsigned long time_stamp; | ||
| 113 | struct sk_buff *skb; | ||
| 119 | unsigned int bytecount; | 114 | unsigned int bytecount; |
| 120 | u16 gso_segs; | 115 | unsigned short gso_segs; |
| 121 | u8 mapped_as_page; | 116 | DEFINE_DMA_UNMAP_ADDR(dma); |
| 117 | DEFINE_DMA_UNMAP_LEN(len); | ||
| 118 | u32 tx_flags; | ||
| 122 | }; | 119 | }; |
| 123 | 120 | ||
| 124 | struct i40e_rx_buffer { | 121 | struct i40e_rx_buffer { |
| @@ -129,18 +126,18 @@ struct i40e_rx_buffer { | |||
| 129 | unsigned int page_offset; | 126 | unsigned int page_offset; |
| 130 | }; | 127 | }; |
| 131 | 128 | ||
| 132 | struct i40e_tx_queue_stats { | 129 | struct i40e_queue_stats { |
| 133 | u64 packets; | 130 | u64 packets; |
| 134 | u64 bytes; | 131 | u64 bytes; |
| 132 | }; | ||
| 133 | |||
| 134 | struct i40e_tx_queue_stats { | ||
| 135 | u64 restart_queue; | 135 | u64 restart_queue; |
| 136 | u64 tx_busy; | 136 | u64 tx_busy; |
| 137 | u64 completed; | ||
| 138 | u64 tx_done_old; | 137 | u64 tx_done_old; |
| 139 | }; | 138 | }; |
| 140 | 139 | ||
| 141 | struct i40e_rx_queue_stats { | 140 | struct i40e_rx_queue_stats { |
| 142 | u64 packets; | ||
| 143 | u64 bytes; | ||
| 144 | u64 non_eop_descs; | 141 | u64 non_eop_descs; |
| 145 | u64 alloc_rx_page_failed; | 142 | u64 alloc_rx_page_failed; |
| 146 | u64 alloc_rx_buff_failed; | 143 | u64 alloc_rx_buff_failed; |
| @@ -183,6 +180,7 @@ enum i40e_ring_state_t { | |||
| 183 | 180 | ||
| 184 | /* struct that defines a descriptor ring, associated with a VSI */ | 181 | /* struct that defines a descriptor ring, associated with a VSI */ |
| 185 | struct i40e_ring { | 182 | struct i40e_ring { |
| 183 | struct i40e_ring *next; /* pointer to next ring in q_vector */ | ||
| 186 | void *desc; /* Descriptor ring memory */ | 184 | void *desc; /* Descriptor ring memory */ |
| 187 | struct device *dev; /* Used for DMA mapping */ | 185 | struct device *dev; /* Used for DMA mapping */ |
| 188 | struct net_device *netdev; /* netdev ring maps to */ | 186 | struct net_device *netdev; /* netdev ring maps to */ |
| @@ -219,6 +217,8 @@ struct i40e_ring { | |||
| 219 | bool ring_active; /* is ring online or not */ | 217 | bool ring_active; /* is ring online or not */ |
| 220 | 218 | ||
| 221 | /* stats structs */ | 219 | /* stats structs */ |
| 220 | struct i40e_queue_stats stats; | ||
| 221 | struct u64_stats_sync syncp; | ||
| 222 | union { | 222 | union { |
| 223 | struct i40e_tx_queue_stats tx_stats; | 223 | struct i40e_tx_queue_stats tx_stats; |
| 224 | struct i40e_rx_queue_stats rx_stats; | 224 | struct i40e_rx_queue_stats rx_stats; |
| @@ -229,6 +229,8 @@ struct i40e_ring { | |||
| 229 | 229 | ||
| 230 | struct i40e_vsi *vsi; /* Backreference to associated VSI */ | 230 | struct i40e_vsi *vsi; /* Backreference to associated VSI */ |
| 231 | struct i40e_q_vector *q_vector; /* Backreference to associated vector */ | 231 | struct i40e_q_vector *q_vector; /* Backreference to associated vector */ |
| 232 | |||
| 233 | struct rcu_head rcu; /* to avoid race on free */ | ||
| 232 | } ____cacheline_internodealigned_in_smp; | 234 | } ____cacheline_internodealigned_in_smp; |
| 233 | 235 | ||
| 234 | enum i40e_latency_range { | 236 | enum i40e_latency_range { |
| @@ -238,9 +240,8 @@ enum i40e_latency_range { | |||
| 238 | }; | 240 | }; |
| 239 | 241 | ||
| 240 | struct i40e_ring_container { | 242 | struct i40e_ring_container { |
| 241 | #define I40E_MAX_RINGPAIR_PER_VECTOR 8 | ||
| 242 | /* array of pointers to rings */ | 243 | /* array of pointers to rings */ |
| 243 | struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR]; | 244 | struct i40e_ring *ring; |
| 244 | unsigned int total_bytes; /* total bytes processed this int */ | 245 | unsigned int total_bytes; /* total bytes processed this int */ |
| 245 | unsigned int total_packets; /* total packets processed this int */ | 246 | unsigned int total_packets; /* total packets processed this int */ |
| 246 | u16 count; | 247 | u16 count; |
| @@ -248,6 +249,10 @@ struct i40e_ring_container { | |||
| 248 | u16 itr; | 249 | u16 itr; |
| 249 | }; | 250 | }; |
| 250 | 251 | ||
| 252 | /* iterator for handling rings in ring container */ | ||
| 253 | #define i40e_for_each_ring(pos, head) \ | ||
| 254 | for (pos = (head).ring; pos != NULL; pos = pos->next) | ||
| 255 | |||
| 251 | void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); | 256 | void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); |
| 252 | netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); | 257 | netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); |
| 253 | void i40e_clean_tx_ring(struct i40e_ring *tx_ring); | 258 | void i40e_clean_tx_ring(struct i40e_ring *tx_ring); |
