author     Jesse Brandeburg <jesse.brandeburg@intel.com>  2008-08-26 07:27:08 -0400
committer  Jeff Garzik <jgarzik@redhat.com>               2008-09-03 10:03:33 -0400
commit     3a581073e0f9f3966ac95a89cd04a0a6b948dc77 (patch)
tree       37a2e723329db72ee1acbbc19b09e4b3e7b49db8
parent     036c9b097034b4ea82974f7c98d10ec7fbf81902 (diff)
ixgbe: Cleanup references to Tx and Rx rings to be common across the driver
Cleanup all the different references to the Tx ring and Rx ring structures and make them common across the driver.

Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c  158
1 file changed, 77 insertions(+), 81 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 50737ccdeca2..b5a9b9da2fb7 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -290,38 +290,38 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 
 #ifdef CONFIG_DCA
 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
-                                struct ixgbe_ring *rxr)
+                                struct ixgbe_ring *rx_ring)
 {
         u32 rxctrl;
         int cpu = get_cpu();
-        int q = rxr - adapter->rx_ring;
+        int q = rx_ring - adapter->rx_ring;
 
-        if (rxr->cpu != cpu) {
+        if (rx_ring->cpu != cpu) {
                 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
                 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
                 rxctrl |= dca_get_tag(cpu);
                 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
                 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
-                rxr->cpu = cpu;
+                rx_ring->cpu = cpu;
         }
         put_cpu();
 }
 
 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
-                                struct ixgbe_ring *txr)
+                                struct ixgbe_ring *tx_ring)
 {
         u32 txctrl;
         int cpu = get_cpu();
-        int q = txr - adapter->tx_ring;
+        int q = tx_ring - adapter->tx_ring;
 
-        if (txr->cpu != cpu) {
+        if (tx_ring->cpu != cpu) {
                 txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
                 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
                 txctrl |= dca_get_tag(cpu);
                 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
-                txr->cpu = cpu;
+                tx_ring->cpu = cpu;
         }
         put_cpu();
 }
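
Note on the idiom above: the queue number q in both DCA helpers is recovered by pointer arithmetic; because adapter->rx_ring and adapter->tx_ring point at contiguous arrays of struct ixgbe_ring, subtracting the array base from a ring pointer yields the element index, not a byte offset. A minimal standalone sketch of that idiom, using generic stand-in types rather than the driver's structures:

        #include <stdio.h>

        struct ring { int cpu; };

        int main(void)
        {
                struct ring rings[8];              /* stands in for adapter->rx_ring[] */
                struct ring *rx_ring = &rings[5];  /* pointer to one queue's ring */

                /* Same idea as "q = rx_ring - adapter->rx_ring": the difference
                 * of two pointers into the same array is counted in elements. */
                int q = (int)(rx_ring - rings);
                printf("queue index = %d\n", q);   /* prints 5 */
                return 0;
        }
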
@@ -459,31 +459,30 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
         struct net_device *netdev = adapter->netdev;
         struct pci_dev *pdev = adapter->pdev;
         union ixgbe_adv_rx_desc *rx_desc;
-        struct ixgbe_rx_buffer *rx_buffer_info;
-        struct sk_buff *skb;
+        struct ixgbe_rx_buffer *bi;
         unsigned int i;
         unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;
 
         i = rx_ring->next_to_use;
-        rx_buffer_info = &rx_ring->rx_buffer_info[i];
+        bi = &rx_ring->rx_buffer_info[i];
 
         while (cleaned_count--) {
                 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
 
-                if (!rx_buffer_info->page &&
+                if (!bi->page &&
                     (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
-                        rx_buffer_info->page = alloc_page(GFP_ATOMIC);
-                        if (!rx_buffer_info->page) {
+                        bi->page = alloc_page(GFP_ATOMIC);
+                        if (!bi->page) {
                                 adapter->alloc_rx_page_failed++;
                                 goto no_buffers;
                         }
-                        rx_buffer_info->page_dma =
-                            pci_map_page(pdev, rx_buffer_info->page,
-                                         0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                        bi->page_dma = pci_map_page(pdev, bi->page, 0,
+                                                    PAGE_SIZE,
+                                                    PCI_DMA_FROMDEVICE);
                 }
 
-                if (!rx_buffer_info->skb) {
-                        skb = netdev_alloc_skb(netdev, bufsz);
+                if (!bi->skb) {
+                        struct sk_buff *skb = netdev_alloc_skb(netdev, bufsz);
 
                         if (!skb) {
                                 adapter->alloc_rx_buff_failed++;
@@ -497,27 +496,23 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                          */
                         skb_reserve(skb, NET_IP_ALIGN);
 
-                        rx_buffer_info->skb = skb;
-                        rx_buffer_info->dma = pci_map_single(pdev, skb->data,
-                                                             bufsz,
-                                                             PCI_DMA_FROMDEVICE);
+                        bi->skb = skb;
+                        bi->dma = pci_map_single(pdev, skb->data, bufsz,
+                                                 PCI_DMA_FROMDEVICE);
                 }
                 /* Refresh the desc even if buffer_addrs didn't change because
                  * each write-back erases this info. */
                 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-                        rx_desc->read.pkt_addr =
-                            cpu_to_le64(rx_buffer_info->page_dma);
-                        rx_desc->read.hdr_addr =
-                            cpu_to_le64(rx_buffer_info->dma);
+                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                 } else {
-                        rx_desc->read.pkt_addr =
-                            cpu_to_le64(rx_buffer_info->dma);
+                        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
                 }
 
                 i++;
                 if (i == rx_ring->count)
                         i = 0;
-                rx_buffer_info = &rx_ring->rx_buffer_info[i];
+                bi = &rx_ring->rx_buffer_info[i];
         }
 no_buffers:
         if (rx_ring->next_to_use != i) {
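
Note on the refill loop above: the ring index advances with the usual increment-and-wrap pattern, and rx_ring->next_to_use is only published after the loop, at the no_buffers label. A small standalone sketch of that index walk, with arbitrary stand-in sizes:

        #include <stdio.h>

        int main(void)
        {
                unsigned int count = 4;      /* ring size, e.g. rx_ring->count */
                unsigned int i = 2;          /* starting slot, e.g. next_to_use */
                int cleaned_count = 5;       /* descriptors to refill this pass */

                while (cleaned_count--) {
                        /* ... allocate a buffer and fill descriptor i here ... */
                        i++;
                        if (i == count)
                                i = 0;       /* wrap back to the start of the ring */
                        printf("next slot: %u\n", i);
                }
                return 0;
        }
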
@@ -896,7 +891,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 {
         struct ixgbe_q_vector *q_vector = data;
         struct ixgbe_adapter *adapter = q_vector->adapter;
-        struct ixgbe_ring *txr;
+        struct ixgbe_ring *tx_ring;
         int i, r_idx;
 
         if (!q_vector->txr_count)
@@ -904,14 +899,14 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 
         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
         for (i = 0; i < q_vector->txr_count; i++) {
-                txr = &(adapter->tx_ring[r_idx]);
+                tx_ring = &(adapter->tx_ring[r_idx]);
 #ifdef CONFIG_DCA
                 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-                        ixgbe_update_tx_dca(adapter, txr);
+                        ixgbe_update_tx_dca(adapter, tx_ring);
 #endif
-                txr->total_bytes = 0;
-                txr->total_packets = 0;
-                ixgbe_clean_tx_irq(adapter, txr);
+                tx_ring->total_bytes = 0;
+                tx_ring->total_packets = 0;
+                ixgbe_clean_tx_irq(adapter, tx_ring);
                 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                       r_idx + 1);
         }
@@ -928,18 +923,18 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 {
         struct ixgbe_q_vector *q_vector = data;
         struct ixgbe_adapter *adapter = q_vector->adapter;
-        struct ixgbe_ring *rxr;
+        struct ixgbe_ring *rx_ring;
         int r_idx;
 
         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
         if (!q_vector->rxr_count)
                 return IRQ_HANDLED;
 
-        rxr = &(adapter->rx_ring[r_idx]);
+        rx_ring = &(adapter->rx_ring[r_idx]);
         /* disable interrupts on this vector only */
-        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx);
-        rxr->total_bytes = 0;
-        rxr->total_packets = 0;
+        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
+        rx_ring->total_bytes = 0;
+        rx_ring->total_packets = 0;
         netif_rx_schedule(adapter->netdev, &q_vector->napi);
 
         return IRQ_HANDLED;
@@ -964,18 +959,18 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
         struct ixgbe_q_vector *q_vector =
                        container_of(napi, struct ixgbe_q_vector, napi);
         struct ixgbe_adapter *adapter = q_vector->adapter;
-        struct ixgbe_ring *rxr;
+        struct ixgbe_ring *rx_ring;
         int work_done = 0;
         long r_idx;
 
         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-        rxr = &(adapter->rx_ring[r_idx]);
+        rx_ring = &(adapter->rx_ring[r_idx]);
 #ifdef CONFIG_DCA
         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-                ixgbe_update_rx_dca(adapter, rxr);
+                ixgbe_update_rx_dca(adapter, rx_ring);
 #endif
 
-        ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);
+        ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
 
         /* If all Rx work done, exit the polling mode */
         if (work_done < budget) {
@@ -983,7 +978,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
                 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
                         ixgbe_set_itr_msix(q_vector);
                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
-                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx);
+                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
         }
 
         return work_done;
@@ -1342,7 +1337,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
 }
 
 /**
- * ixgbe_configure_tx - Configure 8254x Transmit Unit after Reset
+ * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
  * @adapter: board private structure
  *
  * Configure the Tx unit of the MAC after a reset.
@@ -1408,7 +1403,7 @@ static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
 }
 
 /**
- * ixgbe_configure_rx - Configure 8254x Receive Unit after Reset
+ * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
  * @adapter: board private structure
  *
  * Configure the Rx unit of the MAC after a reset.
@@ -2483,40 +2478,41 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 /**
  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
  * @adapter: board private structure
- * @txdr: tx descriptor ring (for a specific queue) to setup
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
 int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
-                             struct ixgbe_ring *txdr)
+                             struct ixgbe_ring *tx_ring)
 {
         struct pci_dev *pdev = adapter->pdev;
         int size;
 
-        size = sizeof(struct ixgbe_tx_buffer) * txdr->count;
-        txdr->tx_buffer_info = vmalloc(size);
-        if (!txdr->tx_buffer_info) {
+        size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
+        tx_ring->tx_buffer_info = vmalloc(size);
+        if (!tx_ring->tx_buffer_info) {
                 DPRINTK(PROBE, ERR,
                         "Unable to allocate memory for the transmit descriptor ring\n");
                 return -ENOMEM;
         }
-        memset(txdr->tx_buffer_info, 0, size);
+        memset(tx_ring->tx_buffer_info, 0, size);
 
         /* round up to nearest 4K */
-        txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc);
-        txdr->size = ALIGN(txdr->size, 4096);
+        tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
+        tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-        txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
-        if (!txdr->desc) {
-                vfree(txdr->tx_buffer_info);
+        tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+                                             &tx_ring->dma);
+        if (!tx_ring->desc) {
+                vfree(tx_ring->tx_buffer_info);
                 DPRINTK(PROBE, ERR,
                         "Memory allocation failed for the tx desc ring\n");
                 return -ENOMEM;
         }
 
-        txdr->next_to_use = 0;
-        txdr->next_to_clean = 0;
-        txdr->work_limit = txdr->count;
+        tx_ring->next_to_use = 0;
+        tx_ring->next_to_clean = 0;
+        tx_ring->work_limit = tx_ring->count;
 
         return 0;
 }
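
Note on the sizing above: the descriptor area is computed as count times the descriptor size and rounded up to the next 4 KiB boundary with ALIGN() before the coherent DMA allocation. A standalone sketch of that rounding, using a local macro with the same power-of-two semantics; the 16-byte descriptor size below is assumed for illustration only:

        #include <stdio.h>

        /* Round x up to the next multiple of a, where a is a power of two. */
        #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

        int main(void)
        {
                unsigned long count = 100;               /* hypothetical ring->count */
                unsigned long desc = 16;                 /* assumed descriptor size in bytes */
                unsigned long size = count * desc;       /* 1600 bytes */

                size = ALIGN_UP(size, 4096UL);
                printf("allocation size = %lu\n", size); /* 4096: rounded up to 4 KiB */
                return 0;
        }
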
@@ -2524,52 +2520,52 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 /**
  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
  * @adapter: board private structure
- * @rxdr: rx descriptor ring (for a specific queue) to setup
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
-                             struct ixgbe_ring *rxdr)
+                             struct ixgbe_ring *rx_ring)
 {
         struct pci_dev *pdev = adapter->pdev;
         int size;
 
         size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
-        rxdr->lro_mgr.lro_arr = vmalloc(size);
-        if (!rxdr->lro_mgr.lro_arr)
+        rx_ring->lro_mgr.lro_arr = vmalloc(size);
+        if (!rx_ring->lro_mgr.lro_arr)
                 return -ENOMEM;
-        memset(rxdr->lro_mgr.lro_arr, 0, size);
+        memset(rx_ring->lro_mgr.lro_arr, 0, size);
 
-        size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
-        rxdr->rx_buffer_info = vmalloc(size);
-        if (!rxdr->rx_buffer_info) {
+        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+        rx_ring->rx_buffer_info = vmalloc(size);
+        if (!rx_ring->rx_buffer_info) {
                 DPRINTK(PROBE, ERR,
                         "vmalloc allocation failed for the rx desc ring\n");
                 goto alloc_failed;
         }
-        memset(rxdr->rx_buffer_info, 0, size);
+        memset(rx_ring->rx_buffer_info, 0, size);
 
         /* Round up to nearest 4K */
-        rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc);
-        rxdr->size = ALIGN(rxdr->size, 4096);
+        rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+        rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-        rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+        rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
 
-        if (!rxdr->desc) {
+        if (!rx_ring->desc) {
                 DPRINTK(PROBE, ERR,
                         "Memory allocation failed for the rx desc ring\n");
-                vfree(rxdr->rx_buffer_info);
+                vfree(rx_ring->rx_buffer_info);
                 goto alloc_failed;
         }
 
-        rxdr->next_to_clean = 0;
-        rxdr->next_to_use = 0;
+        rx_ring->next_to_clean = 0;
+        rx_ring->next_to_use = 0;
 
         return 0;
 
 alloc_failed:
-        vfree(rxdr->lro_mgr.lro_arr);
-        rxdr->lro_mgr.lro_arr = NULL;
+        vfree(rx_ring->lro_mgr.lro_arr);
+        rx_ring->lro_mgr.lro_arr = NULL;
         return -ENOMEM;
 }
 
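
Note on the error handling above: a failure after the LRO array has been allocated jumps to the alloc_failed label, which releases it and returns -ENOMEM. A hedged userspace sketch of the same goto-unwind shape, with plain malloc()/free() and simplified stand-in fields rather than the driver's types:

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        struct rx_ring {
                void *lro_arr;
                void *rx_buffer_info;
        };

        /* Returns 0 on success; on failure, frees anything already allocated. */
        static int setup_rx_resources(struct rx_ring *ring, size_t lro_sz, size_t buf_sz)
        {
                ring->lro_arr = malloc(lro_sz);
                if (!ring->lro_arr)
                        return -1;
                memset(ring->lro_arr, 0, lro_sz);

                ring->rx_buffer_info = malloc(buf_sz);
                if (!ring->rx_buffer_info)
                        goto alloc_failed;               /* unwind the earlier allocation */
                memset(ring->rx_buffer_info, 0, buf_sz);

                return 0;

        alloc_failed:
                free(ring->lro_arr);
                ring->lro_arr = NULL;
                return -1;
        }

        int main(void)
        {
                struct rx_ring ring = { 0 };
                int ret = setup_rx_resources(&ring, 64, 256);
                printf("setup_rx_resources: %d\n", ret);
                free(ring.rx_buffer_info);               /* free(NULL) is a no-op */
                free(ring.lro_arr);
                return 0;
        }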