author:    Alexander Duyck <alexander.h.duyck@intel.com>  2010-02-16 20:02:39 -0500
committer: David S. Miller <davem@davemloft.net>  2010-02-17 16:21:34 -0500
commit:    3025a446b6d0255ae4399ca5f9b259bd1b51539e (patch)
tree:      e352d223dfb10a75c8e4a7c8fca52504fa9ec878 /drivers/net/igb/igb_main.c
parent:    b94f2d775a71ed09dc8ca2bf24c611bdce9e82e7 (diff)
igb: Allocate rings separately instead of as a block

This change makes it so that the rings are allocated separately. As a result
we can allocate them on separate nodes at some point in the future if we so
desire.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
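The core of the patch is the move from one contiguous kcalloc()'d array of
struct igb_ring to an array of per-ring pointers filled by individual
kzalloc() calls; separate allocations are what would later allow each ring to
be placed on its own NUMA node (e.g. via kzalloc_node()). Below is a minimal
userspace sketch of that pattern, with igb_adapter/igb_ring reduced to stubs
and calloc()/free() standing in for kzalloc()/kfree(); alloc_queues_sketch()
and the fixed array bound are illustrative only, not driver code:

	/* Sketch only: stub types and userspace allocators. The driver's
	 * real igb_alloc_queues()/igb_free_queues() are in the diff below. */
	#include <stdlib.h>

	struct igb_ring { int count; int queue_index; };

	struct igb_adapter {
		int num_tx_queues;
		/* before: struct igb_ring *tx_ring;  (one contiguous block)
		 * after:  one pointer per ring, each allocated on its own */
		struct igb_ring *tx_ring[16];	/* bound is illustrative */
	};

	static int alloc_queues_sketch(struct igb_adapter *adapter)
	{
		int i;

		for (i = 0; i < adapter->num_tx_queues; i++) {
			/* stand-in for kzalloc(sizeof(struct igb_ring),
			 * GFP_KERNEL) */
			struct igb_ring *ring = calloc(1, sizeof(*ring));

			if (!ring)
				goto err;
			ring->queue_index = i;
			adapter->tx_ring[i] = ring;
		}
		return 0;

	err:	/* unwind only the rings allocated before the failure */
		while (i--) {
			free(adapter->tx_ring[i]);
			adapter->tx_ring[i] = NULL;
		}
		return -1;	/* the driver would return -ENOMEM */
	}

Note that kzalloc() zeroes each ring just as kcalloc() zeroed the block, so
the patch can switch allocators without adding any memset().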
Diffstat (limited to 'drivers/net/igb/igb_main.c')
-rw-r--r--  drivers/net/igb/igb_main.c | 113
1 file changed, 57 insertions(+), 56 deletions(-)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 089a7c3f4c69..30fb5a89c42d 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -318,31 +318,35 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
 		 */
 		if (adapter->vfs_allocated_count) {
 			for (; i < adapter->rss_queues; i++)
-				adapter->rx_ring[i].reg_idx = rbase_offset +
+				adapter->rx_ring[i]->reg_idx = rbase_offset +
 				                              Q_IDX_82576(i);
 			for (; j < adapter->rss_queues; j++)
-				adapter->tx_ring[j].reg_idx = rbase_offset +
+				adapter->tx_ring[j]->reg_idx = rbase_offset +
 				                              Q_IDX_82576(j);
 		}
 	case e1000_82575:
 	case e1000_82580:
 	default:
 		for (; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].reg_idx = rbase_offset + i;
+			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
 		for (; j < adapter->num_tx_queues; j++)
-			adapter->tx_ring[j].reg_idx = rbase_offset + j;
+			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
 		break;
 	}
 }
 
 static void igb_free_queues(struct igb_adapter *adapter)
 {
-	kfree(adapter->tx_ring);
-	kfree(adapter->rx_ring);
-
-	adapter->tx_ring = NULL;
-	adapter->rx_ring = NULL;
+	int i;
 
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		kfree(adapter->tx_ring[i]);
+		adapter->tx_ring[i] = NULL;
+	}
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		kfree(adapter->rx_ring[i]);
+		adapter->rx_ring[i] = NULL;
+	}
 	adapter->num_rx_queues = 0;
 	adapter->num_tx_queues = 0;
 }
@@ -356,20 +360,13 @@ static void igb_free_queues(struct igb_adapter *adapter)
  **/
 static int igb_alloc_queues(struct igb_adapter *adapter)
 {
+	struct igb_ring *ring;
 	int i;
 
-	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
-	                           sizeof(struct igb_ring), GFP_KERNEL);
-	if (!adapter->tx_ring)
-		goto err;
-
-	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
-	                           sizeof(struct igb_ring), GFP_KERNEL);
-	if (!adapter->rx_ring)
-		goto err;
-
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *ring = &(adapter->tx_ring[i]);
+		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+		if (!ring)
+			goto err;
 		ring->count = adapter->tx_ring_count;
 		ring->queue_index = i;
 		ring->pdev = adapter->pdev;
@@ -377,10 +374,13 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		/* For 82575, context index must be unique per ring. */
 		if (adapter->hw.mac.type == e1000_82575)
 			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
+		adapter->tx_ring[i] = ring;
 	}
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *ring = &(adapter->rx_ring[i]);
+		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+		if (!ring)
+			goto err;
 		ring->count = adapter->rx_ring_count;
 		ring->queue_index = i;
 		ring->pdev = adapter->pdev;
@@ -390,6 +390,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		/* set flag indicating ring supports SCTP checksum offload */
 		if (adapter->hw.mac.type >= e1000_82576)
 			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
+		adapter->rx_ring[i] = ring;
 	}
 
 	igb_cache_ring_register(adapter);
@@ -780,10 +781,9 @@ err_out:
 static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                       int ring_idx, int v_idx)
 {
-	struct igb_q_vector *q_vector;
+	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
 
-	q_vector = adapter->q_vector[v_idx];
-	q_vector->rx_ring = &adapter->rx_ring[ring_idx];
+	q_vector->rx_ring = adapter->rx_ring[ring_idx];
 	q_vector->rx_ring->q_vector = q_vector;
 	q_vector->itr_val = adapter->rx_itr_setting;
 	if (q_vector->itr_val && q_vector->itr_val <= 3)
@@ -793,10 +793,9 @@ static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
 static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                       int ring_idx, int v_idx)
 {
-	struct igb_q_vector *q_vector;
+	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
 
-	q_vector = adapter->q_vector[v_idx];
-	q_vector->tx_ring = &adapter->tx_ring[ring_idx];
+	q_vector->tx_ring = adapter->tx_ring[ring_idx];
 	q_vector->tx_ring->q_vector = q_vector;
 	q_vector->itr_val = adapter->tx_itr_setting;
 	if (q_vector->itr_val && q_vector->itr_val <= 3)
@@ -1106,7 +1105,7 @@ static void igb_configure(struct igb_adapter *adapter)
 	 * at least 1 descriptor unused to make sure
 	 * next_to_use != next_to_clean */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *ring = &adapter->rx_ring[i];
+		struct igb_ring *ring = adapter->rx_ring[i];
 		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
 	}
 
@@ -2148,19 +2147,19 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		err = igb_setup_tx_resources(&adapter->tx_ring[i]);
+		err = igb_setup_tx_resources(adapter->tx_ring[i]);
 		if (err) {
 			dev_err(&pdev->dev,
 				"Allocation for Tx Queue %u failed\n", i);
 			for (i--; i >= 0; i--)
-				igb_free_tx_resources(&adapter->tx_ring[i]);
+				igb_free_tx_resources(adapter->tx_ring[i]);
 			break;
 		}
 	}
 
 	for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
 		int r_idx = i % adapter->num_tx_queues;
-		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
+		adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
 	}
 	return err;
 }
@@ -2243,7 +2242,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
-		igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
+		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
 /**
@@ -2301,12 +2300,12 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		err = igb_setup_rx_resources(&adapter->rx_ring[i]);
+		err = igb_setup_rx_resources(adapter->rx_ring[i]);
 		if (err) {
 			dev_err(&pdev->dev,
 				"Allocation for Rx Queue %u failed\n", i);
 			for (i--; i >= 0; i--)
-				igb_free_rx_resources(&adapter->rx_ring[i]);
+				igb_free_rx_resources(adapter->rx_ring[i]);
 			break;
 		}
 	}
@@ -2634,7 +2633,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
+		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -2671,7 +2670,7 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
-		igb_free_tx_resources(&adapter->tx_ring[i]);
+		igb_free_tx_resources(adapter->tx_ring[i]);
 }
 
 void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
@@ -2738,7 +2737,7 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
-		igb_clean_tx_ring(&adapter->tx_ring[i]);
+		igb_clean_tx_ring(adapter->tx_ring[i]);
 }
 
 /**
@@ -2775,7 +2774,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		igb_free_rx_resources(&adapter->rx_ring[i]);
+		igb_free_rx_resources(adapter->rx_ring[i]);
 }
 
 /**
@@ -2839,7 +2838,7 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		igb_clean_rx_ring(&adapter->rx_ring[i]);
+		igb_clean_rx_ring(adapter->rx_ring[i]);
 }
 
 /**
@@ -3163,7 +3162,7 @@ static void igb_watchdog_task(struct work_struct *work)
 	igb_update_adaptive(hw);
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *tx_ring = &adapter->tx_ring[i];
+		struct igb_ring *tx_ring = adapter->tx_ring[i];
 		if (!netif_carrier_ok(netdev)) {
 			/* We've lost link, so the controller stops DMA,
 			 * but we've got queued Tx work that's never going
@@ -3359,13 +3358,13 @@ static void igb_set_itr(struct igb_adapter *adapter)
 
 	adapter->rx_itr = igb_update_itr(adapter,
 				    adapter->rx_itr,
-				    adapter->rx_ring->total_packets,
-				    adapter->rx_ring->total_bytes);
+				    q_vector->rx_ring->total_packets,
+				    q_vector->rx_ring->total_bytes);
 
 	adapter->tx_itr = igb_update_itr(adapter,
 				    adapter->tx_itr,
-				    adapter->tx_ring->total_packets,
-				    adapter->tx_ring->total_bytes);
+				    q_vector->tx_ring->total_packets,
+				    q_vector->tx_ring->total_bytes);
 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
 
 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
@@ -3388,10 +3387,10 @@ static void igb_set_itr(struct igb_adapter *adapter)
 	}
 
 set_itr_now:
-	adapter->rx_ring->total_bytes = 0;
-	adapter->rx_ring->total_packets = 0;
-	adapter->tx_ring->total_bytes = 0;
-	adapter->tx_ring->total_packets = 0;
+	q_vector->rx_ring->total_bytes = 0;
+	q_vector->rx_ring->total_packets = 0;
+	q_vector->tx_ring->total_bytes = 0;
+	q_vector->tx_ring->total_packets = 0;
 
 	if (new_itr != q_vector->itr_val) {
 		/* this attempts to bias the interrupt rate towards Bulk
@@ -3950,7 +3949,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 	netdev->mtu = new_mtu;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
+		adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
 
 	if (netif_running(netdev))
 		igb_up(adapter);
@@ -3992,10 +3991,11 @@ void igb_update_stats(struct igb_adapter *adapter)
 	packets = 0;
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
-		adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
+		struct igb_ring *ring = adapter->rx_ring[i];
+		ring->rx_stats.drops += rqdpc_tmp;
 		net_stats->rx_fifo_errors += rqdpc_tmp;
-		bytes += adapter->rx_ring[i].rx_stats.bytes;
-		packets += adapter->rx_ring[i].rx_stats.packets;
+		bytes += ring->rx_stats.bytes;
+		packets += ring->rx_stats.packets;
 	}
 
 	net_stats->rx_bytes = bytes;
@@ -4004,8 +4004,9 @@ void igb_update_stats(struct igb_adapter *adapter)
 	bytes = 0;
 	packets = 0;
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		bytes += adapter->tx_ring[i].tx_stats.bytes;
-		packets += adapter->tx_ring[i].tx_stats.packets;
+		struct igb_ring *ring = adapter->tx_ring[i];
+		bytes += ring->tx_stats.bytes;
+		packets += ring->tx_stats.packets;
 	}
 	net_stats->tx_bytes = bytes;
 	net_stats->tx_packets = packets;