path: root/drivers/net/ixgbe
author	Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>	2009-02-06 02:53:59 -0500
committer	David S. Miller <davem@davemloft.net>	2009-02-06 02:53:59 -0500
commit	bc97114d3f998a040876695a9b2b5be0b1a5320b
tree	52ad12f26046d32d3cd0cbd2105d41fb82836a68 /drivers/net/ixgbe
parent	56035022d86fff45299288cb372a42f752ba23fa
ixgbe: Refactor set_num_queues() and cache_ring_register()
The current code to determine the number of queues the device will want on driver initialization is ugly and difficult to maintain. It also doesn't allow for easy expansion for future features or future hardware. This patch refactors these routines and makes them easier to deal with.

Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
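In outline, the refactor replaces the nested switch statements with small per-feature helpers that each claim the queue layout and return a bool; ixgbe_set_num_queues() then tries them from the most feature-rich configuration down to the base case, so a new feature only needs one more helper and one more call. The sketch below illustrates that extension pattern; ixgbe_set_foo_queues, IXGBE_FLAG_FOO_ENABLED and RING_F_FOO are hypothetical names used for illustration and are not part of this patch.

/* Hypothetical sketch only: how a future feature "foo" would slot into the
 * dispatch pattern this patch introduces.  The flag, ring-feature index and
 * helper below do not exist in the driver.
 */
static inline bool ixgbe_set_foo_queues(struct ixgbe_adapter *adapter)
{
	if (!(adapter->flags & IXGBE_FLAG_FOO_ENABLED))
		return false;

	/* Claim the queue layout for this feature. */
	adapter->num_rx_queues = adapter->ring_feature[RING_F_FOO].indices;
	adapter->num_tx_queues = adapter->ring_feature[RING_F_FOO].indices;
	return true;
}

static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Base case: a single Rx and a single Tx queue. */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

	if (ixgbe_set_foo_queues(adapter))	/* most features first */
		return;
#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_queues(adapter))
		return;
#endif
	if (ixgbe_set_rss_queues(adapter))
		return;
}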
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	222
1 file changed, 118 insertions(+), 104 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index ed8d14163c1d..d396c6e01fb5 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -2314,68 +2314,61 @@ static void ixgbe_reset_task(struct work_struct *work)
 	ixgbe_reinit_locked(adapter);
 }
 
-static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
-{
-	int nrq = 1, ntq = 1;
-	int feature_mask = 0, rss_i, rss_m;
-	int dcb_i, dcb_m;
-
-	/* Number of supported queues */
-	switch (adapter->hw.mac.type) {
-	case ixgbe_mac_82598EB:
-		dcb_i = adapter->ring_feature[RING_F_DCB].indices;
-		dcb_m = 0;
-		rss_i = adapter->ring_feature[RING_F_RSS].indices;
-		rss_m = 0;
-		feature_mask |= IXGBE_FLAG_RSS_ENABLED;
-		feature_mask |= IXGBE_FLAG_DCB_ENABLED;
-
-		switch (adapter->flags & feature_mask) {
-		case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
-			dcb_m = 0x7 << 3;
-			rss_i = min(8, rss_i);
-			rss_m = 0x7;
-			nrq = dcb_i * rss_i;
-			ntq = min(MAX_TX_QUEUES, dcb_i * rss_i);
-			break;
-		case (IXGBE_FLAG_DCB_ENABLED):
-			dcb_m = 0x7 << 3;
-			nrq = dcb_i;
-			ntq = dcb_i;
-			break;
-		case (IXGBE_FLAG_RSS_ENABLED):
-			rss_m = 0xF;
-			nrq = rss_i;
-			ntq = rss_i;
-			break;
-		case 0:
-		default:
-			dcb_i = 0;
-			dcb_m = 0;
-			rss_i = 0;
-			rss_m = 0;
-			nrq = 1;
-			ntq = 1;
-			break;
-		}
-
-		/* Sanity check, we should never have zero queues */
-		nrq = (nrq ?:1);
-		ntq = (ntq ?:1);
-
-		adapter->ring_feature[RING_F_DCB].indices = dcb_i;
-		adapter->ring_feature[RING_F_DCB].mask = dcb_m;
-		adapter->ring_feature[RING_F_RSS].indices = rss_i;
-		adapter->ring_feature[RING_F_RSS].mask = rss_m;
-		break;
-	default:
-		nrq = 1;
-		ntq = 1;
-		break;
-	}
-
-	adapter->num_rx_queues = nrq;
-	adapter->num_tx_queues = ntq;
+#ifdef CONFIG_IXGBE_DCB
+static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+{
+	bool ret = false;
+
+	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+		adapter->ring_feature[RING_F_DCB].mask = 0x7 << 3;
+		adapter->num_rx_queues =
+			adapter->ring_feature[RING_F_DCB].indices;
+		adapter->num_tx_queues =
+			adapter->ring_feature[RING_F_DCB].indices;
+		ret = true;
+	} else {
+		adapter->ring_feature[RING_F_DCB].mask = 0;
+		adapter->ring_feature[RING_F_DCB].indices = 0;
+		ret = false;
+	}
+
+	return ret;
+}
+#endif
+
+static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+{
+	bool ret = false;
+
+	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+		adapter->ring_feature[RING_F_RSS].mask = 0xF;
+		adapter->num_rx_queues =
+			adapter->ring_feature[RING_F_RSS].indices;
+		adapter->num_tx_queues =
+			adapter->ring_feature[RING_F_RSS].indices;
+		ret = true;
+	} else {
+		adapter->ring_feature[RING_F_RSS].mask = 0;
+		adapter->ring_feature[RING_F_RSS].indices = 0;
+		ret = false;
+	}
+
+	return ret;
+}
+
+static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+{
+	/* Start with base case */
+	adapter->num_rx_queues = 1;
+	adapter->num_tx_queues = 1;
+
+#ifdef CONFIG_IXGBE_DCB
+	if (ixgbe_set_dcb_queues(adapter))
+		return;
+
+#endif
+	if (ixgbe_set_rss_queues(adapter))
+		return;
 }
 
 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
@@ -2432,66 +2425,87 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
 }
 
 /**
- * ixgbe_cache_ring_register - Descriptor ring to register mapping
- * @adapter: board private structure to initialize
- *
- * Once we know the feature-set enabled for the device, we'll cache
- * the register offset the descriptor ring is assigned to.
- **/
-static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
-{
-	int feature_mask = 0, rss_i;
-	int i, txr_idx, rxr_idx;
-	int dcb_i;
-
-	/* Number of supported queues */
-	switch (adapter->hw.mac.type) {
-	case ixgbe_mac_82598EB:
-		dcb_i = adapter->ring_feature[RING_F_DCB].indices;
-		rss_i = adapter->ring_feature[RING_F_RSS].indices;
-		txr_idx = 0;
-		rxr_idx = 0;
-		feature_mask |= IXGBE_FLAG_DCB_ENABLED;
-		feature_mask |= IXGBE_FLAG_RSS_ENABLED;
-		switch (adapter->flags & feature_mask) {
-		case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
-			for (i = 0; i < dcb_i; i++) {
-				int j;
-				/* Rx first */
-				for (j = 0; j < adapter->num_rx_queues; j++) {
-					adapter->rx_ring[rxr_idx].reg_idx =
-						i << 3 | j;
-					rxr_idx++;
-				}
-				/* Tx now */
-				for (j = 0; j < adapter->num_tx_queues; j++) {
-					adapter->tx_ring[txr_idx].reg_idx =
-						i << 2 | (j >> 1);
-					if (j & 1)
-						txr_idx++;
-				}
-			}
-		case (IXGBE_FLAG_DCB_ENABLED):
-			/* the number of queues is assumed to be symmetric */
-			for (i = 0; i < dcb_i; i++) {
-				adapter->rx_ring[i].reg_idx = i << 3;
-				adapter->tx_ring[i].reg_idx = i << 2;
-			}
-			break;
-		case (IXGBE_FLAG_RSS_ENABLED):
-			for (i = 0; i < adapter->num_rx_queues; i++)
-				adapter->rx_ring[i].reg_idx = i;
-			for (i = 0; i < adapter->num_tx_queues; i++)
-				adapter->tx_ring[i].reg_idx = i;
-			break;
-		case 0:
-		default:
-			break;
-		}
-		break;
-	default:
-		break;
-	}
+ * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for RSS to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
+{
+	int i;
+	bool ret = false;
+
+	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i].reg_idx = i;
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i].reg_idx = i;
+		ret = true;
+	} else {
+		ret = false;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_IXGBE_DCB
+/**
+ * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for DCB to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
+{
+	int i;
+	bool ret = false;
+	int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+
+	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+			/* the number of queues is assumed to be symmetric */
+			for (i = 0; i < dcb_i; i++) {
+				adapter->rx_ring[i].reg_idx = i << 3;
+				adapter->tx_ring[i].reg_idx = i << 2;
+			}
+			ret = true;
+		} else {
+			ret = false;
+		}
+	} else {
+		ret = false;
+	}
+
+	return ret;
+}
+#endif
+
+/**
+ * ixgbe_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ *
+ * Note, the order the various feature calls is important.  It must start with
+ * the "most" features enabled at the same time, then trickle down to the
+ * least amount of features turned on at once.
+ **/
+static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
+{
+	/* start with default case */
+	adapter->rx_ring[0].reg_idx = 0;
+	adapter->tx_ring[0].reg_idx = 0;
+
+#ifdef CONFIG_IXGBE_DCB
+	if (ixgbe_cache_ring_dcb(adapter))
+		return;
+
+#endif
+	if (ixgbe_cache_ring_rss(adapter))
+		return;
 }
 
 /**
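One detail worth calling out from the DCB path preserved above: on 82598 hardware the refactored ixgbe_cache_ring_dcb() keeps the existing register math, where traffic class i is assigned Rx descriptor ring register i << 3 and Tx register i << 2, i.e. Rx rings are laid out in blocks of 8 per traffic class and Tx rings in blocks of 4. A small stand-alone sketch of that arithmetic, assuming the usual 8-traffic-class DCB configuration:

#include <stdio.h>

/* Illustration of the 82598 DCB ring-to-register mapping kept by this patch:
 * each traffic class owns a block of 8 Rx ring registers (tc << 3) and
 * 4 Tx ring registers (tc << 2).  Assumes the common 8-traffic-class setup.
 */
int main(void)
{
	for (int tc = 0; tc < 8; tc++)
		printf("TC %d: rx reg_idx %2d, tx reg_idx %2d\n",
		       tc, tc << 3, tc << 2);
	return 0;
}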