aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJohn Fastabend <john.r.fastabend@intel.com>2011-04-26 03:26:08 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2011-06-21 04:20:13 -0400
commite901acd6fa5538436e08e8a862dd2c080297f852 (patch)
tree13e5f80f1aebbecbc2b95de238e2a415677b6d64
parent8b1c0b24d9afd4a59a8aa9c778253bcff949395a (diff)
ixgbe: DCB use existing TX and RX queues
The number of TX and RX queues allocated depends on the device type, the current features set, online CPUs, and various compile flags. To enable DCB with multiple queues and allow it to coexist with all the features currently implemented, it has to set up a valid queue count. This is done at init time using the FDIR and RSS max queue counts and allowing each TC to allocate a queue per CPU. DCB will now use available queues up to (8 x TCs); this is a somewhat arbitrary cap but allows DCB to use up to 64 queues. It's easy to increase this later if that is needed. This is prep work to enable Flow Director with DCB. After this, DCB can easily coexist with existing features and no longer needs its own DCB feature ring. Signed-off-by: John Fastabend <john.r.fastabend@intel.com> Tested-by: Ross Brattain <ross.b.brattain@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-rw-r--r--drivers/net/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c107
2 files changed, 49 insertions, 60 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index e467b20ed1f0..d5674fc8bc02 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -244,7 +244,6 @@ struct ixgbe_ring {
244 244
245enum ixgbe_ring_f_enum { 245enum ixgbe_ring_f_enum {
246 RING_F_NONE = 0, 246 RING_F_NONE = 0,
247 RING_F_DCB,
248 RING_F_VMDQ, /* SR-IOV uses the same ring feature */ 247 RING_F_VMDQ, /* SR-IOV uses the same ring feature */
249 RING_F_RSS, 248 RING_F_RSS,
250 RING_F_FDIR, 249 RING_F_FDIR,
@@ -255,7 +254,6 @@ enum ixgbe_ring_f_enum {
255 RING_F_ARRAY_SIZE /* must be last in enum set */ 254 RING_F_ARRAY_SIZE /* must be last in enum set */
256}; 255};
257 256
258#define IXGBE_MAX_DCB_INDICES 64
259#define IXGBE_MAX_RSS_INDICES 16 257#define IXGBE_MAX_RSS_INDICES 16
260#define IXGBE_MAX_VMDQ_INDICES 64 258#define IXGBE_MAX_VMDQ_INDICES 64
261#define IXGBE_MAX_FDIR_INDICES 64 259#define IXGBE_MAX_FDIR_INDICES 64
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 7e3850ab4223..3a88fb6e32e7 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -4417,72 +4417,72 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
4417 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 4417 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
4418 return false; 4418 return false;
4419 4419
4420 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 4420 f->indices = min((int)num_online_cpus(), f->indices);
4421#ifdef CONFIG_IXGBE_DCB
4422 int tc;
4423 struct net_device *dev = adapter->netdev;
4424 4421
4425 tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up); 4422 adapter->num_rx_queues = 1;
4426 f->indices = dev->tc_to_txq[tc].count; 4423 adapter->num_tx_queues = 1;
4427 f->mask = dev->tc_to_txq[tc].offset;
4428#endif
4429 } else {
4430 f->indices = min((int)num_online_cpus(), f->indices);
4431
4432 adapter->num_rx_queues = 1;
4433 adapter->num_tx_queues = 1;
4434 4424
4435 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 4425 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4436 e_info(probe, "FCoE enabled with RSS\n"); 4426 e_info(probe, "FCoE enabled with RSS\n");
4437 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 4427 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4438 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 4428 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4439 ixgbe_set_fdir_queues(adapter); 4429 ixgbe_set_fdir_queues(adapter);
4440 else 4430 else
4441 ixgbe_set_rss_queues(adapter); 4431 ixgbe_set_rss_queues(adapter);
4442 }
4443 /* adding FCoE rx rings to the end */
4444 f->mask = adapter->num_rx_queues;
4445 adapter->num_rx_queues += f->indices;
4446 adapter->num_tx_queues += f->indices;
4447 } 4432 }
4433 /* adding FCoE rx rings to the end */
4434 f->mask = adapter->num_rx_queues;
4435 adapter->num_rx_queues += f->indices;
4436 adapter->num_tx_queues += f->indices;
4448 4437
4449 return true; 4438 return true;
4450} 4439}
4451#endif /* IXGBE_FCOE */ 4440#endif /* IXGBE_FCOE */
4452 4441
4442/* Artificial max queue cap per traffic class in DCB mode */
4443#define DCB_QUEUE_CAP 8
4444
4453#ifdef CONFIG_IXGBE_DCB 4445#ifdef CONFIG_IXGBE_DCB
4454static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) 4446static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
4455{ 4447{
4456 bool ret = false; 4448 int per_tc_q, q, i, offset = 0;
4457 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB]; 4449 struct net_device *dev = adapter->netdev;
4458 int tcs = netdev_get_num_tc(adapter->netdev); 4450 int tcs = netdev_get_num_tc(dev);
4459 int max_q, i, q;
4460 4451
4461 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !tcs) 4452 if (!tcs)
4462 return ret; 4453 return false;
4463 4454
4464 max_q = adapter->netdev->num_tx_queues / tcs; 4455 /* Map queue offset and counts onto allocated tx queues */
4456 per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP);
4457 q = min((int)num_online_cpus(), per_tc_q);
4465 4458
4466 f->indices = 0;
4467 for (i = 0; i < tcs; i++) { 4459 for (i = 0; i < tcs; i++) {
4468 q = min((int)num_online_cpus(), max_q); 4460 netdev_set_prio_tc_map(dev, i, i);
4469 f->indices += q; 4461 netdev_set_tc_queue(dev, i, q, offset);
4462 offset += q;
4470 } 4463 }
4471 4464
4472 f->mask = 0x7 << 3; 4465 adapter->num_tx_queues = q * tcs;
4473 adapter->num_rx_queues = f->indices; 4466 adapter->num_rx_queues = q * tcs;
4474 adapter->num_tx_queues = f->indices;
4475 ret = true;
4476 4467
4477#ifdef IXGBE_FCOE 4468#ifdef IXGBE_FCOE
4478 /* FCoE enabled queues require special configuration done through 4469 /* FCoE enabled queues require special configuration indexed
4479 * configure_fcoe() and others. Here we map FCoE indices onto the 4470 * by feature specific indices and mask. Here we map FCoE
4480 * DCB queue pairs allowing FCoE to own configuration later. 4471 * indices onto the DCB queue pairs allowing FCoE to own
4472 * configuration later.
4481 */ 4473 */
4482 ixgbe_set_fcoe_queues(adapter); 4474 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
4475 int tc;
4476 struct ixgbe_ring_feature *f =
4477 &adapter->ring_feature[RING_F_FCOE];
4478
4479 tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
4480 f->indices = dev->tc_to_txq[tc].count;
4481 f->mask = dev->tc_to_txq[tc].offset;
4482 }
4483#endif 4483#endif
4484 4484
4485 return ret; 4485 return true;
4486} 4486}
4487#endif 4487#endif
4488 4488
@@ -5172,7 +5172,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
5172 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); 5172 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
5173 adapter->ring_feature[RING_F_RSS].indices = rss; 5173 adapter->ring_feature[RING_F_RSS].indices = rss;
5174 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 5174 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
5175 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
5176 switch (hw->mac.type) { 5175 switch (hw->mac.type) {
5177 case ixgbe_mac_82598EB: 5176 case ixgbe_mac_82598EB:
5178 if (hw->device_id == IXGBE_DEV_ID_82598AT) 5177 if (hw->device_id == IXGBE_DEV_ID_82598AT)
@@ -7213,10 +7212,8 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
7213 */ 7212 */
7214int ixgbe_setup_tc(struct net_device *dev, u8 tc) 7213int ixgbe_setup_tc(struct net_device *dev, u8 tc)
7215{ 7214{
7216 unsigned int q, i, offset = 0;
7217 struct ixgbe_adapter *adapter = netdev_priv(dev); 7215 struct ixgbe_adapter *adapter = netdev_priv(dev);
7218 struct ixgbe_hw *hw = &adapter->hw; 7216 struct ixgbe_hw *hw = &adapter->hw;
7219 int max_q = adapter->netdev->num_tx_queues / tc;
7220 7217
7221 /* If DCB is anabled do not remove traffic classes, multiple 7218 /* If DCB is anabled do not remove traffic classes, multiple
7222 * traffic classes are required to implement DCB 7219 * traffic classes are required to implement DCB
@@ -7242,14 +7239,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
7242 else 7239 else
7243 netdev_reset_tc(dev); 7240 netdev_reset_tc(dev);
7244 7241
7245 /* Partition Tx queues evenly amongst traffic classes */
7246 for (i = 0; i < tc; i++) {
7247 q = min((int)num_online_cpus(), max_q);
7248 netdev_set_prio_tc_map(dev, i, i);
7249 netdev_set_tc_queue(dev, i, q, offset);
7250 offset += q;
7251 }
7252
7253 ixgbe_init_interrupt_scheme(adapter); 7242 ixgbe_init_interrupt_scheme(adapter);
7254 ixgbe_validate_rtr(adapter, tc); 7243 ixgbe_validate_rtr(adapter, tc);
7255 if (netif_running(dev)) 7244 if (netif_running(dev))
@@ -7436,14 +7425,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7436 pci_set_master(pdev); 7425 pci_set_master(pdev);
7437 pci_save_state(pdev); 7426 pci_save_state(pdev);
7438 7427
7428#ifdef CONFIG_IXGBE_DCB
7429 indices *= MAX_TRAFFIC_CLASS;
7430#endif
7431
7439 if (ii->mac == ixgbe_mac_82598EB) 7432 if (ii->mac == ixgbe_mac_82598EB)
7440 indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); 7433 indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
7441 else 7434 else
7442 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); 7435 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
7443 7436
7444#if defined(CONFIG_DCB) 7437#ifdef IXGBE_FCOE
7445 indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
7446#elif defined(IXGBE_FCOE)
7447 indices += min_t(unsigned int, num_possible_cpus(), 7438 indices += min_t(unsigned int, num_possible_cpus(),
7448 IXGBE_MAX_FCOE_INDICES); 7439 IXGBE_MAX_FCOE_INDICES);
7449#endif 7440#endif