author		John Fastabend <john.r.fastabend@intel.com>	2011-03-07 22:44:52 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-03-12 07:11:29 -0500
commit		e5b646355770d34eab360ebae93c56c407dfe803
tree		ed43c57e5ecb4813e126a5f287443baa3af49b92 /drivers/net/ixgbe
parent		dc166e22ede5ffb46b5b18b99ba0321ae545f89b
ixgbe: DCB, use multiple Tx rings per traffic class
This enables multiple {Tx|Rx} rings per traffic class while in DCB
mode. In order to get this working as expected, the tc_to_txq net
device mapping is configured as well as the prio_tc_map. skb
priorities are mapped across a range of queue pairs to get a
distribution per traffic class.

The maximum number of queue pairs used while in DCB mode is capped
at 64. The hardware max is actually 128 queues, but 64 is sufficient
for now and allocating more seemed a bit excessive. It is easy enough
to increase the cap later if need be.

To get the 802.1Q priority tags inserted correctly, ixgbe was
previously using the skb queue_mapping field to directly set the
802.1Q priority. This no longer works because we have removed the
1:1 mapping between queues and traffic classes. Each ring is aligned
with an 802.1Qaz traffic class, so here we add an extra field to the
ring struct to identify the 802.1Q traffic class. This uses an extra
byte of the ixgbe_ring struct; fortunately, there was a 2-byte hole:

struct ixgbe_ring {
	void *                     desc;                 /*     0     8 */
	struct device *            dev;                  /*     8     8 */
	struct net_device *        netdev;               /*    16     8 */
	union {
		struct ixgbe_tx_buffer * tx_buffer_info; /*           8 */
		struct ixgbe_rx_buffer * rx_buffer_info; /*           8 */
	};                                               /*    24     8 */
	long unsigned int          state;                /*    32     8 */
	u8                         atr_sample_rate;      /*    40     1 */
	u8                         atr_count;             /*    41     1 */
	u16                        count;                /*    42     2 */
	u16                        rx_buf_len;           /*    44     2 */
	u16                        next_to_use;          /*    46     2 */
	u16                        next_to_clean;        /*    48     2 */
	u8                         queue_index;          /*    50     1 */
	u8                         reg_idx;              /*    51     1 */
	u16                        work_limit;           /*    52     2 */

	/* XXX 2 bytes hole, try to pack */

	u8 *                       tail;                 /*    56     8 */
	/* --- cacheline 1 boundary (64 bytes) --- */

Now we can set the VLAN priority directly and it will be correct.
User space can indicate the 802.1Qaz priority using the SO_PRIORITY
setsockopt() option, and the QoS layer will steer the skb to the
correct rings. Additionally, using the multiq qdisc with a
queue_mapping action works as well.

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
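As an illustration of the user-space path described above, a minimal sketch of
steering traffic with SO_PRIORITY follows. The socket type, priority value, and
destination address are hypothetical examples; only the setsockopt(SOL_SOCKET,
SO_PRIORITY, ...) call itself is the kernel interface the commit message refers to.

	#include <stdio.h>
	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		int prio = 5;	/* 802.1Q priority 0-7; 5 is arbitrary */

		if (fd < 0) {
			perror("socket");
			return 1;
		}

		/* Tag every skb sent on this socket with priority 5. The QoS
		 * layer consults the prio_tc_map set up by ixgbe_setup_tc()
		 * and picks a Tx ring from that traffic class's queue range. */
		if (setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)) < 0) {
			perror("setsockopt(SO_PRIORITY)");
			return 1;
		}

		struct sockaddr_in dst = {
			.sin_family = AF_INET,
			.sin_port   = htons(9),	/* discard port, example only */
		};
		inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
		sendto(fd, "x", 1, 0, (struct sockaddr *)&dst, sizeof(dst));
		return 0;
	}

With the 64-queue cap below, IXGBE_MAX_Q_PER_TC works out to 64 / 8 = 8, so each
of the eight traffic classes is backed by min(num_online_cpus(), 8) queue pairs,
and the chosen priority selects which group of rings the skb may land on.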
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--	drivers/net/ixgbe/ixgbe.h		|   4
-rw-r--r--	drivers/net/ixgbe/ixgbe_dcb_nl.c	|   7
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c		| 339
3 files changed, 182 insertions, 168 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 815edfd7d0ee..b7e62d568b85 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -209,6 +209,7 @@ struct ixgbe_ring {
 					 * associated with this ring, which is
 					 * different for DCB and RSS modes
 					 */
+	u8 dcb_tc;
 
 	u16 work_limit;			/* max work per interrupt */
 
@@ -243,7 +244,7 @@ enum ixgbe_ring_f_enum {
 	RING_F_ARRAY_SIZE	/* must be last in enum set */
 };
 
-#define IXGBE_MAX_DCB_INDICES	8
+#define IXGBE_MAX_DCB_INDICES	64
 #define IXGBE_MAX_RSS_INDICES	16
 #define IXGBE_MAX_VMDQ_INDICES	64
 #define IXGBE_MAX_FDIR_INDICES	64
@@ -542,6 +543,7 @@ extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
 extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
 			       struct ixgbe_ring *ring);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
+extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fso(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index d4b2914376db..b7b6db3bbd59 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -145,6 +145,9 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 	}
 
 	adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
+	if (!netdev_get_num_tc(netdev))
+		ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);
+
 	ixgbe_init_interrupt_scheme(adapter);
 	if (netif_running(netdev))
 		netdev->netdev_ops->ndo_open(netdev);
@@ -169,6 +172,8 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 		break;
 	}
 
+	ixgbe_setup_tc(netdev, 0);
+
 	ixgbe_init_interrupt_scheme(adapter);
 	if (netif_running(netdev))
 		netdev->netdev_ops->ndo_open(netdev);
@@ -351,7 +356,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 		return DCB_NO_HW_CHG;
 
 	ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
-				 adapter->ring_feature[RING_F_DCB].indices);
+				 MAX_TRAFFIC_CLASS);
 
 	if (ret)
 		return DCB_NO_HW_CHG;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 4aeade82812a..3694226462da 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -652,7 +652,7 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
 static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
 {
 	int tc = -1;
-	int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+	int dcb_i = netdev_get_num_tc(adapter->netdev);
 
 	/* if DCB is not enabled the queues have no TC */
 	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
@@ -4258,24 +4258,6 @@ static void ixgbe_reset_task(struct work_struct *work)
 	ixgbe_reinit_locked(adapter);
 }
 
-#ifdef CONFIG_IXGBE_DCB
-static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
-{
-	bool ret = false;
-	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
-
-	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
-		return ret;
-
-	f->mask = 0x7 << 3;
-	adapter->num_rx_queues = f->indices;
-	adapter->num_tx_queues = f->indices;
-	ret = true;
-
-	return ret;
-}
-#endif
-
 /**
  * ixgbe_set_rss_queues: Allocate queues for RSS
  * @adapter: board private structure to initialize
@@ -4346,19 +4328,26 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
  **/
 static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 {
-	bool ret = false;
 	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
 
-	f->indices = min((int)num_online_cpus(), f->indices);
-	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
-		adapter->num_rx_queues = 1;
-		adapter->num_tx_queues = 1;
+	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+		return false;
+
+	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 #ifdef CONFIG_IXGBE_DCB
-		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-			e_info(probe, "FCoE enabled with DCB\n");
-			ixgbe_set_dcb_queues(adapter);
-		}
+		int tc;
+		struct net_device *dev = adapter->netdev;
+
+		tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
+		f->indices = dev->tc_to_txq[tc].count;
+		f->mask = dev->tc_to_txq[tc].offset;
 #endif
+	} else {
+		f->indices = min((int)num_online_cpus(), f->indices);
+
+		adapter->num_rx_queues = 1;
+		adapter->num_tx_queues = 1;
+
 		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 			e_info(probe, "FCoE enabled with RSS\n");
 			if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
@@ -4371,14 +4360,45 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 		f->mask = adapter->num_rx_queues;
 		adapter->num_rx_queues += f->indices;
 		adapter->num_tx_queues += f->indices;
+	}
 
-	ret = true;
+	return true;
+}
+#endif /* IXGBE_FCOE */
+
+#ifdef CONFIG_IXGBE_DCB
+static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+{
+	bool ret = false;
+	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
+	int i, q;
+
+	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+		return ret;
+
+	f->indices = 0;
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		q = min((int)num_online_cpus(), MAX_TRAFFIC_CLASS);
+		f->indices += q;
 	}
 
+	f->mask = 0x7 << 3;
+	adapter->num_rx_queues = f->indices;
+	adapter->num_tx_queues = f->indices;
+	ret = true;
+
+#ifdef IXGBE_FCOE
+	/* FCoE enabled queues require special configuration done through
+	 * configure_fcoe() and others. Here we map FCoE indices onto the
+	 * DCB queue pairs allowing FCoE to own configuration later.
+	 */
+	ixgbe_set_fcoe_queues(adapter);
+#endif
+
 	return ret;
 }
+#endif
 
-#endif /* IXGBE_FCOE */
 /**
  * ixgbe_set_sriov_queues: Allocate queues for IOV use
  * @adapter: board private structure to initialize
@@ -4414,16 +4434,16 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 	if (ixgbe_set_sriov_queues(adapter))
 		goto done;
 
-#ifdef IXGBE_FCOE
-	if (ixgbe_set_fcoe_queues(adapter))
-		goto done;
-
-#endif /* IXGBE_FCOE */
 #ifdef CONFIG_IXGBE_DCB
 	if (ixgbe_set_dcb_queues(adapter))
 		goto done;
 
 #endif
+#ifdef IXGBE_FCOE
+	if (ixgbe_set_fcoe_queues(adapter))
+		goto done;
+
+#endif /* IXGBE_FCOE */
 	if (ixgbe_set_fdir_queues(adapter))
 		goto done;
 
@@ -4515,6 +4535,91 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
 }
 
 #ifdef CONFIG_IXGBE_DCB
+
+/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
+void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
+			     unsigned int *tx, unsigned int *rx)
+{
+	struct net_device *dev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u8 num_tcs = netdev_get_num_tc(dev);
+
+	*tx = 0;
+	*rx = 0;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		*tx = tc << 3;
+		*rx = tc << 2;
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		if (num_tcs == 8) {
+			if (tc < 3) {
+				*tx = tc << 5;
+				*rx = tc << 4;
+			} else if (tc < 5) {
+				*tx = ((tc + 2) << 4);
+				*rx = tc << 4;
+			} else if (tc < num_tcs) {
+				*tx = ((tc + 8) << 3);
+				*rx = tc << 4;
+			}
+		} else if (num_tcs == 4) {
+			*rx = tc << 5;
+			switch (tc) {
+			case 0:
+				*tx = 0;
+				break;
+			case 1:
+				*tx = 64;
+				break;
+			case 2:
+				*tx = 96;
+				break;
+			case 3:
+				*tx = 112;
+				break;
+			default:
+				break;
+			}
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+#define IXGBE_MAX_Q_PER_TC (IXGBE_MAX_DCB_INDICES / MAX_TRAFFIC_CLASS)
+
+/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
+ * classes.
+ *
+ * @netdev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int ixgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+	int i;
+	unsigned int q, offset = 0;
+
+	if (!tc) {
+		netdev_reset_tc(dev);
+	} else {
+		if (netdev_set_num_tc(dev, tc))
+			return -EINVAL;
+
+		/* Partition Tx queues evenly amongst traffic classes */
+		for (i = 0; i < tc; i++) {
+			q = min((int)num_online_cpus(), IXGBE_MAX_Q_PER_TC);
+			netdev_set_prio_tc_map(dev, i, i);
+			netdev_set_tc_queue(dev, i, q, offset);
+			offset += q;
+		}
+	}
+	return 0;
+}
+
 /**
  * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
  * @adapter: board private structure to initialize
@@ -4524,72 +4629,27 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
  **/
 static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 {
-	int i;
-	bool ret = false;
-	int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+	struct net_device *dev = adapter->netdev;
+	int i, j, k;
+	u8 num_tcs = netdev_get_num_tc(dev);
 
 	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
 		return false;
 
-	/* the number of queues is assumed to be symmetric */
-	switch (adapter->hw.mac.type) {
-	case ixgbe_mac_82598EB:
-		for (i = 0; i < dcb_i; i++) {
-			adapter->rx_ring[i]->reg_idx = i << 3;
-			adapter->tx_ring[i]->reg_idx = i << 2;
-		}
-		ret = true;
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-		if (dcb_i == 8) {
-			/*
-			 * Tx TC0 starts at: descriptor queue 0
-			 * Tx TC1 starts at: descriptor queue 32
-			 * Tx TC2 starts at: descriptor queue 64
-			 * Tx TC3 starts at: descriptor queue 80
-			 * Tx TC4 starts at: descriptor queue 96
-			 * Tx TC5 starts at: descriptor queue 104
-			 * Tx TC6 starts at: descriptor queue 112
-			 * Tx TC7 starts at: descriptor queue 120
-			 *
-			 * Rx TC0-TC7 are offset by 16 queues each
-			 */
-			for (i = 0; i < 3; i++) {
-				adapter->tx_ring[i]->reg_idx = i << 5;
-				adapter->rx_ring[i]->reg_idx = i << 4;
-			}
-			for ( ; i < 5; i++) {
-				adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
-				adapter->rx_ring[i]->reg_idx = i << 4;
-			}
-			for ( ; i < dcb_i; i++) {
-				adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
-				adapter->rx_ring[i]->reg_idx = i << 4;
-			}
-			ret = true;
-		} else if (dcb_i == 4) {
-			/*
-			 * Tx TC0 starts at: descriptor queue 0
-			 * Tx TC1 starts at: descriptor queue 64
-			 * Tx TC2 starts at: descriptor queue 96
-			 * Tx TC3 starts at: descriptor queue 112
-			 *
-			 * Rx TC0-TC3 are offset by 32 queues each
-			 */
-			adapter->tx_ring[0]->reg_idx = 0;
-			adapter->tx_ring[1]->reg_idx = 64;
-			adapter->tx_ring[2]->reg_idx = 96;
-			adapter->tx_ring[3]->reg_idx = 112;
-			for (i = 0 ; i < dcb_i; i++)
-				adapter->rx_ring[i]->reg_idx = i << 5;
-			ret = true;
+	for (i = 0, k = 0; i < num_tcs; i++) {
+		unsigned int tx_s, rx_s;
+		u16 count = dev->tc_to_txq[i].count;
+
+		ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
+		for (j = 0; j < count; j++, k++) {
+			adapter->tx_ring[k]->reg_idx = tx_s + j;
+			adapter->rx_ring[k]->reg_idx = rx_s + j;
+			adapter->tx_ring[k]->dcb_tc = i;
+			adapter->rx_ring[k]->dcb_tc = i;
 		}
-		break;
-	default:
-		break;
 	}
-	return ret;
+
+	return true;
 }
 #endif
 
@@ -4635,33 +4695,6 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 		return false;
 
-#ifdef CONFIG_IXGBE_DCB
-	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-		struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-
-		ixgbe_cache_ring_dcb(adapter);
-		/* find out queues in TC for FCoE */
-		fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
-		fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
-		/*
-		 * In 82599, the number of Tx queues for each traffic
-		 * class for both 8-TC and 4-TC modes are:
-		 * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
-		 * 8 TCs:  32  32  16  16   8   8   8   8
-		 * 4 TCs:  64  64  32  32
-		 * We have max 8 queues for FCoE, where 8 the is
-		 * FCoE redirection table size. If TC for FCoE is
-		 * less than or equal to TC3, we have enough queues
-		 * to add max of 8 queues for FCoE, so we start FCoE
-		 * Tx queue from the next one, i.e., reg_idx + 1.
-		 * If TC for FCoE is above TC3, implying 8 TC mode,
-		 * and we need 8 for FCoE, we have to take all queues
-		 * in that traffic class for FCoE.
-		 */
-		if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
-			fcoe_tx_i--;
-	}
-#endif /* CONFIG_IXGBE_DCB */
 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 		if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
 		    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
@@ -4718,16 +4751,16 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 	if (ixgbe_cache_ring_sriov(adapter))
 		return;
 
+#ifdef CONFIG_IXGBE_DCB
+	if (ixgbe_cache_ring_dcb(adapter))
+		return;
+#endif
+
 #ifdef IXGBE_FCOE
 	if (ixgbe_cache_ring_fcoe(adapter))
 		return;
-
 #endif /* IXGBE_FCOE */
-#ifdef CONFIG_IXGBE_DCB
-	if (ixgbe_cache_ring_dcb(adapter))
-		return;
 
-#endif
 	if (ixgbe_cache_ring_fdir(adapter))
 		return;
 
@@ -5192,7 +5225,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	adapter->dcb_set_bitmap = 0x00;
 	adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
 	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
-			   adapter->ring_feature[RING_F_DCB].indices);
+			   MAX_TRAFFIC_CLASS);
 
 #endif
 
@@ -6664,18 +6697,12 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 
 	protocol = vlan_get_protocol(skb);
 
-	if ((protocol == htons(ETH_P_FCOE)) ||
-	    (protocol == htons(ETH_P_FIP))) {
-		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+	if (((protocol == htons(ETH_P_FCOE)) ||
+	    (protocol == htons(ETH_P_FIP))) &&
+	    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
 		txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
 		txq += adapter->ring_feature[RING_F_FCOE].mask;
 		return txq;
-#ifdef CONFIG_IXGBE_DCB
-		} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-			txq = adapter->fcoe.up;
-			return txq;
-#endif
-		}
 	}
 #endif
 
@@ -6685,15 +6712,6 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 		return txq;
 	}
 
-	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-		if (skb->priority == TC_PRIO_CONTROL)
-			txq = adapter->ring_feature[RING_F_DCB].indices-1;
-		else
-			txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
-			       >> 13;
-		return txq;
-	}
-
 	return skb_tx_hash(dev, skb);
 }
 
@@ -6715,13 +6733,13 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 		tx_flags |= vlan_tx_tag_get(skb);
 		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
-			tx_flags |= ((skb->queue_mapping & 0x7) << 13);
+			tx_flags |= tx_ring->dcb_tc << 13;
 		}
 		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
 		tx_flags |= IXGBE_TX_FLAGS_VLAN;
 	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
 		   skb->priority != TC_PRIO_CONTROL) {
-		tx_flags |= ((skb->queue_mapping & 0x7) << 13);
+		tx_flags |= tx_ring->dcb_tc << 13;
 		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
 		tx_flags |= IXGBE_TX_FLAGS_VLAN;
 	}
@@ -6730,20 +6748,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	/* for FCoE with DCB, we force the priority to what
 	 * was specified by the switch */
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
-	    (protocol == htons(ETH_P_FCOE) ||
-	     protocol == htons(ETH_P_FIP))) {
-#ifdef CONFIG_IXGBE_DCB
-		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-			tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
-				      << IXGBE_TX_FLAGS_VLAN_SHIFT);
-			tx_flags |= ((adapter->fcoe.up << 13)
-				     << IXGBE_TX_FLAGS_VLAN_SHIFT);
-		}
-#endif
-		/* flag for FCoE offloads */
-		if (protocol == htons(ETH_P_FCOE))
-			tx_flags |= IXGBE_TX_FLAGS_FCOE;
-	}
+	    (protocol == htons(ETH_P_FCOE)))
+		tx_flags |= IXGBE_TX_FLAGS_FCOE;
 #endif
 
 	/* four things can cause us to need a context descriptor */
@@ -7157,8 +7163,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	else
 		indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
 
+#if defined(CONFIG_DCB)
 	indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
-#ifdef IXGBE_FCOE
+#elif defined(IXGBE_FCOE)
 	indices += min_t(unsigned int, num_possible_cpus(),
 			 IXGBE_MAX_FCOE_INDICES);
 #endif