author    Alexander Duyck <alexander.h.duyck@intel.com>  2008-11-20 23:52:10 -0500
committer David S. Miller <davem@davemloft.net>          2008-11-20 23:52:10 -0500
commit    2f90b8657ec942d1880f720e0177ee71df7c8e3c (patch)
tree      844114b22c548fedbab67e53b09b2efcf170563a /drivers
parent    9db66bdcc83749affe61c61eb8ff3cf08f42afec (diff)
ixgbe: this patch adds support for DCB to the kernel and ixgbe driver
This adds support for Data Center Bridging (DCB) features in the ixgbe driver and adds an rtnetlink interface for configuring DCB to the kernel. The DCB features supported are Priority Grouping (PG), which allows bandwidth guarantees to be allocated to groups of traffic based on the 802.1q priority, and Priority-Based Flow Control (PFC), which introduces a new MAC control PAUSE frame that works at the granularity of the 802.1p priority instead of the whole link (IEEE 802.3x).

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/Kconfig                  |  10
-rw-r--r--  drivers/net/ixgbe/Makefile           |   2
-rw-r--r--  drivers/net/ixgbe/ixgbe.h            |  25
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.c        | 332
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.h        | 157
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.c  | 398
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.h  |  94
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c     | 356
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c    |  30
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c       | 200
10 files changed, 1585 insertions(+), 19 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index afa206590ada..efd461d7c2bb 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2451,6 +2451,16 @@ config IXGBE_DCA
 	  driver.  DCA is a method for warming the CPU cache before data
 	  is used, with the intent of lessening the impact of cache misses.
 
+config IXGBE_DCBNL
+	bool "Data Center Bridging (DCB) Support"
+	default n
+	depends on IXGBE && DCBNL
+	---help---
+	  Say Y here if you want to use Data Center Bridging (DCB) in the
+	  driver.
+
+	  If unsure, say N.
+
 config IXGB
 	tristate "Intel(R) PRO/10GbE support"
 	depends on PCI
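Building the DCB support therefore requires both the kernel's DCB netlink option and this new driver option. A plausible configuration fragment (symbol names taken from the depends/obj-m lines in this patch; the fragment itself is not part of the commit) would be:

CONFIG_DCBNL=y
CONFIG_IXGBE=m
CONFIG_IXGBE_DCBNL=y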
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index ccd83d9f579e..3228e508e628 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -34,3 +34,5 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82598.o ixgbe_phy.o
+
+ixgbe-$(CONFIG_IXGBE_DCBNL) += ixgbe_dcb.o ixgbe_dcb_82598.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 132854f646ba..796f189f3879 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -35,7 +35,7 @@
 
 #include "ixgbe_type.h"
 #include "ixgbe_common.h"
-
+#include "ixgbe_dcb.h"
 #ifdef CONFIG_IXGBE_DCA
 #include <linux/dca.h>
 #endif
@@ -84,6 +84,7 @@
 #define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
 #define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
 #define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT	16
 
 #define IXGBE_MAX_LRO_DESCRIPTORS	8
@@ -134,7 +135,7 @@ struct ixgbe_ring {
 
 	u16 reg_idx; /* holds the special value that gets the hardware register
 	              * offset associated with this ring, which is different
-	              * for DCE and RSS modes */
+	              * for DCB and RSS modes */
 
 #ifdef CONFIG_IXGBE_DCA
 	/* cpu for tx queue */
@@ -152,8 +153,10 @@ struct ixgbe_ring {
 	u16 rx_buf_len;
 };
 
+#define RING_F_DCB  0
 #define RING_F_VMDQ 1
 #define RING_F_RSS  2
+#define IXGBE_MAX_DCB_INDICES   8
 #define IXGBE_MAX_RSS_INDICES  16
 #define IXGBE_MAX_VMDQ_INDICES 16
 struct ixgbe_ring_feature {
@@ -164,6 +167,10 @@ struct ixgbe_ring_feature {
 #define MAX_RX_QUEUES 64
 #define MAX_TX_QUEUES 32
 
+#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
+                              ? 8 : 1)
+#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
+
 /* MAX_MSIX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
  */
@@ -215,6 +222,9 @@ struct ixgbe_adapter {
 	struct work_struct reset_task;
 	struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
 	char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
+	struct ixgbe_dcb_config dcb_cfg;
+	struct ixgbe_dcb_config temp_dcb_cfg;
+	u8 dcb_set_bitmap;
 
 	/* Interrupt Throttle Rate */
 	u32 itr_setting;
@@ -270,6 +280,7 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 20)
 #define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 22)
 #define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1 << 23)
+#define IXGBE_FLAG_DCB_ENABLED                  (u32)(1 << 24)
 
 /* default to trying for four seconds */
 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
@@ -313,6 +324,12 @@ enum ixgbe_boards {
 };
 
 extern struct ixgbe_info ixgbe_82598_info;
+#ifdef CONFIG_IXGBE_DCBNL
+extern struct dcbnl_rtnl_ops dcbnl_ops;
+extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
+                              struct ixgbe_dcb_config *dst_dcb_cfg,
+                              int tc_max);
+#endif
 
 extern char ixgbe_driver_name[];
 extern const char ixgbe_driver_version[];
@@ -327,5 +344,9 @@ extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *)
 extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
 extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+extern void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter);
+extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
+void ixgbe_napi_add_all(struct ixgbe_adapter *adapter);
+void ixgbe_napi_del_all(struct ixgbe_adapter *adapter);
 
 #endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
new file mode 100644
index 000000000000..e2e28ac63dec
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -0,0 +1,332 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29
30#include "ixgbe.h"
31#include "ixgbe_type.h"
32#include "ixgbe_dcb.h"
33#include "ixgbe_dcb_82598.h"
34
35/**
36 * ixgbe_dcb_check_config - Check DCB rules for a valid configuration
37 * @dcb_config: Pointer to DCB config structure
38 *
39 * This function checks DCB rules for DCB settings.
40 * The following rules are checked:
41 * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
42 * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
43 * Group must total 100.
44 * 3. A Traffic Class should not be set to both Link Strict Priority
45 * and Group Strict Priority.
46 * 4. Link strict Bandwidth Groups can only have link strict traffic classes
47 * with zero bandwidth.
48 */
49s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *dcb_config)
50{
51 struct tc_bw_alloc *p;
52 s32 ret_val = 0;
53 u8 i, j, bw = 0, bw_id;
54 u8 bw_sum[2][MAX_BW_GROUP];
55 bool link_strict[2][MAX_BW_GROUP];
56
57 memset(bw_sum, 0, sizeof(bw_sum));
58 memset(link_strict, 0, sizeof(link_strict));
59
60 /* First Tx, then Rx */
61 for (i = 0; i < 2; i++) {
62 /* Check each traffic class for rule violation */
63 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
64 p = &dcb_config->tc_config[j].path[i];
65
66 bw = p->bwg_percent;
67 bw_id = p->bwg_id;
68
69 if (bw_id >= MAX_BW_GROUP) {
70 ret_val = DCB_ERR_CONFIG;
71 goto err_config;
72 }
73 if (p->prio_type == prio_link) {
74 link_strict[i][bw_id] = true;
75 /* Link strict should have zero bandwidth */
76 if (bw) {
77 ret_val = DCB_ERR_LS_BW_NONZERO;
78 goto err_config;
79 }
80 } else if (!bw) {
81 /*
82 * Traffic classes without link strict
83 * should have non-zero bandwidth.
84 */
85 ret_val = DCB_ERR_TC_BW_ZERO;
86 goto err_config;
87 }
88 bw_sum[i][bw_id] += bw;
89 }
90
91 bw = 0;
92
93 /* Check each bandwidth group for rule violation */
94 for (j = 0; j < MAX_BW_GROUP; j++) {
95 bw += dcb_config->bw_percentage[i][j];
96 /*
97 * Sum of bandwidth percentages of all traffic classes
98 * within a Bandwidth Group must total 100 except for
99 * link strict group (zero bandwidth).
100 */
101 if (link_strict[i][j]) {
102 if (bw_sum[i][j]) {
103 /*
104 * Link strict group should have zero
105 * bandwidth.
106 */
107 ret_val = DCB_ERR_LS_BWG_NONZERO;
108 goto err_config;
109 }
110 } else if (bw_sum[i][j] != BW_PERCENT &&
111 bw_sum[i][j] != 0) {
112 ret_val = DCB_ERR_TC_BW;
113 goto err_config;
114 }
115 }
116
117 if (bw != BW_PERCENT) {
118 ret_val = DCB_ERR_BW_GROUP;
119 goto err_config;
120 }
121 }
122
123err_config:
124 return ret_val;
125}
126
127/**
128 * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
129 * @dcb_config: Struct containing DCB settings.
130 * @direction: Configuring either Tx or Rx.
131 *
132 * This function calculates the credits allocated to each traffic class.
133 * It should be called only after the rules are checked by
134 * ixgbe_dcb_check_config().
135 */
136s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
137 u8 direction)
138{
139 struct tc_bw_alloc *p;
140 s32 ret_val = 0;
141 /* Initialization values default for Tx settings */
142 u32 credit_refill = 0;
143 u32 credit_max = 0;
144 u16 link_percentage = 0;
145 u8 bw_percent = 0;
146 u8 i;
147
148 if (dcb_config == NULL) {
149 ret_val = DCB_ERR_CONFIG;
150 goto out;
151 }
152
153 /* Find out the link percentage for each TC first */
154 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
155 p = &dcb_config->tc_config[i].path[direction];
156 bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
157
158 link_percentage = p->bwg_percent;
159 /* Must be careful of integer division for very small nums */
160 link_percentage = (link_percentage * bw_percent) / 100;
161 if (p->bwg_percent > 0 && link_percentage == 0)
162 link_percentage = 1;
163
164 /* Save link_percentage for reference */
165 p->link_percent = (u8)link_percentage;
166
167 /* Calculate credit refill and save it */
168 credit_refill = link_percentage * MINIMUM_CREDIT_REFILL;
169 p->data_credits_refill = (u16)credit_refill;
170
171 /* Calculate maximum credit for the TC */
172 credit_max = (link_percentage * MAX_CREDIT) / 100;
173
174 /*
175 * Adjustment based on rule checking, if the percentage
176 * of a TC is too small, the maximum credit may not be
177 * enough to send out a jumbo frame in data plane arbitration.
178 */
179 if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_JUMBO))
180 credit_max = MINIMUM_CREDIT_FOR_JUMBO;
181
182 if (direction == DCB_TX_CONFIG) {
183 /*
184 * Adjustment based on rule checking, if the
185 * percentage of a TC is too small, the maximum
186 * credit may not be enough to send out a TSO
187 * packet in descriptor plane arbitration.
188 */
189 if (credit_max &&
190 (credit_max < MINIMUM_CREDIT_FOR_TSO))
191 credit_max = MINIMUM_CREDIT_FOR_TSO;
192
193 dcb_config->tc_config[i].desc_credits_max =
194 (u16)credit_max;
195 }
196
197 p->data_credits_max = (u16)credit_max;
198 }
199
200out:
201 return ret_val;
202}
203
204/**
205 * ixgbe_dcb_get_tc_stats - Returns status of each traffic class
206 * @hw: pointer to hardware structure
207 * @stats: pointer to statistics structure
208 * @tc_count: Number of elements in bwg_array.
209 *
210 * This function returns the status data for each of the Traffic Classes in use.
211 */
212s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
213 u8 tc_count)
214{
215 s32 ret = 0;
216 if (hw->mac.type == ixgbe_mac_82598EB)
217 ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
218 return ret;
219}
220
221/**
222 * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
223 * @hw: pointer to hardware structure
224 * @stats: pointer to statistics structure
225 * @tc_count: Number of elements in bwg_array.
226 *
227 * This function returns the CBFC status data for each of the Traffic Classes.
228 */
229s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
230 u8 tc_count)
231{
232 s32 ret = 0;
233 if (hw->mac.type == ixgbe_mac_82598EB)
234 ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
235 return ret;
236}
237
238/**
239 * ixgbe_dcb_config_rx_arbiter - Config Rx arbiter
240 * @hw: pointer to hardware structure
241 * @dcb_config: pointer to ixgbe_dcb_config structure
242 *
243 * Configure Rx Data Arbiter and credits for each traffic class.
244 */
245s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw,
246 struct ixgbe_dcb_config *dcb_config)
247{
248 s32 ret = 0;
249 if (hw->mac.type == ixgbe_mac_82598EB)
250 ret = ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
251 return ret;
252}
253
254/**
255 * ixgbe_dcb_config_tx_desc_arbiter - Config Tx Desc arbiter
256 * @hw: pointer to hardware structure
257 * @dcb_config: pointer to ixgbe_dcb_config structure
258 *
259 * Configure Tx Descriptor Arbiter and credits for each traffic class.
260 */
261s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw,
262 struct ixgbe_dcb_config *dcb_config)
263{
264 s32 ret = 0;
265 if (hw->mac.type == ixgbe_mac_82598EB)
266 ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
267 return ret;
268}
269
270/**
271 * ixgbe_dcb_config_tx_data_arbiter - Config Tx data arbiter
272 * @hw: pointer to hardware structure
273 * @dcb_config: pointer to ixgbe_dcb_config structure
274 *
275 * Configure Tx Data Arbiter and credits for each traffic class.
276 */
277s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw,
278 struct ixgbe_dcb_config *dcb_config)
279{
280 s32 ret = 0;
281 if (hw->mac.type == ixgbe_mac_82598EB)
282 ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
283 return ret;
284}
285
286/**
287 * ixgbe_dcb_config_pfc - Config priority flow control
288 * @hw: pointer to hardware structure
289 * @dcb_config: pointer to ixgbe_dcb_config structure
290 *
291 * Configure Priority Flow Control for each traffic class.
292 */
293s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw,
294 struct ixgbe_dcb_config *dcb_config)
295{
296 s32 ret = 0;
297 if (hw->mac.type == ixgbe_mac_82598EB)
298 ret = ixgbe_dcb_config_pfc_82598(hw, dcb_config);
299 return ret;
300}
301
302/**
303 * ixgbe_dcb_config_tc_stats - Config traffic class statistics
304 * @hw: pointer to hardware structure
305 *
306 * Configure queue statistics registers; all queues belonging to the same
307 * traffic class use a single set of queue statistics counters.
308 */
309s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
310{
311 s32 ret = 0;
312 if (hw->mac.type == ixgbe_mac_82598EB)
313 ret = ixgbe_dcb_config_tc_stats_82598(hw);
314 return ret;
315}
316
317/**
318 * ixgbe_dcb_hw_config - Config and enable DCB
319 * @hw: pointer to hardware structure
320 * @dcb_config: pointer to ixgbe_dcb_config structure
321 *
322 * Configure dcb settings and enable dcb mode.
323 */
324s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
325 struct ixgbe_dcb_config *dcb_config)
326{
327 s32 ret = 0;
328 if (hw->mac.type == ixgbe_mac_82598EB)
329 ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
330 return ret;
331}
332
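As a worked illustration of the credit arithmetic in ixgbe_dcb_calculate_tc_credits() above, the standalone sketch below (not part of the patch) repeats the same integer math in user space; the constants mirror those defined in ixgbe_dcb.h, and the example percentages are arbitrary.

#include <stdio.h>

#define MINIMUM_CREDIT_REFILL    5     /* 5 * 64B = 320B, as in ixgbe_dcb.h */
#define MINIMUM_CREDIT_FOR_JUMBO 145   /* room for one 9KB jumbo frame      */
#define MAX_CREDIT               4095

int main(void)
{
	/* Example only: a TC owning 20% of a BWG that owns 30% of the link. */
	unsigned int bwg_percent = 20;   /* TC's share of its bandwidth group */
	unsigned int bw_percent  = 30;   /* group's share of the link         */

	unsigned int link_percentage = (bwg_percent * bw_percent) / 100;
	if (bwg_percent > 0 && link_percentage == 0)
		link_percentage = 1;     /* avoid rounding a small TC down to 0 */

	unsigned int credit_refill = link_percentage * MINIMUM_CREDIT_REFILL;
	unsigned int credit_max    = (link_percentage * MAX_CREDIT) / 100;
	if (credit_max && credit_max < MINIMUM_CREDIT_FOR_JUMBO)
		credit_max = MINIMUM_CREDIT_FOR_JUMBO; /* keep room for a jumbo frame */

	/* Prints: link%=6 refill=30 max=245 (all in 64-byte credit units). */
	printf("link%%=%u refill=%u max=%u\n",
	       link_percentage, credit_refill, credit_max);
	return 0;
}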
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
new file mode 100644
index 000000000000..62dfd243bedc
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -0,0 +1,157 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#ifndef _DCB_CONFIG_H_
30#define _DCB_CONFIG_H_
31
32#include "ixgbe_type.h"
33
34/* DCB data structures */
35
36#define IXGBE_MAX_PACKET_BUFFERS 8
37#define MAX_USER_PRIORITY 8
38#define MAX_TRAFFIC_CLASS 8
39#define MAX_BW_GROUP 8
40#define BW_PERCENT 100
41
42#define DCB_TX_CONFIG 0
43#define DCB_RX_CONFIG 1
44
45/* DCB error Codes */
46#define DCB_SUCCESS 0
47#define DCB_ERR_CONFIG -1
48#define DCB_ERR_PARAM -2
49
50/* Transmit and receive Errors */
51/* Error in bandwidth group allocation */
52#define DCB_ERR_BW_GROUP -3
53/* Error in traffic class bandwidth allocation */
54#define DCB_ERR_TC_BW -4
55/* Traffic class has both link strict and group strict enabled */
56#define DCB_ERR_LS_GS -5
57/* Link strict traffic class has non zero bandwidth */
58#define DCB_ERR_LS_BW_NONZERO -6
59/* Link strict bandwidth group has non zero bandwidth */
60#define DCB_ERR_LS_BWG_NONZERO -7
61/* Traffic class has zero bandwidth */
62#define DCB_ERR_TC_BW_ZERO -8
63
64#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF
65
66struct dcb_pfc_tc_debug {
67 u8 tc;
68 u8 pause_status;
69 u64 pause_quanta;
70};
71
72enum strict_prio_type {
73 prio_none = 0,
74 prio_group,
75 prio_link
76};
77
78/* Traffic class bandwidth allocation per direction */
79struct tc_bw_alloc {
80 u8 bwg_id; /* Bandwidth Group (BWG) ID */
81 u8 bwg_percent; /* % of BWG's bandwidth */
82 u8 link_percent; /* % of link bandwidth */
83 u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
84 u16 data_credits_refill; /* Credit refill amount in 64B granularity */
85 u16 data_credits_max; /* Max credits for a configured packet buffer
86 * in 64B granularity.*/
87 enum strict_prio_type prio_type; /* Link or Group Strict Priority */
88};
89
90enum dcb_pfc_type {
91 pfc_disabled = 0,
92 pfc_enabled_full,
93 pfc_enabled_tx,
94 pfc_enabled_rx
95};
96
97/* Traffic class configuration */
98struct tc_configuration {
99 struct tc_bw_alloc path[2]; /* One each for Tx/Rx */
100 enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */
101
102 u16 desc_credits_max; /* For Tx Descriptor arbitration */
103 u8 tc; /* Traffic class (TC) */
104};
105
106enum dcb_rx_pba_cfg {
107 pba_equal, /* PBA[0-7] each use 64KB FIFO */
108 pba_80_48 /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
109};
110
111struct ixgbe_dcb_config {
112 struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
113 u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
114
115 bool round_robin_enable;
116
117 enum dcb_rx_pba_cfg rx_pba_cfg;
118
119 u32 dcb_cfg_version; /* Not used...OS-specific? */
120 u32 link_speed; /* For bandwidth allocation validation purpose */
121};
122
123/* DCB driver APIs */
124
125/* DCB rule checking function.*/
126s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *config);
127
128/* DCB credits calculation */
129s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8);
130
131/* DCB PFC functions */
132s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, struct ixgbe_dcb_config *g);
133s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
134
135/* DCB traffic class stats */
136s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
137s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
138
139/* DCB config arbiters */
140s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *,
141 struct ixgbe_dcb_config *);
142s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *,
143 struct ixgbe_dcb_config *);
144s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *, struct ixgbe_dcb_config *);
145
146/* DCB hw initialization */
147s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
148
149/* DCB definitions for credit calculation */
150#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
151#define MINIMUM_CREDIT_REFILL 5 /* 5*64B = 320B */
152#define MINIMUM_CREDIT_FOR_JUMBO 145 /* 145= UpperBound((9*1024+54)/64B) for 9KB jumbo frame */
153#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */
154#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
155#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1204 / 64B */
156
157#endif /* _DCB_CONFIG_H */
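The jumbo and TSO minimums defined above follow directly from the 64-byte credit granularity noted in the comments; a small standalone check of that arithmetic (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int jumbo_bytes   = 9 * 1024 + 54;             /* 9KB frame plus overhead */
	unsigned int jumbo_credits = (jumbo_bytes + 63) / 64;   /* rounds up to 145        */
	unsigned int tso_credits   = (32 * 1024) / 64 + 1;      /* 513 for a 32KB TSO send */

	printf("jumbo=%u tso=%u\n", jumbo_credits, tso_credits);
	return 0;
}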
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
new file mode 100644
index 000000000000..fce6867a4517
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -0,0 +1,398 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "ixgbe.h"
30#include "ixgbe_type.h"
31#include "ixgbe_dcb.h"
32#include "ixgbe_dcb_82598.h"
33
34/**
35 * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
36 * @hw: pointer to hardware structure
37 * @stats: pointer to statistics structure
38 * @tc_count: Number of elements in bwg_array.
39 *
40 * This function returns the status data for each of the Traffic Classes in use.
41 */
42s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
43 struct ixgbe_hw_stats *stats,
44 u8 tc_count)
45{
46 int tc;
47
48 if (tc_count > MAX_TRAFFIC_CLASS)
49 return DCB_ERR_PARAM;
50
51 /* Statistics pertaining to each traffic class */
52 for (tc = 0; tc < tc_count; tc++) {
53 /* Transmitted Packets */
54 stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
55 /* Transmitted Bytes */
56 stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
57 /* Received Packets */
58 stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
59 /* Received Bytes */
60 stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
61 }
62
63 return 0;
64}
65
66/**
67 * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
68 * @hw: pointer to hardware structure
69 * @stats: pointer to statistics structure
70 * @tc_count: Number of elements in bwg_array.
71 *
72 * This function returns the CBFC status data for each of the Traffic Classes.
73 */
74s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
75 struct ixgbe_hw_stats *stats,
76 u8 tc_count)
77{
78 int tc;
79
80 if (tc_count > MAX_TRAFFIC_CLASS)
81 return DCB_ERR_PARAM;
82
83 for (tc = 0; tc < tc_count; tc++) {
84 /* Priority XOFF Transmitted */
85 stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
86 /* Priority XOFF Received */
87 stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
88 }
89
90 return 0;
91}
92
93/**
94 * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers
95 * @hw: pointer to hardware structure
96 * @dcb_config: pointer to ixgbe_dcb_config structure
97 *
98 * Configure packet buffers for DCB mode.
99 */
100s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
101 struct ixgbe_dcb_config *dcb_config)
102{
103 s32 ret_val = 0;
104 u32 value = IXGBE_RXPBSIZE_64KB;
105 u8 i = 0;
106
107 /* Setup Rx packet buffer sizes */
108 switch (dcb_config->rx_pba_cfg) {
109 case pba_80_48:
110 /* Setup the first four at 80KB */
111 value = IXGBE_RXPBSIZE_80KB;
112 for (; i < 4; i++)
113 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
114 /* Setup the last four at 48KB...don't re-init i */
115 value = IXGBE_RXPBSIZE_48KB;
116 /* Fall Through */
117 case pba_equal:
118 default:
119 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
120 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
121
122 /* Setup Tx packet buffer sizes */
123 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
124 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
125 IXGBE_TXPBSIZE_40KB);
126 }
127 break;
128 }
129
130 return ret_val;
131}
132
133/**
134 * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
135 * @hw: pointer to hardware structure
136 * @dcb_config: pointer to ixgbe_dcb_config structure
137 *
138 * Configure Rx Data Arbiter and credits for each traffic class.
139 */
140s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
141 struct ixgbe_dcb_config *dcb_config)
142{
143 struct tc_bw_alloc *p;
144 u32 reg = 0;
145 u32 credit_refill = 0;
146 u32 credit_max = 0;
147 u8 i = 0;
148
149 reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
150 IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
151
152 reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
153 /* Enable Arbiter */
154 reg &= ~IXGBE_RMCS_ARBDIS;
155 /* Enable Receive Recycle within the BWG */
156 reg |= IXGBE_RMCS_RRM;
157 /* Enable Deficit Fixed Priority arbitration*/
158 reg |= IXGBE_RMCS_DFP;
159
160 IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
161
162 /* Configure traffic class credits and priority */
163 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
164 p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
165 credit_refill = p->data_credits_refill;
166 credit_max = p->data_credits_max;
167
168 reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
169
170 if (p->prio_type == prio_link)
171 reg |= IXGBE_RT2CR_LSP;
172
173 IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
174 }
175
176 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
177 reg |= IXGBE_RDRXCTL_RDMTS_1_2;
178 reg |= IXGBE_RDRXCTL_MPBEN;
179 reg |= IXGBE_RDRXCTL_MCEN;
180 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
181
182 reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
183 /* Make sure there is enough descriptors before arbitration */
184 reg &= ~IXGBE_RXCTRL_DMBYPS;
185 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
186
187 return 0;
188}
189
190/**
191 * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
192 * @hw: pointer to hardware structure
193 * @dcb_config: pointer to ixgbe_dcb_config structure
194 *
195 * Configure Tx Descriptor Arbiter and credits for each traffic class.
196 */
197s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
198 struct ixgbe_dcb_config *dcb_config)
199{
200 struct tc_bw_alloc *p;
201 u32 reg, max_credits;
202 u8 i;
203
204 reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
205
206 /* Enable arbiter */
207 reg &= ~IXGBE_DPMCS_ARBDIS;
208 if (!(dcb_config->round_robin_enable)) {
209 /* Enable DFP and Recycle mode */
210 reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
211 }
212 reg |= IXGBE_DPMCS_TSOEF;
213 /* Configure Max TSO packet size 34KB including payload and headers */
214 reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
215
216 IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
217
218 /* Configure traffic class credits and priority */
219 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
220 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
221 max_credits = dcb_config->tc_config[i].desc_credits_max;
222 reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
223 reg |= p->data_credits_refill;
224 reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
225
226 if (p->prio_type == prio_group)
227 reg |= IXGBE_TDTQ2TCCR_GSP;
228
229 if (p->prio_type == prio_link)
230 reg |= IXGBE_TDTQ2TCCR_LSP;
231
232 IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
233 }
234
235 return 0;
236}
237
238/**
239 * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
240 * @hw: pointer to hardware structure
241 * @dcb_config: pointer to ixgbe_dcb_config structure
242 *
243 * Configure Tx Data Arbiter and credits for each traffic class.
244 */
245s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
246 struct ixgbe_dcb_config *dcb_config)
247{
248 struct tc_bw_alloc *p;
249 u32 reg;
250 u8 i;
251
252 reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
253 /* Enable Data Plane Arbiter */
254 reg &= ~IXGBE_PDPMCS_ARBDIS;
255 /* Enable DFP and Transmit Recycle Mode */
256 reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
257
258 IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
259
260 /* Configure traffic class credits and priority */
261 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
262 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
263 reg = p->data_credits_refill;
264 reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT;
265 reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
266
267 if (p->prio_type == prio_group)
268 reg |= IXGBE_TDPT2TCCR_GSP;
269
270 if (p->prio_type == prio_link)
271 reg |= IXGBE_TDPT2TCCR_LSP;
272
273 IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
274 }
275
276 /* Enable Tx packet buffer division */
277 reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
278 reg |= IXGBE_DTXCTL_ENDBUBD;
279 IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
280
281 return 0;
282}
283
284/**
285 * ixgbe_dcb_config_pfc_82598 - Config priority flow control
286 * @hw: pointer to hardware structure
287 * @dcb_config: pointer to ixgbe_dcb_config structure
288 *
289 * Configure Priority Flow Control for each traffic class.
290 */
291s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
292 struct ixgbe_dcb_config *dcb_config)
293{
294 u32 reg, rx_pba_size;
295 u8 i;
296
297 /* Enable Transmit Priority Flow Control */
298 reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
299 reg &= ~IXGBE_RMCS_TFCE_802_3X;
300 /* correct the reporting of our flow control status */
301 hw->fc.type = ixgbe_fc_none;
302 reg |= IXGBE_RMCS_TFCE_PRIORITY;
303 IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
304
305 /* Enable Receive Priority Flow Control */
306 reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
307 reg &= ~IXGBE_FCTRL_RFCE;
308 reg |= IXGBE_FCTRL_RPFCE;
309 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
310
311 /*
312 * Configure flow control thresholds and enable priority flow control
313 * for each traffic class.
314 */
315 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
316 if (dcb_config->rx_pba_cfg == pba_equal) {
317 rx_pba_size = IXGBE_RXPBSIZE_64KB;
318 } else {
319 rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
320 : IXGBE_RXPBSIZE_48KB;
321 }
322
323 reg = ((rx_pba_size >> 5) & 0xFFF0);
324 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
325 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
326 reg |= IXGBE_FCRTL_XONE;
327
328 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
329
330 reg = ((rx_pba_size >> 2) & 0xFFF0);
331 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
332 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
333 reg |= IXGBE_FCRTH_FCEN;
334
335 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
336 }
337
338 /* Configure pause time */
339 for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
340 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
341
342 /* Configure flow control refresh threshold value */
343 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
344
345 return 0;
346}
347
348/**
349 * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
350 * @hw: pointer to hardware structure
351 *
352 * Configure queue statistics registers; all queues belonging to the same
353 * traffic class use a single set of queue statistics counters.
354 */
355s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
356{
357 u32 reg = 0;
358 u8 i = 0;
359 u8 j = 0;
360
361 /* Receive Queues stats setting - 8 queues per statistics reg */
362 for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
363 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
364 reg |= ((0x1010101) * j);
365 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
366 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
367 reg |= ((0x1010101) * j);
368 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
369 }
370 /* Transmit Queues stats setting - 4 queues per statistics reg */
371 for (i = 0; i < 8; i++) {
372 reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
373 reg |= ((0x1010101) * i);
374 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
375 }
376
377 return 0;
378}
379
380/**
381 * ixgbe_dcb_hw_config_82598 - Config and enable DCB
382 * @hw: pointer to hardware structure
383 * @dcb_config: pointer to ixgbe_dcb_config structure
384 *
385 * Configure dcb settings and enable dcb mode.
386 */
387s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
388 struct ixgbe_dcb_config *dcb_config)
389{
390 ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config);
391 ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
392 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
393 ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
394 ixgbe_dcb_config_pfc_82598(hw, dcb_config);
395 ixgbe_dcb_config_tc_stats_82598(hw);
396
397 return 0;
398}
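For reference, the XON/XOFF water marks programmed in ixgbe_dcb_config_pfc_82598() above work out as follows for a 64KB Rx packet buffer; this is a standalone illustration of the shift arithmetic, not driver code.

#include <stdio.h>

int main(void)
{
	unsigned int rx_pba_size = 0x00010000;             /* IXGBE_RXPBSIZE_64KB       */
	unsigned int fcrtl = (rx_pba_size >> 5) & 0xFFF0;  /* low water:  0x0800 (2KB)  */
	unsigned int fcrth = (rx_pba_size >> 2) & 0xFFF0;  /* high water: 0x4000 (16KB) */

	printf("FCRTL=0x%04x FCRTH=0x%04x\n", fcrtl, fcrth);
	return 0;
}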
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
new file mode 100644
index 000000000000..1e6a313719d7
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -0,0 +1,94 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#ifndef _DCB_82598_CONFIG_H_
30#define _DCB_82598_CONFIG_H_
31
32/* DCB register definitions */
33
34#define IXGBE_DPMCS_MTSOS_SHIFT 16
35#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */
36#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */
37#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */
38#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
39
40#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */
41
42#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
43#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */
44
45#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet buffers enable */
46#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores (RSS) enable */
47
48#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12
49#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9
50#define IXGBE_TDTQ2TCCR_GSP 0x40000000
51#define IXGBE_TDTQ2TCCR_LSP 0x80000000
52
53#define IXGBE_TDPT2TCCR_MCL_SHIFT 12
54#define IXGBE_TDPT2TCCR_BWG_SHIFT 9
55#define IXGBE_TDPT2TCCR_GSP 0x40000000
56#define IXGBE_TDPT2TCCR_LSP 0x80000000
57
58#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */
59#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */
60#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */
61
62#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */
63
64#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
65#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
66#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
67#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
68
69#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000
70
71/* DCB hardware-specific driver APIs */
72
73/* DCB PFC functions */
74s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
75s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *,
76 u8);
77
78/* DCB traffic class stats */
79s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *);
80s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *,
81 u8);
82
83/* DCB config arbiters */
84s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *,
85 struct ixgbe_dcb_config *);
86s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *,
87 struct ixgbe_dcb_config *);
88s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *,
89 struct ixgbe_dcb_config *);
90
91/* DCB hw initialization */
92s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
93
94#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
new file mode 100644
index 000000000000..50bff2af6b04
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -0,0 +1,356 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "ixgbe.h"
30#include <linux/dcbnl.h>
31
32/* Callbacks for DCB netlink in the kernel */
33#define BIT_DCB_MODE 0x01
34#define BIT_PFC 0x02
35#define BIT_PG_RX 0x04
36#define BIT_PG_TX 0x08
37
38int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
39 struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
40{
41 struct tc_configuration *src_tc_cfg = NULL;
42 struct tc_configuration *dst_tc_cfg = NULL;
43 int i;
44
45 if (!src_dcb_cfg || !dst_dcb_cfg)
46 return -EINVAL;
47
48 for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
49 src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
50 dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
51
52 dst_tc_cfg->path[DCB_TX_CONFIG].prio_type =
53 src_tc_cfg->path[DCB_TX_CONFIG].prio_type;
54
55 dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id =
56 src_tc_cfg->path[DCB_TX_CONFIG].bwg_id;
57
58 dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent =
59 src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent;
60
61 dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap =
62 src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap;
63
64 dst_tc_cfg->path[DCB_RX_CONFIG].prio_type =
65 src_tc_cfg->path[DCB_RX_CONFIG].prio_type;
66
67 dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id =
68 src_tc_cfg->path[DCB_RX_CONFIG].bwg_id;
69
70 dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent =
71 src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent;
72
73 dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap =
74 src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap;
75 }
76
77 for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
78 dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG]
79 [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
80 [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
81 dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG]
82 [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
83 [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
84 }
85
86 for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
87 dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc =
88 src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc;
89 }
90
91 return 0;
92}
93
94static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
95{
96 struct ixgbe_adapter *adapter = netdev_priv(netdev);
97
98 DPRINTK(DRV, INFO, "Get DCB Admin Mode.\n");
99
100 return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
101}
102
103static u16 ixgbe_dcb_select_queue(struct net_device *dev, struct sk_buff *skb)
104{
105 /* All traffic should default to class 0 */
106 return 0;
107}
108
109static void ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
110{
111 struct ixgbe_adapter *adapter = netdev_priv(netdev);
112
113 DPRINTK(DRV, INFO, "Set DCB Admin Mode.\n");
114
115 if (state > 0) {
116 /* Turn on DCB */
117 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
118 return;
119 } else {
120 if (netif_running(netdev))
121 netdev->stop(netdev);
122 ixgbe_reset_interrupt_capability(adapter);
123 ixgbe_napi_del_all(adapter);
124 kfree(adapter->tx_ring);
125 kfree(adapter->rx_ring);
126 adapter->tx_ring = NULL;
127 adapter->rx_ring = NULL;
128 netdev->select_queue = &ixgbe_dcb_select_queue;
129
130 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
131 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
132 ixgbe_init_interrupt_scheme(adapter);
133 ixgbe_napi_add_all(adapter);
134 if (netif_running(netdev))
135 netdev->open(netdev);
136 }
137 } else {
138 /* Turn off DCB */
139 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
140 if (netif_running(netdev))
141 netdev->stop(netdev);
142 ixgbe_reset_interrupt_capability(adapter);
143 ixgbe_napi_del_all(adapter);
144 kfree(adapter->tx_ring);
145 kfree(adapter->rx_ring);
146 adapter->tx_ring = NULL;
147 adapter->rx_ring = NULL;
148 netdev->select_queue = NULL;
149
150 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
151 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
152 ixgbe_init_interrupt_scheme(adapter);
153 ixgbe_napi_add_all(adapter);
154 if (netif_running(netdev))
155 netdev->open(netdev);
156 } else {
157 return;
158 }
159 }
160}
161
162static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
163 u8 *perm_addr)
164{
165 struct ixgbe_adapter *adapter = netdev_priv(netdev);
166 int i;
167
168 for (i = 0; i < netdev->addr_len; i++)
169 perm_addr[i] = adapter->hw.mac.perm_addr[i];
170}
171
172static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
173 u8 prio, u8 bwg_id, u8 bw_pct,
174 u8 up_map)
175{
176 struct ixgbe_adapter *adapter = netdev_priv(netdev);
177
178 if (prio != DCB_ATTR_VALUE_UNDEFINED)
179 adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
180 if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
181 adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id;
182 if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
183 adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent =
184 bw_pct;
185 if (up_map != DCB_ATTR_VALUE_UNDEFINED)
186 adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
187 up_map;
188
189 if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type !=
190 adapter->dcb_cfg.tc_config[tc].path[0].prio_type) ||
191 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id !=
192 adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) ||
193 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
194 adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
195 (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
196 adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
197 adapter->dcb_set_bitmap |= BIT_PG_TX;
198}
199
200static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
201 u8 bw_pct)
202{
203 struct ixgbe_adapter *adapter = netdev_priv(netdev);
204
205 adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
206
207 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
208 adapter->dcb_cfg.bw_percentage[0][bwg_id])
209 adapter->dcb_set_bitmap |= BIT_PG_RX;
210}
211
212static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
213 u8 prio, u8 bwg_id, u8 bw_pct,
214 u8 up_map)
215{
216 struct ixgbe_adapter *adapter = netdev_priv(netdev);
217
218 if (prio != DCB_ATTR_VALUE_UNDEFINED)
219 adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
220 if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
221 adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id;
222 if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
223 adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent =
224 bw_pct;
225 if (up_map != DCB_ATTR_VALUE_UNDEFINED)
226 adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
227 up_map;
228
229 if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type !=
230 adapter->dcb_cfg.tc_config[tc].path[1].prio_type) ||
231 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id !=
232 adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) ||
233 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
234 adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
235 (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
236 adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
237 adapter->dcb_set_bitmap |= BIT_PG_RX;
238}
239
240static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
241 u8 bw_pct)
242{
243 struct ixgbe_adapter *adapter = netdev_priv(netdev);
244
245 adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
246
247 if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
248 adapter->dcb_cfg.bw_percentage[1][bwg_id])
249 adapter->dcb_set_bitmap |= BIT_PG_RX;
250}
251
252static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
253 u8 *prio, u8 *bwg_id, u8 *bw_pct,
254 u8 *up_map)
255{
256 struct ixgbe_adapter *adapter = netdev_priv(netdev);
257
258 *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
259 *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
260 *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent;
261 *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap;
262}
263
264static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
265 u8 *bw_pct)
266{
267 struct ixgbe_adapter *adapter = netdev_priv(netdev);
268
269 *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id];
270}
271
272static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
273 u8 *prio, u8 *bwg_id, u8 *bw_pct,
274 u8 *up_map)
275{
276 struct ixgbe_adapter *adapter = netdev_priv(netdev);
277
278 *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
279 *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
280 *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent;
281 *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap;
282}
283
284static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
285 u8 *bw_pct)
286{
287 struct ixgbe_adapter *adapter = netdev_priv(netdev);
288
289 *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
290}
291
292static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
293 u8 setting)
294{
295 struct ixgbe_adapter *adapter = netdev_priv(netdev);
296
297 adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
298 if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
299 adapter->dcb_cfg.tc_config[priority].dcb_pfc)
300 adapter->dcb_set_bitmap |= BIT_PFC;
301}
302
303static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
304 u8 *setting)
305{
306 struct ixgbe_adapter *adapter = netdev_priv(netdev);
307
308 *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
309}
310
311static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
312{
313 struct ixgbe_adapter *adapter = netdev_priv(netdev);
314 int ret;
315
316 if (!adapter->dcb_set_bitmap)
317 return 1;
318
319 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
320 msleep(1);
321
322 if (netif_running(netdev))
323 ixgbe_down(adapter);
324
325 ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
326 adapter->ring_feature[RING_F_DCB].indices);
327 if (ret) {
328 clear_bit(__IXGBE_RESETTING, &adapter->state);
329 return ret;
330 }
331
332 if (netif_running(netdev))
333 ixgbe_up(adapter);
334
335 adapter->dcb_set_bitmap = 0x00;
336 clear_bit(__IXGBE_RESETTING, &adapter->state);
337 return ret;
338}
339
340struct dcbnl_rtnl_ops dcbnl_ops = {
341 .getstate = ixgbe_dcbnl_get_state,
342 .setstate = ixgbe_dcbnl_set_state,
343 .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
344 .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx,
345 .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx,
346 .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx,
347 .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx,
348 .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx,
349 .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx,
350 .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx,
351 .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx,
352 .setpfccfg = ixgbe_dcbnl_set_pfc_cfg,
353 .getpfccfg = ixgbe_dcbnl_get_pfc_cfg,
354 .setall = ixgbe_dcbnl_set_all
355};
356
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index a610016a0172..aaa4404e7c5f 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -97,9 +97,18 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
 	((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
 	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
-#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
 #define IXGBE_GLOBAL_STATS_LEN	ARRAY_SIZE(ixgbe_gstrings_stats)
-#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#define IXGBE_PB_STATS_LEN ( \
+		(((struct ixgbe_adapter *)netdev->priv)->flags & \
+		 IXGBE_FLAG_DCB_ENABLED) ? \
+		(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
+		 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
+		 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
+		 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
+		/ sizeof(u64) : 0)
+#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
+			 IXGBE_PB_STATS_LEN + \
+			 IXGBE_QUEUE_STATS_LEN)
 
 static int ixgbe_get_settings(struct net_device *netdev,
                               struct ethtool_cmd *ecmd)
@@ -831,6 +840,16 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			data[i + k] = queue_stat[k];
 		i += k;
 	}
+	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+		for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
+			data[i++] = adapter->stats.pxontxc[j];
+			data[i++] = adapter->stats.pxofftxc[j];
+		}
+		for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
+			data[i++] = adapter->stats.pxonrxc[j];
+			data[i++] = adapter->stats.pxoffrxc[j];
+		}
+	}
 }
 
 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
@@ -859,6 +878,13 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
 			sprintf(p, "rx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
 		}
+		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+			for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+				sprintf(p, "tx_pb_%u_pxon", i);
+			}
+			for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
+			}
+		}
 		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
 	}
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 40108523377f..91dde9cdab66 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -404,7 +404,7 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
404 404
405 if (adapter->netdev->features & NETIF_F_LRO && 405 if (adapter->netdev->features & NETIF_F_LRO &&
406 skb->ip_summed == CHECKSUM_UNNECESSARY) { 406 skb->ip_summed == CHECKSUM_UNNECESSARY) {
407 if (adapter->vlgrp && is_vlan) 407 if (adapter->vlgrp && is_vlan && (tag != 0))
408 lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, 408 lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
409 adapter->vlgrp, tag, 409 adapter->vlgrp, tag,
410 rx_desc); 410 rx_desc);
@@ -413,12 +413,12 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
413 ring->lro_used = true; 413 ring->lro_used = true;
414 } else { 414 } else {
415 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { 415 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
416 if (adapter->vlgrp && is_vlan) 416 if (adapter->vlgrp && is_vlan && (tag != 0))
417 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag); 417 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
418 else 418 else
419 netif_receive_skb(skb); 419 netif_receive_skb(skb);
420 } else { 420 } else {
421 if (adapter->vlgrp && is_vlan) 421 if (adapter->vlgrp && is_vlan && (tag != 0))
422 vlan_hwaccel_rx(skb, adapter->vlgrp, tag); 422 vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
423 else 423 else
424 netif_rx(skb); 424 netif_rx(skb);
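Note: the receive-path changes above, and the DCB hunks below, all key off the 802.1p priority carried in the VLAN tag. As a refresher, here is a standalone sketch of the 802.1Q TCI layout (not driver code):

    /*
     * Standalone sketch of the 802.1Q tag layout: priority (PCP) in
     * bits 15:13, DEI in bit 12, VLAN ID in bits 11:0.
     */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t tci = 0xa00a;              /* example tag */
            unsigned pcp = (tci >> 13) & 0x7;   /* 802.1p priority = 5 */
            unsigned dei = (tci >> 12) & 0x1;
            unsigned vid = tci & 0x0fff;        /* VLAN ID = 10 */

            printf("pcp=%u dei=%u vid=%u\n", pcp, dei, vid);
            return 0;
    }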
@@ -1670,10 +1670,12 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1670 * effects of setting this bit are only that SRRCTL must be 1670 * effects of setting this bit are only that SRRCTL must be
1671 * fully programmed [0..15] 1671 * fully programmed [0..15]
1672 */ 1672 */
1673 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 1673 if (adapter->flags &
1674 rdrxctl |= IXGBE_RDRXCTL_MVMEN; 1674 (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) {
1675 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 1675 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1676 1676 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
1677 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1678 }
1677 1679
1678 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 1680 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
1679 /* Fill out redirection table */ 1681 /* Fill out redirection table */
@@ -1732,6 +1734,16 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
1732 ixgbe_irq_disable(adapter); 1734 ixgbe_irq_disable(adapter);
1733 adapter->vlgrp = grp; 1735 adapter->vlgrp = grp;
1734 1736
1737 /*
1738 * For a DCB driver, always enable VLAN tag stripping so we can
1739 * still receive traffic from a DCB-enabled host even if we're
1740 * not in DCB mode.
1741 */
1742 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
1743 ctrl |= IXGBE_VLNCTRL_VME;
1744 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1745 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1746
1735 if (grp) { 1747 if (grp) {
1736 /* enable VLAN tag insert/strip */ 1748 /* enable VLAN tag insert/strip */
1737 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 1749 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
@@ -1896,6 +1908,44 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
1896 } 1908 }
1897} 1909}
1898 1910
1911#ifdef CONFIG_IXGBE_DCBNL
1912/*
1913 * ixgbe_configure_dcb - Configure DCB hardware
1914 * @adapter: ixgbe adapter struct
1915 *
1916 * This is called by the driver on open to configure the DCB hardware.
1917 * This is also called by the gennetlink interface when reconfiguring
1918 * the DCB state.
1919 */
1920static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
1921{
1922 struct ixgbe_hw *hw = &adapter->hw;
1923 u32 txdctl, vlnctrl;
1924 int i, j;
1925
1926 ixgbe_dcb_check_config(&adapter->dcb_cfg);
1927 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
1928 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
1929
1930 /* reconfigure the hardware */
1931 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
1932
1933 for (i = 0; i < adapter->num_tx_queues; i++) {
1934 j = adapter->tx_ring[i].reg_idx;
1935 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
1936 /* PThresh workaround for Tx hang with DFP enabled. */
1937 txdctl |= 32;
1938 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
1939 }
1940 /* Enable VLAN tag insert/strip */
1941 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1942 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
1943 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1944 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1945 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
1946}
1947
1948#endif
1899static void ixgbe_configure(struct ixgbe_adapter *adapter) 1949static void ixgbe_configure(struct ixgbe_adapter *adapter)
1900{ 1950{
1901 struct net_device *netdev = adapter->netdev; 1951 struct net_device *netdev = adapter->netdev;
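Note on ixgbe_configure_dcb() above: the flow is to validate the user configuration, convert per-class bandwidth percentages into refill credits for both directions, and then program the hardware. A standalone sketch of the credit idea (the quantum, cap, and frame size are assumed values, not taken from ixgbe_dcb.c):

    /*
     * Standalone sketch of the idea behind the credit calculation:
     * turn per-class bandwidth percentages into refill credits in
     * fixed-size units. Quantum, cap and frame size are assumptions.
     */
    #include <stdio.h>

    #define NUM_TCS        8
    #define CREDIT_QUANTUM 64       /* assumed credit unit, bytes */
    #define MAX_CREDIT     4095     /* assumed per-class cap */

    int main(void)
    {
            int percent[NUM_TCS] = { 13, 12, 13, 12, 13, 12, 13, 12 };
            int max_frame = 1518;

            for (int tc = 0; tc < NUM_TCS; tc++) {
                    int credit = percent[tc] * max_frame / CREDIT_QUANTUM;

                    /* never less than one full frame per refill */
                    if (credit * CREDIT_QUANTUM < max_frame)
                            credit = (max_frame + CREDIT_QUANTUM - 1) /
                                     CREDIT_QUANTUM;
                    if (credit > MAX_CREDIT)
                            credit = MAX_CREDIT;
                    printf("tc %d: %d%% -> %d credits\n",
                           tc, percent[tc], credit);
            }
            return 0;
    }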
@@ -1904,6 +1954,16 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
1904 ixgbe_set_rx_mode(netdev); 1954 ixgbe_set_rx_mode(netdev);
1905 1955
1906 ixgbe_restore_vlan(adapter); 1956 ixgbe_restore_vlan(adapter);
1957#ifdef CONFIG_IXGBE_DCBNL
1958 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1959 netif_set_gso_max_size(netdev, 32768);
1960 ixgbe_configure_dcb(adapter);
1961 } else {
1962 netif_set_gso_max_size(netdev, 65536);
1963 }
1964#else
1965 netif_set_gso_max_size(netdev, 65536);
1966#endif
1907 1967
1908 ixgbe_configure_tx(adapter); 1968 ixgbe_configure_tx(adapter);
1909 ixgbe_configure_rx(adapter); 1969 ixgbe_configure_rx(adapter);
@@ -1995,9 +2055,6 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1995 2055
1996 ixgbe_irq_enable(adapter); 2056 ixgbe_irq_enable(adapter);
1997 2057
1998 /* enable transmits */
1999 netif_tx_start_all_queues(netdev);
2000
2001 /* bring the link up in the watchdog, this could race with our first 2058 /* bring the link up in the watchdog, this could race with our first
2002 * link up interrupt but shouldn't be a problem */ 2059 * link up interrupt but shouldn't be a problem */
2003 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2060 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -2260,6 +2317,11 @@ static void ixgbe_reset_task(struct work_struct *work)
2260 struct ixgbe_adapter *adapter; 2317 struct ixgbe_adapter *adapter;
2261 adapter = container_of(work, struct ixgbe_adapter, reset_task); 2318 adapter = container_of(work, struct ixgbe_adapter, reset_task);
2262 2319
2320 /* If we're already down or resetting, just bail */
2321 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
2322 test_bit(__IXGBE_RESETTING, &adapter->state))
2323 return;
2324
2263 adapter->tx_timeout_count++; 2325 adapter->tx_timeout_count++;
2264 2326
2265 ixgbe_reinit_locked(adapter); 2327 ixgbe_reinit_locked(adapter);
@@ -2269,15 +2331,31 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2269{ 2331{
2270 int nrq = 1, ntq = 1; 2332 int nrq = 1, ntq = 1;
2271 int feature_mask = 0, rss_i, rss_m; 2333 int feature_mask = 0, rss_i, rss_m;
2334 int dcb_i, dcb_m;
2272 2335
2273 /* Number of supported queues */ 2336 /* Number of supported queues */
2274 switch (adapter->hw.mac.type) { 2337 switch (adapter->hw.mac.type) {
2275 case ixgbe_mac_82598EB: 2338 case ixgbe_mac_82598EB:
2339 dcb_i = adapter->ring_feature[RING_F_DCB].indices;
2340 dcb_m = 0;
2276 rss_i = adapter->ring_feature[RING_F_RSS].indices; 2341 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2277 rss_m = 0; 2342 rss_m = 0;
2278 feature_mask |= IXGBE_FLAG_RSS_ENABLED; 2343 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2344 feature_mask |= IXGBE_FLAG_DCB_ENABLED;
2279 2345
2280 switch (adapter->flags & feature_mask) { 2346 switch (adapter->flags & feature_mask) {
2347 case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
2348 dcb_m = 0x7 << 3;
2349 rss_i = min(8, rss_i);
2350 rss_m = 0x7;
2351 nrq = dcb_i * rss_i;
2352 ntq = min(MAX_TX_QUEUES, dcb_i * rss_i);
2353 break;
2354 case (IXGBE_FLAG_DCB_ENABLED):
2355 dcb_m = 0x7 << 3;
2356 nrq = dcb_i;
2357 ntq = dcb_i;
2358 break;
2281 case (IXGBE_FLAG_RSS_ENABLED): 2359 case (IXGBE_FLAG_RSS_ENABLED):
2282 rss_m = 0xF; 2360 rss_m = 0xF;
2283 nrq = rss_i; 2361 nrq = rss_i;
@@ -2285,6 +2363,8 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2285 break; 2363 break;
2286 case 0: 2364 case 0:
2287 default: 2365 default:
2366 dcb_i = 0;
2367 dcb_m = 0;
2288 rss_i = 0; 2368 rss_i = 0;
2289 rss_m = 0; 2369 rss_m = 0;
2290 nrq = 1; 2370 nrq = 1;
@@ -2292,6 +2372,12 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2292 break; 2372 break;
2293 } 2373 }
2294 2374
2375 /* Sanity check, we should never have zero queues */
2376 nrq = (nrq ?:1);
2377 ntq = (ntq ?:1);
2378
2379 adapter->ring_feature[RING_F_DCB].indices = dcb_i;
2380 adapter->ring_feature[RING_F_DCB].mask = dcb_m;
2295 adapter->ring_feature[RING_F_RSS].indices = rss_i; 2381 adapter->ring_feature[RING_F_RSS].indices = rss_i;
2296 adapter->ring_feature[RING_F_RSS].mask = rss_m; 2382 adapter->ring_feature[RING_F_RSS].mask = rss_m;
2297 break; 2383 break;
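Note on the queue-count logic above: with both DCB and RSS enabled, the Rx queue count is the product of the traffic-class count and the (capped) RSS indices, while the Tx count is clamped to the hardware maximum. A standalone worked example (the MAX_TX_QUEUES value is assumed purely for illustration):

    /*
     * Standalone worked example of the DCB + RSS queue arithmetic,
     * assuming 8 traffic classes and 16 online CPUs; MAX_TX_QUEUES is
     * taken as 32 for illustration only.
     */
    #include <stdio.h>

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
            int dcb_i = 8;                  /* traffic classes */
            int rss_i = min_int(8, 16);     /* RSS indices, capped at 8 */
            int max_tx_queues = 32;         /* illustrative value */

            int nrq = dcb_i * rss_i;                         /* 64 */
            int ntq = min_int(max_tx_queues, dcb_i * rss_i); /* 32 */

            printf("rx queues=%d tx queues=%d\n", nrq, ntq);
            return 0;
    }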
@@ -2343,6 +2429,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2343 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 2429 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2344 kfree(adapter->msix_entries); 2430 kfree(adapter->msix_entries);
2345 adapter->msix_entries = NULL; 2431 adapter->msix_entries = NULL;
2432 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2346 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 2433 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2347 ixgbe_set_num_queues(adapter); 2434 ixgbe_set_num_queues(adapter);
2348 } else { 2435 } else {
@@ -2362,15 +2449,42 @@ static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2362{ 2449{
2363 int feature_mask = 0, rss_i; 2450 int feature_mask = 0, rss_i;
2364 int i, txr_idx, rxr_idx; 2451 int i, txr_idx, rxr_idx;
2452 int dcb_i;
2365 2453
2366 /* Number of supported queues */ 2454 /* Number of supported queues */
2367 switch (adapter->hw.mac.type) { 2455 switch (adapter->hw.mac.type) {
2368 case ixgbe_mac_82598EB: 2456 case ixgbe_mac_82598EB:
2457 dcb_i = adapter->ring_feature[RING_F_DCB].indices;
2369 rss_i = adapter->ring_feature[RING_F_RSS].indices; 2458 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2370 txr_idx = 0; 2459 txr_idx = 0;
2371 rxr_idx = 0; 2460 rxr_idx = 0;
2461 feature_mask |= IXGBE_FLAG_DCB_ENABLED;
2372 feature_mask |= IXGBE_FLAG_RSS_ENABLED; 2462 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2373 switch (adapter->flags & feature_mask) { 2463 switch (adapter->flags & feature_mask) {
2464 case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
2465 for (i = 0; i < dcb_i; i++) {
2466 int j;
2467 /* Rx first */
2468 for (j = 0; j < adapter->num_rx_queues; j++) {
2469 adapter->rx_ring[rxr_idx].reg_idx =
2470 i << 3 | j;
2471 rxr_idx++;
2472 }
2473 /* Tx now */
2474 for (j = 0; j < adapter->num_tx_queues; j++) {
2475 adapter->tx_ring[txr_idx].reg_idx =
2476 i << 2 | (j >> 1);
2477 if (j & 1)
2478 txr_idx++;
2479 }
2480 }
2481 case (IXGBE_FLAG_DCB_ENABLED):
2482 /* the number of queues is assumed to be symmetric */
2483 for (i = 0; i < dcb_i; i++) {
2484 adapter->rx_ring[i].reg_idx = i << 3;
2485 adapter->tx_ring[i].reg_idx = i << 2;
2486 }
2487 break;
2374 case (IXGBE_FLAG_RSS_ENABLED): 2488 case (IXGBE_FLAG_RSS_ENABLED):
2375 for (i = 0; i < adapter->num_rx_queues; i++) 2489 for (i = 0; i < adapter->num_rx_queues; i++)
2376 adapter->rx_ring[i].reg_idx = i; 2490 adapter->rx_ring[i].reg_idx = i;
@@ -2395,7 +2509,7 @@ static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2395 * number of queues at compile-time. The polling_netdev array is 2509 * number of queues at compile-time. The polling_netdev array is
2396 * intended for Multiqueue, but should work fine with a single queue. 2510 * intended for Multiqueue, but should work fine with a single queue.
2397 **/ 2511 **/
2398static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter) 2512static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
2399{ 2513{
2400 int i; 2514 int i;
2401 2515
@@ -2465,6 +2579,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2465 adapter->msix_entries = kcalloc(v_budget, 2579 adapter->msix_entries = kcalloc(v_budget,
2466 sizeof(struct msix_entry), GFP_KERNEL); 2580 sizeof(struct msix_entry), GFP_KERNEL);
2467 if (!adapter->msix_entries) { 2581 if (!adapter->msix_entries) {
2582 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2468 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 2583 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2469 ixgbe_set_num_queues(adapter); 2584 ixgbe_set_num_queues(adapter);
2470 kfree(adapter->tx_ring); 2585 kfree(adapter->tx_ring);
@@ -2505,7 +2620,7 @@ out:
2505 return err; 2620 return err;
2506} 2621}
2507 2622
2508static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) 2623void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2509{ 2624{
2510 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2625 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2511 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 2626 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
@@ -2529,7 +2644,7 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2529 * - Hardware queue count (num_*_queues) 2644 * - Hardware queue count (num_*_queues)
2530 * - defined by miscellaneous hardware support/features (RSS, etc.) 2645 * - defined by miscellaneous hardware support/features (RSS, etc.)
2531 **/ 2646 **/
2532static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) 2647int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
2533{ 2648{
2534 int err; 2649 int err;
2535 2650
@@ -2577,6 +2692,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2577 struct ixgbe_hw *hw = &adapter->hw; 2692 struct ixgbe_hw *hw = &adapter->hw;
2578 struct pci_dev *pdev = adapter->pdev; 2693 struct pci_dev *pdev = adapter->pdev;
2579 unsigned int rss; 2694 unsigned int rss;
2695#ifdef CONFIG_IXGBE_DCBNL
2696 int j;
2697 struct tc_configuration *tc;
2698#endif
2580 2699
2581 /* PCI config space info */ 2700 /* PCI config space info */
2582 2701
@@ -2590,6 +2709,27 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2590 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); 2709 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
2591 adapter->ring_feature[RING_F_RSS].indices = rss; 2710 adapter->ring_feature[RING_F_RSS].indices = rss;
2592 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 2711 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
2712 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
2713
2714#ifdef CONFIG_IXGBE_DCBNL
2715 /* Configure DCB traffic classes */
2716 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
2717 tc = &adapter->dcb_cfg.tc_config[j];
2718 tc->path[DCB_TX_CONFIG].bwg_id = 0;
2719 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
2720 tc->path[DCB_RX_CONFIG].bwg_id = 0;
2721 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
2722 tc->dcb_pfc = pfc_disabled;
2723 }
2724 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
2725 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
2726 adapter->dcb_cfg.rx_pba_cfg = pba_equal;
2727 adapter->dcb_cfg.round_robin_enable = false;
2728 adapter->dcb_set_bitmap = 0x00;
2729 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
2730 adapter->ring_feature[RING_F_DCB].indices);
2731
2732#endif
2593 if (hw->mac.ops.get_media_type && 2733 if (hw->mac.ops.get_media_type &&
2594 (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) 2734 (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper))
2595 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 2735 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
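Note on the DCB defaults above: the 12 + (j & 1) pattern alternates 12% and 13% across the eight traffic classes, which sums to exactly 100%. A standalone check:

    /*
     * Standalone check: the default split of 12 + (j & 1) percent per
     * traffic class alternates 12/13 and sums to 100 over 8 classes.
     */
    #include <stdio.h>

    int main(void)
    {
            int total = 0;

            for (int j = 0; j < 8; j++) {
                    int pct = 12 + (j & 1);

                    printf("tc %d: %d%%\n", j, pct);
                    total += pct;
            }
            printf("total: %d%%\n", total);     /* 4*12 + 4*13 = 100 */
            return 0;
    }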
@@ -2967,7 +3107,7 @@ static int ixgbe_close(struct net_device *netdev)
2967 * @adapter: private struct 3107 * @adapter: private struct
2968 * helper function to napi_add each possible q_vector->napi 3108 * helper function to napi_add each possible q_vector->napi
2969 */ 3109 */
2970static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) 3110void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
2971{ 3111{
2972 int q_idx, q_vectors; 3112 int q_idx, q_vectors;
2973 int (*poll)(struct napi_struct *, int); 3113 int (*poll)(struct napi_struct *, int);
@@ -2988,7 +3128,7 @@ static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
2988 } 3128 }
2989} 3129}
2990 3130
2991static void ixgbe_napi_del_all(struct ixgbe_adapter *adapter) 3131void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
2992{ 3132{
2993 int q_idx; 3133 int q_idx;
2994 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 3134 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -3109,6 +3249,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
3109 adapter->stats.mpc[i] += mpc; 3249 adapter->stats.mpc[i] += mpc;
3110 total_mpc += adapter->stats.mpc[i]; 3250 total_mpc += adapter->stats.mpc[i];
3111 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 3251 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3252 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3253 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
3254 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3255 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
3256 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
3257 IXGBE_PXONRXC(i));
3258 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
3259 IXGBE_PXONTXC(i));
3260 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
3261 IXGBE_PXOFFRXC(i));
3262 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
3263 IXGBE_PXOFFTXC(i));
3112 } 3264 }
3113 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 3265 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3114 /* work around hardware counting issue */ 3266 /* work around hardware counting issue */
@@ -3248,6 +3400,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
3248 (FLOW_TX ? "TX" : "None")))); 3400 (FLOW_TX ? "TX" : "None"))));
3249 3401
3250 netif_carrier_on(netdev); 3402 netif_carrier_on(netdev);
3403 netif_tx_wake_all_queues(netdev);
3251 } else { 3404 } else {
3252 /* Force detection of hung controller */ 3405 /* Force detection of hung controller */
3253 adapter->detect_tx_hung = true; 3406 adapter->detect_tx_hung = true;
@@ -3258,6 +3411,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
3258 if (netif_carrier_ok(netdev)) { 3411 if (netif_carrier_ok(netdev)) {
3259 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 3412 DPRINTK(LINK, INFO, "NIC Link is Down\n");
3260 netif_carrier_off(netdev); 3413 netif_carrier_off(netdev);
3414 netif_tx_stop_all_queues(netdev);
3261 } 3415 }
3262 } 3416 }
3263 3417
@@ -3604,6 +3758,14 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3604 3758
3605 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 3759 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3606 tx_flags |= vlan_tx_tag_get(skb); 3760 tx_flags |= vlan_tx_tag_get(skb);
3761 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3762 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
3763 tx_flags |= (skb->queue_mapping << 13);
3764 }
3765 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3766 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3767 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3768 tx_flags |= (skb->queue_mapping << 13);
3607 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3769 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3608 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3770 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3609 } 3771 }
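Note on the transmit-path change above: in DCB mode the skb's queue index doubles as its 802.1p priority, so it is written into the PCP bits of the VLAN TCI before the whole tag is shifted into the upper half of tx_flags. A standalone sketch (the shift and mask values follow the usual TCI layout and are stated here as assumptions about the driver's defines):

    /*
     * Standalone sketch of the tx_flags packing: the queue index (one
     * queue per traffic class in DCB mode) lands in the 802.1p bits
     * (15:13) of the VLAN tag, and the tag is then shifted into the
     * upper half of tx_flags.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define TX_FLAGS_VLAN_SHIFT     16
    #define TX_FLAGS_VLAN_PRIO_MASK 0xe000u  /* PCP bits of the TCI */

    int main(void)
    {
            uint32_t tx_flags = 0x0064;     /* pretend VLAN tag, VID 100 */
            unsigned int queue = 5;         /* skb->queue_mapping, the TC */

            tx_flags &= ~TX_FLAGS_VLAN_PRIO_MASK;
            tx_flags |= queue << 13;            /* priority = class */
            tx_flags <<= TX_FLAGS_VLAN_SHIFT;   /* TCI into bits 31:16 */

            printf("tx_flags = 0x%08x\n", (unsigned int)tx_flags);
            return 0;
    }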
@@ -3878,6 +4040,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3878 netdev->vlan_features |= NETIF_F_IP_CSUM; 4040 netdev->vlan_features |= NETIF_F_IP_CSUM;
3879 netdev->vlan_features |= NETIF_F_SG; 4041 netdev->vlan_features |= NETIF_F_SG;
3880 4042
4043 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
4044 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
4045
4046#ifdef CONFIG_IXGBE_DCBNL
4047 netdev->dcbnl_ops = &dcbnl_ops;
4048#endif
4049
3881 if (pci_using_dac) 4050 if (pci_using_dac)
3882 netdev->features |= NETIF_F_HIGHDMA; 4051 netdev->features |= NETIF_F_HIGHDMA;
3883 4052
@@ -3946,6 +4115,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3946 } 4115 }
3947 4116
3948 netif_carrier_off(netdev); 4117 netif_carrier_off(netdev);
4118 netif_tx_stop_all_queues(netdev);
3949 4119
3950 ixgbe_napi_add_all(adapter); 4120 ixgbe_napi_add_all(adapter);
3951 4121