-rw-r--r--  drivers/net/Kconfig                  |  10
-rw-r--r--  drivers/net/ixgbe/Makefile           |   2
-rw-r--r--  drivers/net/ixgbe/ixgbe.h            |  25
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.c        | 332
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.h        | 157
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.c  | 398
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.h  |  94
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c     | 356
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c    |  30
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c       | 200
-rw-r--r--  include/linux/dcbnl.h                | 230
-rw-r--r--  include/linux/netdevice.h            |   8
-rw-r--r--  include/linux/rtnetlink.h            |   5
-rw-r--r--  include/net/dcbnl.h                  |  44
-rw-r--r--  net/Kconfig                          |   1
-rw-r--r--  net/Makefile                         |   3
-rw-r--r--  net/dcb/Kconfig                      |  12
-rw-r--r--  net/dcb/Makefile                     |   1
-rw-r--r--  net/dcb/dcbnl.c                      | 704
19 files changed, 2593 insertions, 19 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index afa206590ada..efd461d7c2bb 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2451,6 +2451,16 @@ config IXGBE_DCA
 	  driver.  DCA is a method for warming the CPU cache before data
 	  is used, with the intent of lessening the impact of cache misses.
 
+config IXGBE_DCBNL
+	bool "Data Center Bridging (DCB) Support"
+	default n
+	depends on IXGBE && DCBNL
+	---help---
+	  Say Y here if you want to use Data Center Bridging (DCB) in the
+	  driver.
+
+	  If unsure, say N.
+
 config IXGB
 	tristate "Intel(R) PRO/10GbE support"
 	depends on PCI
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index ccd83d9f579e..3228e508e628 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -34,3 +34,5 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82598.o ixgbe_phy.o
+
+ixgbe-$(CONFIG_IXGBE_DCBNL) += ixgbe_dcb.o ixgbe_dcb_82598.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 132854f646ba..796f189f3879 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -35,7 +35,7 @@
 
 #include "ixgbe_type.h"
 #include "ixgbe_common.h"
-
+#include "ixgbe_dcb.h"
 #ifdef CONFIG_IXGBE_DCA
 #include <linux/dca.h>
 #endif
@@ -84,6 +84,7 @@
 #define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
 #define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
 #define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT	16
 
 #define IXGBE_MAX_LRO_DESCRIPTORS	8
@@ -134,7 +135,7 @@ struct ixgbe_ring {
 
 	u16 reg_idx; /* holds the special value that gets the hardware register
 	              * offset associated with this ring, which is different
-	              * for DCE and RSS modes */
+	              * for DCB and RSS modes */
 
 #ifdef CONFIG_IXGBE_DCA
 	/* cpu for tx queue */
@@ -152,8 +153,10 @@ struct ixgbe_ring {
 	u16 rx_buf_len;
 };
 
+#define RING_F_DCB  0
 #define RING_F_VMDQ 1
 #define RING_F_RSS  2
+#define IXGBE_MAX_DCB_INDICES   8
 #define IXGBE_MAX_RSS_INDICES  16
 #define IXGBE_MAX_VMDQ_INDICES 16
 struct ixgbe_ring_feature {
@@ -164,6 +167,10 @@ struct ixgbe_ring_feature {
 #define MAX_RX_QUEUES 64
 #define MAX_TX_QUEUES 32
 
+#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
+                               ? 8 : 1)
+#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
+
 /* MAX_MSIX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
  */
@@ -215,6 +222,9 @@ struct ixgbe_adapter {
 	struct work_struct reset_task;
 	struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
 	char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
+	struct ixgbe_dcb_config dcb_cfg;
+	struct ixgbe_dcb_config temp_dcb_cfg;
+	u8 dcb_set_bitmap;
 
 	/* Interrupt Throttle Rate */
 	u32 itr_setting;
@@ -270,6 +280,7 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 20)
 #define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 22)
 #define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1 << 23)
+#define IXGBE_FLAG_DCB_ENABLED                  (u32)(1 << 24)
 
 /* default to trying for four seconds */
 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
@@ -313,6 +324,12 @@ enum ixgbe_boards {
 };
 
 extern struct ixgbe_info ixgbe_82598_info;
+#ifdef CONFIG_IXGBE_DCBNL
+extern struct dcbnl_rtnl_ops dcbnl_ops;
+extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
+                              struct ixgbe_dcb_config *dst_dcb_cfg,
+                              int tc_max);
+#endif
 
 extern char ixgbe_driver_name[];
 extern const char ixgbe_driver_version[];
@@ -327,5 +344,9 @@ extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *)
 extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
 extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+extern void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter);
+extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
+void ixgbe_napi_add_all(struct ixgbe_adapter *adapter);
+void ixgbe_napi_del_all(struct ixgbe_adapter *adapter);
 
 #endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
new file mode 100644
index 000000000000..e2e28ac63dec
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -0,0 +1,332 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29
30#include "ixgbe.h"
31#include "ixgbe_type.h"
32#include "ixgbe_dcb.h"
33#include "ixgbe_dcb_82598.h"
34
35/**
36 * ixgbe_dcb_check_config - Check DCB rules for a valid configuration
37 * @dcb_config: Pointer to DCB config structure
38 *
39 * This function checks DCB rules for DCB settings.
40 * The following rules are checked:
41 * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
42 * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
43 * Group must total 100.
44 * 3. A Traffic Class should not be set to both Link Strict Priority
45 * and Group Strict Priority.
46 * 4. Link strict Bandwidth Groups can only have link strict traffic classes
47 * with zero bandwidth.
48 */
49s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *dcb_config)
50{
51 struct tc_bw_alloc *p;
52 s32 ret_val = 0;
53 u8 i, j, bw = 0, bw_id;
54 u8 bw_sum[2][MAX_BW_GROUP];
55 bool link_strict[2][MAX_BW_GROUP];
56
57 memset(bw_sum, 0, sizeof(bw_sum));
58 memset(link_strict, 0, sizeof(link_strict));
59
60 /* First Tx, then Rx */
61 for (i = 0; i < 2; i++) {
62 /* Check each traffic class for rule violation */
63 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
64 p = &dcb_config->tc_config[j].path[i];
65
66 bw = p->bwg_percent;
67 bw_id = p->bwg_id;
68
69 if (bw_id >= MAX_BW_GROUP) {
70 ret_val = DCB_ERR_CONFIG;
71 goto err_config;
72 }
73 if (p->prio_type == prio_link) {
74 link_strict[i][bw_id] = true;
75 /* Link strict should have zero bandwidth */
76 if (bw) {
77 ret_val = DCB_ERR_LS_BW_NONZERO;
78 goto err_config;
79 }
80 } else if (!bw) {
81 /*
82 * Traffic classes without link strict
83 * should have non-zero bandwidth.
84 */
85 ret_val = DCB_ERR_TC_BW_ZERO;
86 goto err_config;
87 }
88 bw_sum[i][bw_id] += bw;
89 }
90
91 bw = 0;
92
93 /* Check each bandwidth group for rule violation */
94 for (j = 0; j < MAX_BW_GROUP; j++) {
95 bw += dcb_config->bw_percentage[i][j];
96 /*
97 * Sum of bandwidth percentages of all traffic classes
98 * within a Bandwidth Group must total 100 except for
99 * link strict group (zero bandwidth).
100 */
101 if (link_strict[i][j]) {
102 if (bw_sum[i][j]) {
103 /*
104 * Link strict group should have zero
105 * bandwidth.
106 */
107 ret_val = DCB_ERR_LS_BWG_NONZERO;
108 goto err_config;
109 }
110 } else if (bw_sum[i][j] != BW_PERCENT &&
111 bw_sum[i][j] != 0) {
112 ret_val = DCB_ERR_TC_BW;
113 goto err_config;
114 }
115 }
116
117 if (bw != BW_PERCENT) {
118 ret_val = DCB_ERR_BW_GROUP;
119 goto err_config;
120 }
121 }
122
123err_config:
124 return ret_val;
125}
126
127/**
128 * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
129 * @dcb_config: Struct containing DCB settings.
130 * @direction: Configuring either Tx or Rx.
131 *
132 * This function calculates the credits allocated to each traffic class.
133 * It should be called only after the rules are checked by
134 * ixgbe_dcb_check_config().
135 */
136s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
137 u8 direction)
138{
139 struct tc_bw_alloc *p;
140 s32 ret_val = 0;
141 /* Initialization values default for Tx settings */
142 u32 credit_refill = 0;
143 u32 credit_max = 0;
144 u16 link_percentage = 0;
145 u8 bw_percent = 0;
146 u8 i;
147
148 if (dcb_config == NULL) {
149 ret_val = DCB_ERR_CONFIG;
150 goto out;
151 }
152
153 /* Find out the link percentage for each TC first */
154 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
155 p = &dcb_config->tc_config[i].path[direction];
156 bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
157
158 link_percentage = p->bwg_percent;
159 /* Must be careful of integer division for very small nums */
160 link_percentage = (link_percentage * bw_percent) / 100;
161 if (p->bwg_percent > 0 && link_percentage == 0)
162 link_percentage = 1;
163
164 /* Save link_percentage for reference */
165 p->link_percent = (u8)link_percentage;
166
167 /* Calculate credit refill and save it */
168 credit_refill = link_percentage * MINIMUM_CREDIT_REFILL;
169 p->data_credits_refill = (u16)credit_refill;
170
171 /* Calculate maximum credit for the TC */
172 credit_max = (link_percentage * MAX_CREDIT) / 100;
173
174 /*
175 * Adjustment based on rule checking, if the percentage
176 * of a TC is too small, the maximum credit may not be
177 * enough to send out a jumbo frame in data plane arbitration.
178 */
179 if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_JUMBO))
180 credit_max = MINIMUM_CREDIT_FOR_JUMBO;
181
182 if (direction == DCB_TX_CONFIG) {
183 /*
184 * Adjustment based on rule checking, if the
185 * percentage of a TC is too small, the maximum
186 * credit may not be enough to send out a TSO
187 * packet in descriptor plane arbitration.
188 */
189 if (credit_max &&
190 (credit_max < MINIMUM_CREDIT_FOR_TSO))
191 credit_max = MINIMUM_CREDIT_FOR_TSO;
192
193 dcb_config->tc_config[i].desc_credits_max =
194 (u16)credit_max;
195 }
196
197 p->data_credits_max = (u16)credit_max;
198 }
199
200out:
201 return ret_val;
202}
203
204/**
205 * ixgbe_dcb_get_tc_stats - Returns status of each traffic class
206 * @hw: pointer to hardware structure
207 * @stats: pointer to statistics structure
208 * @tc_count: Number of elements in bwg_array.
209 *
210 * This function returns the status data for each of the Traffic Classes in use.
211 */
212s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
213 u8 tc_count)
214{
215 s32 ret = 0;
216 if (hw->mac.type == ixgbe_mac_82598EB)
217 ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
218 return ret;
219}
220
221/**
222 * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
223 * @hw: pointer to hardware structure
224 * @stats: pointer to statistics structure
225 * @tc_count: Number of elements in bwg_array.
226 *
227 * This function returns the CBFC status data for each of the Traffic Classes.
228 */
229s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
230 u8 tc_count)
231{
232 s32 ret = 0;
233 if (hw->mac.type == ixgbe_mac_82598EB)
234 ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
235 return ret;
236}
237
238/**
239 * ixgbe_dcb_config_rx_arbiter - Config Rx arbiter
240 * @hw: pointer to hardware structure
241 * @dcb_config: pointer to ixgbe_dcb_config structure
242 *
243 * Configure Rx Data Arbiter and credits for each traffic class.
244 */
245s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw,
246 struct ixgbe_dcb_config *dcb_config)
247{
248 s32 ret = 0;
249 if (hw->mac.type == ixgbe_mac_82598EB)
250 ret = ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
251 return ret;
252}
253
254/**
255 * ixgbe_dcb_config_tx_desc_arbiter - Config Tx Desc arbiter
256 * @hw: pointer to hardware structure
257 * @dcb_config: pointer to ixgbe_dcb_config structure
258 *
259 * Configure Tx Descriptor Arbiter and credits for each traffic class.
260 */
261s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw,
262 struct ixgbe_dcb_config *dcb_config)
263{
264 s32 ret = 0;
265 if (hw->mac.type == ixgbe_mac_82598EB)
266 ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
267 return ret;
268}
269
270/**
271 * ixgbe_dcb_config_tx_data_arbiter - Config Tx data arbiter
272 * @hw: pointer to hardware structure
273 * @dcb_config: pointer to ixgbe_dcb_config structure
274 *
275 * Configure Tx Data Arbiter and credits for each traffic class.
276 */
277s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw,
278 struct ixgbe_dcb_config *dcb_config)
279{
280 s32 ret = 0;
281 if (hw->mac.type == ixgbe_mac_82598EB)
282 ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
283 return ret;
284}
285
286/**
287 * ixgbe_dcb_config_pfc - Config priority flow control
288 * @hw: pointer to hardware structure
289 * @dcb_config: pointer to ixgbe_dcb_config structure
290 *
291 * Configure Priority Flow Control for each traffic class.
292 */
293s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw,
294 struct ixgbe_dcb_config *dcb_config)
295{
296 s32 ret = 0;
297 if (hw->mac.type == ixgbe_mac_82598EB)
298 ret = ixgbe_dcb_config_pfc_82598(hw, dcb_config);
299 return ret;
300}
301
302/**
303 * ixgbe_dcb_config_tc_stats - Config traffic class statistics
304 * @hw: pointer to hardware structure
305 *
306 * Configure queue statistics registers; all queues belonging to the same
307 * traffic class use a single set of queue statistics counters.
308 */
309s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
310{
311 s32 ret = 0;
312 if (hw->mac.type == ixgbe_mac_82598EB)
313 ret = ixgbe_dcb_config_tc_stats_82598(hw);
314 return ret;
315}
316
317/**
318 * ixgbe_dcb_hw_config - Config and enable DCB
319 * @hw: pointer to hardware structure
320 * @dcb_config: pointer to ixgbe_dcb_config structure
321 *
322 * Configure dcb settings and enable dcb mode.
323 */
324s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
325 struct ixgbe_dcb_config *dcb_config)
326{
327 s32 ret = 0;
328 if (hw->mac.type == ixgbe_mac_82598EB)
329 ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
330 return ret;
331}
332
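Illustration (not part of the patch): the integer math in ixgbe_dcb_calculate_tc_credits() above can be exercised in isolation. The standalone sketch below mirrors the Tx-direction calculation for one traffic class; the 12%/25% sample percentages are arbitrary and only show how the jumbo and TSO credit floors kick in for small allocations.

/* Standalone illustration only -- not part of the patch. */
#include <stdio.h>

#define MINIMUM_CREDIT_REFILL    5                      /* 5 * 64B = 320B */
#define MINIMUM_CREDIT_FOR_JUMBO 145                    /* ~9KB / 64B */
#define DCB_MAX_TSO_SIZE         (32 * 1024)
#define MINIMUM_CREDIT_FOR_TSO   (DCB_MAX_TSO_SIZE / 64 + 1)
#define MAX_CREDIT               4095

int main(void)
{
	unsigned int bwg_percent = 12;  /* sample: TC gets 12% of its BWG */
	unsigned int bw_percent = 25;   /* sample: BWG gets 25% of the link */
	unsigned int link_percentage, credit_refill, credit_max;

	/* Same integer math as ixgbe_dcb_calculate_tc_credits() */
	link_percentage = (bwg_percent * bw_percent) / 100;
	if (bwg_percent > 0 && link_percentage == 0)
		link_percentage = 1;

	credit_refill = link_percentage * MINIMUM_CREDIT_REFILL;
	credit_max = (link_percentage * MAX_CREDIT) / 100;
	if (credit_max && credit_max < MINIMUM_CREDIT_FOR_JUMBO)
		credit_max = MINIMUM_CREDIT_FOR_JUMBO;
	if (credit_max && credit_max < MINIMUM_CREDIT_FOR_TSO)
		credit_max = MINIMUM_CREDIT_FOR_TSO;    /* Tx direction only */

	printf("link %u%%, refill %u, max %u\n",
	       link_percentage, credit_refill, credit_max);
	return 0;
}

For the sample numbers this prints "link 3%, refill 15, max 513", i.e. the raw 3% share would be far too small for a jumbo frame or a 32KB TSO, so the floors dominate.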
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
new file mode 100644
index 000000000000..62dfd243bedc
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -0,0 +1,157 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#ifndef _DCB_CONFIG_H_
30#define _DCB_CONFIG_H_
31
32#include "ixgbe_type.h"
33
34/* DCB data structures */
35
36#define IXGBE_MAX_PACKET_BUFFERS 8
37#define MAX_USER_PRIORITY 8
38#define MAX_TRAFFIC_CLASS 8
39#define MAX_BW_GROUP 8
40#define BW_PERCENT 100
41
42#define DCB_TX_CONFIG 0
43#define DCB_RX_CONFIG 1
44
45/* DCB error Codes */
46#define DCB_SUCCESS 0
47#define DCB_ERR_CONFIG -1
48#define DCB_ERR_PARAM -2
49
50/* Transmit and receive Errors */
51/* Error in bandwidth group allocation */
52#define DCB_ERR_BW_GROUP -3
53/* Error in traffic class bandwidth allocation */
54#define DCB_ERR_TC_BW -4
55/* Traffic class has both link strict and group strict enabled */
56#define DCB_ERR_LS_GS -5
57/* Link strict traffic class has non zero bandwidth */
58#define DCB_ERR_LS_BW_NONZERO -6
59/* Link strict bandwidth group has non zero bandwidth */
60#define DCB_ERR_LS_BWG_NONZERO -7
61/* Traffic class has zero bandwidth */
62#define DCB_ERR_TC_BW_ZERO -8
63
64#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF
65
66struct dcb_pfc_tc_debug {
67 u8 tc;
68 u8 pause_status;
69 u64 pause_quanta;
70};
71
72enum strict_prio_type {
73 prio_none = 0,
74 prio_group,
75 prio_link
76};
77
78/* Traffic class bandwidth allocation per direction */
79struct tc_bw_alloc {
80 u8 bwg_id; /* Bandwidth Group (BWG) ID */
81 u8 bwg_percent; /* % of BWG's bandwidth */
82 u8 link_percent; /* % of link bandwidth */
83 u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
84 u16 data_credits_refill; /* Credit refill amount in 64B granularity */
85 u16 data_credits_max; /* Max credits for a configured packet buffer
86 * in 64B granularity.*/
87 enum strict_prio_type prio_type; /* Link or Group Strict Priority */
88};
89
90enum dcb_pfc_type {
91 pfc_disabled = 0,
92 pfc_enabled_full,
93 pfc_enabled_tx,
94 pfc_enabled_rx
95};
96
97/* Traffic class configuration */
98struct tc_configuration {
99 struct tc_bw_alloc path[2]; /* One each for Tx/Rx */
100 enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */
101
102 u16 desc_credits_max; /* For Tx Descriptor arbitration */
103 u8 tc; /* Traffic class (TC) */
104};
105
106enum dcb_rx_pba_cfg {
107 pba_equal, /* PBA[0-7] each use 64KB FIFO */
108 pba_80_48 /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
109};
110
111struct ixgbe_dcb_config {
112 struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
113 u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
114
115 bool round_robin_enable;
116
117 enum dcb_rx_pba_cfg rx_pba_cfg;
118
119 u32 dcb_cfg_version; /* Not used...OS-specific? */
120 u32 link_speed; /* For bandwidth allocation validation purpose */
121};
122
123/* DCB driver APIs */
124
125/* DCB rule checking function.*/
126s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *config);
127
128/* DCB credits calculation */
129s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8);
130
131/* DCB PFC functions */
132s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, struct ixgbe_dcb_config *);
133s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
134
135/* DCB traffic class stats */
136s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
137s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
138
139/* DCB config arbiters */
140s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *,
141 struct ixgbe_dcb_config *);
142s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *,
143 struct ixgbe_dcb_config *);
144s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *, struct ixgbe_dcb_config *);
145
146/* DCB hw initialization */
147s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
148
149/* DCB definitions for credit calculation */
150#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
151#define MINIMUM_CREDIT_REFILL 5 /* 5*64B = 320B */
152#define MINIMUM_CREDIT_FOR_JUMBO 145 /* 145= UpperBound((9*1024+54)/64B) for 9KB jumbo frame */
153#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */
154#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
155#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1204 / 64B */
156
157#endif /* _DCB_CONFIG_H */
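Illustration (not part of the patch): one way to populate struct ixgbe_dcb_config so that it passes ixgbe_dcb_check_config() is an even split -- one TC per bandwidth group, group percentages summing to 100, and each TC taking 100% of its group. The helper below is only a sketch and assumes the ixgbe_dcb.h definitions above are in scope.

/* Illustration only -- not part of the patch; assumes ixgbe_dcb.h above. */
static void example_fill_even_dcb_config(struct ixgbe_dcb_config *cfg)
{
	int dir, i;

	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		for (dir = 0; dir < 2; dir++) {  /* DCB_TX_CONFIG, DCB_RX_CONFIG */
			struct tc_bw_alloc *p = &cfg->tc_config[i].path[dir];

			p->bwg_id = i;                  /* one TC per BWG here */
			p->bwg_percent = 100;           /* TC owns its whole group */
			p->up_to_tc_bitmap = 1 << i;    /* user priority i -> TC i */
			p->prio_type = prio_group;      /* never link+group strict */
		}
		cfg->tc_config[i].dcb_pfc = pfc_disabled;
	}

	/* Group percentages must total exactly 100 per direction: 4*13 + 4*12 */
	for (dir = 0; dir < 2; dir++)
		for (i = 0; i < MAX_BW_GROUP; i++)
			cfg->bw_percentage[dir][i] = (i < 4) ? 13 : 12;

	cfg->rx_pba_cfg = pba_equal;
	cfg->round_robin_enable = false;
}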
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
new file mode 100644
index 000000000000..fce6867a4517
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -0,0 +1,398 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "ixgbe.h"
30#include "ixgbe_type.h"
31#include "ixgbe_dcb.h"
32#include "ixgbe_dcb_82598.h"
33
34/**
35 * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
36 * @hw: pointer to hardware structure
37 * @stats: pointer to statistics structure
38 * @tc_count: Number of elements in bwg_array.
39 *
40 * This function returns the status data for each of the Traffic Classes in use.
41 */
42s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
43 struct ixgbe_hw_stats *stats,
44 u8 tc_count)
45{
46 int tc;
47
48 if (tc_count > MAX_TRAFFIC_CLASS)
49 return DCB_ERR_PARAM;
50
51 /* Statistics pertaining to each traffic class */
52 for (tc = 0; tc < tc_count; tc++) {
53 /* Transmitted Packets */
54 stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
55 /* Transmitted Bytes */
56 stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
57 /* Received Packets */
58 stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
59 /* Received Bytes */
60 stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
61 }
62
63 return 0;
64}
65
66/**
67 * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
68 * @hw: pointer to hardware structure
69 * @stats: pointer to statistics structure
70 * @tc_count: Number of elements in bwg_array.
71 *
72 * This function returns the CBFC status data for each of the Traffic Classes.
73 */
74s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
75 struct ixgbe_hw_stats *stats,
76 u8 tc_count)
77{
78 int tc;
79
80 if (tc_count > MAX_TRAFFIC_CLASS)
81 return DCB_ERR_PARAM;
82
83 for (tc = 0; tc < tc_count; tc++) {
84 /* Priority XOFF Transmitted */
85 stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
86 /* Priority XOFF Received */
87 stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
88 }
89
90 return 0;
91}
92
93/**
94 * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers
95 * @hw: pointer to hardware structure
96 * @dcb_config: pointer to ixgbe_dcb_config structure
97 *
98 * Configure packet buffers for DCB mode.
99 */
100s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
101 struct ixgbe_dcb_config *dcb_config)
102{
103 s32 ret_val = 0;
104 u32 value = IXGBE_RXPBSIZE_64KB;
105 u8 i = 0;
106
107 /* Setup Rx packet buffer sizes */
108 switch (dcb_config->rx_pba_cfg) {
109 case pba_80_48:
110 /* Setup the first four at 80KB */
111 value = IXGBE_RXPBSIZE_80KB;
112 for (; i < 4; i++)
113 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
114 /* Setup the last four at 48KB...don't re-init i */
115 value = IXGBE_RXPBSIZE_48KB;
116 /* Fall Through */
117 case pba_equal:
118 default:
119 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
120 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
121
122 /* Setup Tx packet buffer sizes */
123 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
124 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
125 IXGBE_TXPBSIZE_40KB);
126 }
127 break;
128 }
129
130 return ret_val;
131}
132
133/**
134 * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
135 * @hw: pointer to hardware structure
136 * @dcb_config: pointer to ixgbe_dcb_config structure
137 *
138 * Configure Rx Data Arbiter and credits for each traffic class.
139 */
140s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
141 struct ixgbe_dcb_config *dcb_config)
142{
143 struct tc_bw_alloc *p;
144 u32 reg = 0;
145 u32 credit_refill = 0;
146 u32 credit_max = 0;
147 u8 i = 0;
148
149 reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
150 IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
151
152 reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
153 /* Enable Arbiter */
154 reg &= ~IXGBE_RMCS_ARBDIS;
155 /* Enable Receive Recycle within the BWG */
156 reg |= IXGBE_RMCS_RRM;
157 /* Enable Deficit Fixed Priority arbitration*/
158 reg |= IXGBE_RMCS_DFP;
159
160 IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
161
162 /* Configure traffic class credits and priority */
163 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
164 p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
165 credit_refill = p->data_credits_refill;
166 credit_max = p->data_credits_max;
167
168 reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
169
170 if (p->prio_type == prio_link)
171 reg |= IXGBE_RT2CR_LSP;
172
173 IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
174 }
175
176 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
177 reg |= IXGBE_RDRXCTL_RDMTS_1_2;
178 reg |= IXGBE_RDRXCTL_MPBEN;
179 reg |= IXGBE_RDRXCTL_MCEN;
180 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
181
182 reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
183	/* Make sure there are enough descriptors before arbitration */
184 reg &= ~IXGBE_RXCTRL_DMBYPS;
185 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
186
187 return 0;
188}
189
190/**
191 * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
192 * @hw: pointer to hardware structure
193 * @dcb_config: pointer to ixgbe_dcb_config structure
194 *
195 * Configure Tx Descriptor Arbiter and credits for each traffic class.
196 */
197s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
198 struct ixgbe_dcb_config *dcb_config)
199{
200 struct tc_bw_alloc *p;
201 u32 reg, max_credits;
202 u8 i;
203
204 reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
205
206 /* Enable arbiter */
207 reg &= ~IXGBE_DPMCS_ARBDIS;
208 if (!(dcb_config->round_robin_enable)) {
209 /* Enable DFP and Recycle mode */
210 reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
211 }
212 reg |= IXGBE_DPMCS_TSOEF;
213 /* Configure Max TSO packet size 34KB including payload and headers */
214 reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
215
216 IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
217
218 /* Configure traffic class credits and priority */
219 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
220 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
221 max_credits = dcb_config->tc_config[i].desc_credits_max;
222 reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
223 reg |= p->data_credits_refill;
224 reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
225
226 if (p->prio_type == prio_group)
227 reg |= IXGBE_TDTQ2TCCR_GSP;
228
229 if (p->prio_type == prio_link)
230 reg |= IXGBE_TDTQ2TCCR_LSP;
231
232 IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
233 }
234
235 return 0;
236}
237
238/**
239 * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
240 * @hw: pointer to hardware structure
241 * @dcb_config: pointer to ixgbe_dcb_config structure
242 *
243 * Configure Tx Data Arbiter and credits for each traffic class.
244 */
245s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
246 struct ixgbe_dcb_config *dcb_config)
247{
248 struct tc_bw_alloc *p;
249 u32 reg;
250 u8 i;
251
252 reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
253 /* Enable Data Plane Arbiter */
254 reg &= ~IXGBE_PDPMCS_ARBDIS;
255 /* Enable DFP and Transmit Recycle Mode */
256 reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
257
258 IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
259
260 /* Configure traffic class credits and priority */
261 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
262 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
263 reg = p->data_credits_refill;
264 reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT;
265 reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
266
267 if (p->prio_type == prio_group)
268 reg |= IXGBE_TDPT2TCCR_GSP;
269
270 if (p->prio_type == prio_link)
271 reg |= IXGBE_TDPT2TCCR_LSP;
272
273 IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
274 }
275
276 /* Enable Tx packet buffer division */
277 reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
278 reg |= IXGBE_DTXCTL_ENDBUBD;
279 IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
280
281 return 0;
282}
283
284/**
285 * ixgbe_dcb_config_pfc_82598 - Config priority flow control
286 * @hw: pointer to hardware structure
287 * @dcb_config: pointer to ixgbe_dcb_config structure
288 *
289 * Configure Priority Flow Control for each traffic class.
290 */
291s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
292 struct ixgbe_dcb_config *dcb_config)
293{
294 u32 reg, rx_pba_size;
295 u8 i;
296
297 /* Enable Transmit Priority Flow Control */
298 reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
299 reg &= ~IXGBE_RMCS_TFCE_802_3X;
300 /* correct the reporting of our flow control status */
301 hw->fc.type = ixgbe_fc_none;
302 reg |= IXGBE_RMCS_TFCE_PRIORITY;
303 IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
304
305 /* Enable Receive Priority Flow Control */
306 reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
307 reg &= ~IXGBE_FCTRL_RFCE;
308 reg |= IXGBE_FCTRL_RPFCE;
309 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
310
311 /*
312 * Configure flow control thresholds and enable priority flow control
313 * for each traffic class.
314 */
315 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
316 if (dcb_config->rx_pba_cfg == pba_equal) {
317 rx_pba_size = IXGBE_RXPBSIZE_64KB;
318 } else {
319 rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
320 : IXGBE_RXPBSIZE_48KB;
321 }
322
323 reg = ((rx_pba_size >> 5) & 0xFFF0);
324 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
325 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
326 reg |= IXGBE_FCRTL_XONE;
327
328 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
329
330 reg = ((rx_pba_size >> 2) & 0xFFF0);
331 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
332 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
333 reg |= IXGBE_FCRTH_FCEN;
334
335 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
336 }
337
338 /* Configure pause time */
339 for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
340 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
341
342 /* Configure flow control refresh threshold value */
343 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
344
345 return 0;
346}
347
348/**
349 * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
350 * @hw: pointer to hardware structure
351 *
352 * Configure queue statistics registers; all queues belonging to the same
353 * traffic class use a single set of queue statistics counters.
354 */
355s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
356{
357 u32 reg = 0;
358 u8 i = 0;
359 u8 j = 0;
360
361 /* Receive Queues stats setting - 8 queues per statistics reg */
362 for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
363 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
364 reg |= ((0x1010101) * j);
365 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
366 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
367 reg |= ((0x1010101) * j);
368 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
369 }
370 /* Transmit Queues stats setting - 4 queues per statistics reg */
371 for (i = 0; i < 8; i++) {
372 reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
373 reg |= ((0x1010101) * i);
374 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
375 }
376
377 return 0;
378}
379
380/**
381 * ixgbe_dcb_hw_config_82598 - Config and enable DCB
382 * @hw: pointer to hardware structure
383 * @dcb_config: pointer to ixgbe_dcb_config structure
384 *
385 * Configure dcb settings and enable dcb mode.
386 */
387s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
388 struct ixgbe_dcb_config *dcb_config)
389{
390 ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config);
391 ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
392 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
393 ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
394 ixgbe_dcb_config_pfc_82598(hw, dcb_config);
395 ixgbe_dcb_config_tc_stats_82598(hw);
396
397 return 0;
398}
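Illustration (not part of the patch): the FCRTL/FCRTH threshold math in ixgbe_dcb_config_pfc_82598() above places the XON low-water mark at 1/32 of the per-TC packet buffer and the XOFF high-water mark at 1/4 of it. The sketch below reproduces the shifts for the 64KB case and simply prints the resulting register values.

/* Illustration only -- not part of the patch. */
#include <stdio.h>

#define IXGBE_RXPBSIZE_64KB 0x00010000  /* value written to RXPBSIZE */

int main(void)
{
	unsigned int pba = IXGBE_RXPBSIZE_64KB;
	unsigned int xon_low = (pba >> 5) & 0xFFF0;   /* FCRTL threshold */
	unsigned int xoff_high = (pba >> 2) & 0xFFF0; /* FCRTH threshold */

	/* 0x10000 >> 5 = 0x800 (1/32), 0x10000 >> 2 = 0x4000 (1/4) */
	printf("XON low water: 0x%x, XOFF high water: 0x%x\n",
	       xon_low, xoff_high);
	return 0;
}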
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
new file mode 100644
index 000000000000..1e6a313719d7
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -0,0 +1,94 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#ifndef _DCB_82598_CONFIG_H_
30#define _DCB_82598_CONFIG_H_
31
32/* DCB register definitions */
33
34#define IXGBE_DPMCS_MTSOS_SHIFT 16
35#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */
36#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */
37#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */
38#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
39
40#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */
41
42#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
43#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */
44
45#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet buffers enable */
46#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores (RSS) enable */
47
48#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12
49#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9
50#define IXGBE_TDTQ2TCCR_GSP 0x40000000
51#define IXGBE_TDTQ2TCCR_LSP 0x80000000
52
53#define IXGBE_TDPT2TCCR_MCL_SHIFT 12
54#define IXGBE_TDPT2TCCR_BWG_SHIFT 9
55#define IXGBE_TDPT2TCCR_GSP 0x40000000
56#define IXGBE_TDPT2TCCR_LSP 0x80000000
57
58#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */
59#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */
60#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */
61
62#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */
63
64#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
65#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
66#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
67#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
68
69#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000
70
71/* DCB hardware-specific driver APIs */
72
73/* DCB PFC functions */
74s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
75s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *,
76 u8);
77
78/* DCB traffic class stats */
79s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *);
80s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *,
81 u8);
82
83/* DCB config arbiters */
84s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *,
85 struct ixgbe_dcb_config *);
86s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *,
87 struct ixgbe_dcb_config *);
88s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *,
89 struct ixgbe_dcb_config *);
90
91/* DCB hw initialization */
92s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
93
94#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
new file mode 100644
index 000000000000..50bff2af6b04
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -0,0 +1,356 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "ixgbe.h"
30#include <linux/dcbnl.h>
31
32/* Callbacks for DCB netlink in the kernel */
33#define BIT_DCB_MODE 0x01
34#define BIT_PFC 0x02
35#define BIT_PG_RX 0x04
36#define BIT_PG_TX 0x08
37
38int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
39 struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
40{
41 struct tc_configuration *src_tc_cfg = NULL;
42 struct tc_configuration *dst_tc_cfg = NULL;
43 int i;
44
45 if (!src_dcb_cfg || !dst_dcb_cfg)
46 return -EINVAL;
47
48 for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
49 src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
50 dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
51
52 dst_tc_cfg->path[DCB_TX_CONFIG].prio_type =
53 src_tc_cfg->path[DCB_TX_CONFIG].prio_type;
54
55 dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id =
56 src_tc_cfg->path[DCB_TX_CONFIG].bwg_id;
57
58 dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent =
59 src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent;
60
61 dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap =
62 src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap;
63
64 dst_tc_cfg->path[DCB_RX_CONFIG].prio_type =
65 src_tc_cfg->path[DCB_RX_CONFIG].prio_type;
66
67 dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id =
68 src_tc_cfg->path[DCB_RX_CONFIG].bwg_id;
69
70 dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent =
71 src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent;
72
73 dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap =
74 src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap;
75 }
76
77 for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
78 dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG]
79 [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
80 [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
81 dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG]
82 [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
83 [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
84 }
85
86 for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
87 dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc =
88 src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc;
89 }
90
91 return 0;
92}
93
94static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
95{
96 struct ixgbe_adapter *adapter = netdev_priv(netdev);
97
98 DPRINTK(DRV, INFO, "Get DCB Admin Mode.\n");
99
100 return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
101}
102
103static u16 ixgbe_dcb_select_queue(struct net_device *dev, struct sk_buff *skb)
104{
105 /* All traffic should default to class 0 */
106 return 0;
107}
108
109static void ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
110{
111 struct ixgbe_adapter *adapter = netdev_priv(netdev);
112
113 DPRINTK(DRV, INFO, "Set DCB Admin Mode.\n");
114
115 if (state > 0) {
116 /* Turn on DCB */
117 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
118 return;
119 } else {
120 if (netif_running(netdev))
121 netdev->stop(netdev);
122 ixgbe_reset_interrupt_capability(adapter);
123 ixgbe_napi_del_all(adapter);
124 kfree(adapter->tx_ring);
125 kfree(adapter->rx_ring);
126 adapter->tx_ring = NULL;
127 adapter->rx_ring = NULL;
128 netdev->select_queue = &ixgbe_dcb_select_queue;
129
130 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
131 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
132 ixgbe_init_interrupt_scheme(adapter);
133 ixgbe_napi_add_all(adapter);
134 if (netif_running(netdev))
135 netdev->open(netdev);
136 }
137 } else {
138 /* Turn off DCB */
139 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
140 if (netif_running(netdev))
141 netdev->stop(netdev);
142 ixgbe_reset_interrupt_capability(adapter);
143 ixgbe_napi_del_all(adapter);
144 kfree(adapter->tx_ring);
145 kfree(adapter->rx_ring);
146 adapter->tx_ring = NULL;
147 adapter->rx_ring = NULL;
148 netdev->select_queue = NULL;
149
150 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
151 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
152 ixgbe_init_interrupt_scheme(adapter);
153 ixgbe_napi_add_all(adapter);
154 if (netif_running(netdev))
155 netdev->open(netdev);
156 } else {
157 return;
158 }
159 }
160}
161
162static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
163 u8 *perm_addr)
164{
165 struct ixgbe_adapter *adapter = netdev_priv(netdev);
166 int i;
167
168 for (i = 0; i < netdev->addr_len; i++)
169 perm_addr[i] = adapter->hw.mac.perm_addr[i];
170}
171
172static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
173 u8 prio, u8 bwg_id, u8 bw_pct,
174 u8 up_map)
175{
176 struct ixgbe_adapter *adapter = netdev_priv(netdev);
177
178 if (prio != DCB_ATTR_VALUE_UNDEFINED)
179 adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
180 if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
181 adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id;
182 if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
183 adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent =
184 bw_pct;
185 if (up_map != DCB_ATTR_VALUE_UNDEFINED)
186 adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
187 up_map;
188
189 if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type !=
190 adapter->dcb_cfg.tc_config[tc].path[0].prio_type) ||
191 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id !=
192 adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) ||
193 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
194 adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
195 (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
196 adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
197 adapter->dcb_set_bitmap |= BIT_PG_TX;
198}
199
200static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
201 u8 bw_pct)
202{
203 struct ixgbe_adapter *adapter = netdev_priv(netdev);
204
205 adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
206
207 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
208 adapter->dcb_cfg.bw_percentage[0][bwg_id])
209 adapter->dcb_set_bitmap |= BIT_PG_RX;
210}
211
212static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
213 u8 prio, u8 bwg_id, u8 bw_pct,
214 u8 up_map)
215{
216 struct ixgbe_adapter *adapter = netdev_priv(netdev);
217
218 if (prio != DCB_ATTR_VALUE_UNDEFINED)
219 adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
220 if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
221 adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id;
222 if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
223 adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent =
224 bw_pct;
225 if (up_map != DCB_ATTR_VALUE_UNDEFINED)
226 adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
227 up_map;
228
229 if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type !=
230 adapter->dcb_cfg.tc_config[tc].path[1].prio_type) ||
231 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id !=
232 adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) ||
233 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
234 adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
235 (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
236 adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
237 adapter->dcb_set_bitmap |= BIT_PG_RX;
238}
239
240static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
241 u8 bw_pct)
242{
243 struct ixgbe_adapter *adapter = netdev_priv(netdev);
244
245 adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
246
247 if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
248 adapter->dcb_cfg.bw_percentage[1][bwg_id])
249 adapter->dcb_set_bitmap |= BIT_PG_RX;
250}
251
252static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
253 u8 *prio, u8 *bwg_id, u8 *bw_pct,
254 u8 *up_map)
255{
256 struct ixgbe_adapter *adapter = netdev_priv(netdev);
257
258 *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
259 *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
260 *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent;
261 *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap;
262}
263
264static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
265 u8 *bw_pct)
266{
267 struct ixgbe_adapter *adapter = netdev_priv(netdev);
268
269 *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id];
270}
271
272static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
273 u8 *prio, u8 *bwg_id, u8 *bw_pct,
274 u8 *up_map)
275{
276 struct ixgbe_adapter *adapter = netdev_priv(netdev);
277
278 *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
279 *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
280 *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent;
281 *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap;
282}
283
284static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
285 u8 *bw_pct)
286{
287 struct ixgbe_adapter *adapter = netdev_priv(netdev);
288
289 *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
290}
291
292static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
293 u8 setting)
294{
295 struct ixgbe_adapter *adapter = netdev_priv(netdev);
296
297 adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
298 if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
299 adapter->dcb_cfg.tc_config[priority].dcb_pfc)
300 adapter->dcb_set_bitmap |= BIT_PFC;
301}
302
303static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
304 u8 *setting)
305{
306 struct ixgbe_adapter *adapter = netdev_priv(netdev);
307
308 *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
309}
310
311static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
312{
313 struct ixgbe_adapter *adapter = netdev_priv(netdev);
314 int ret;
315
316 if (!adapter->dcb_set_bitmap)
317 return 1;
318
319 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
320 msleep(1);
321
322 if (netif_running(netdev))
323 ixgbe_down(adapter);
324
325 ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
326 adapter->ring_feature[RING_F_DCB].indices);
327 if (ret) {
328 clear_bit(__IXGBE_RESETTING, &adapter->state);
329 return ret;
330 }
331
332 if (netif_running(netdev))
333 ixgbe_up(adapter);
334
335 adapter->dcb_set_bitmap = 0x00;
336 clear_bit(__IXGBE_RESETTING, &adapter->state);
337 return ret;
338}
339
340struct dcbnl_rtnl_ops dcbnl_ops = {
341 .getstate = ixgbe_dcbnl_get_state,
342 .setstate = ixgbe_dcbnl_set_state,
343 .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
344 .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx,
345 .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx,
346 .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx,
347 .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx,
348 .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx,
349 .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx,
350 .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx,
351 .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx,
352 .setpfccfg = ixgbe_dcbnl_set_pfc_cfg,
353 .getpfccfg = ixgbe_dcbnl_get_pfc_cfg,
354 .setall = ixgbe_dcbnl_set_all
355};
356
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index a610016a0172..aaa4404e7c5f 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -97,9 +97,18 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
            ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
            ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
            (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
-#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
-#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#define IXGBE_PB_STATS_LEN ( \
+                 (((struct ixgbe_adapter *)netdev->priv)->flags & \
+                 IXGBE_FLAG_DCB_ENABLED) ? \
+                 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
+                  sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
+                  sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
+                  sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
+                 / sizeof(u64) : 0)
+#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
+                         IXGBE_PB_STATS_LEN + \
+                         IXGBE_QUEUE_STATS_LEN)
 
 static int ixgbe_get_settings(struct net_device *netdev,
                               struct ethtool_cmd *ecmd)
@@ -831,6 +840,16 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			data[i + k] = queue_stat[k];
 		i += k;
 	}
+	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+		for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
+			data[i++] = adapter->stats.pxontxc[j];
+			data[i++] = adapter->stats.pxofftxc[j];
+		}
+		for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
+			data[i++] = adapter->stats.pxonrxc[j];
+			data[i++] = adapter->stats.pxoffrxc[j];
+		}
+	}
 }
 
 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
@@ -859,6 +878,13 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
 			sprintf(p, "rx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
 		}
+		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+			for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+				sprintf(p, "tx_pb_%u_pxon", i);
+			}
+			for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
+			}
+		}
 		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
 	}
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 40108523377f..91dde9cdab66 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -404,7 +404,7 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
 
 	if (adapter->netdev->features & NETIF_F_LRO &&
 	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
-		if (adapter->vlgrp && is_vlan)
+		if (adapter->vlgrp && is_vlan && (tag != 0))
 			lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
 			                             adapter->vlgrp, tag,
 			                             rx_desc);
@@ -413,12 +413,12 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
 		ring->lro_used = true;
 	} else {
 		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
-			if (adapter->vlgrp && is_vlan)
+			if (adapter->vlgrp && is_vlan && (tag != 0))
 				vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
 			else
 				netif_receive_skb(skb);
 		} else {
-			if (adapter->vlgrp && is_vlan)
+			if (adapter->vlgrp && is_vlan && (tag != 0))
 				vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
 			else
 				netif_rx(skb);
@@ -1670,10 +1670,12 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	 * effects of setting this bit are only that SRRCTL must be
 	 * fully programmed [0..15]
 	 */
-	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-	rdrxctl |= IXGBE_RDRXCTL_MVMEN;
-	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
-
+	if (adapter->flags &
+	    (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) {
+		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
+		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+	}
 
 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 		/* Fill out redirection table */
@@ -1732,6 +1734,16 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
1732 ixgbe_irq_disable(adapter); 1734 ixgbe_irq_disable(adapter);
1733 adapter->vlgrp = grp; 1735 adapter->vlgrp = grp;
1734 1736
1737 /*
1738 * For a DCB driver, always enable VLAN tag stripping so we can
1739 * still receive traffic from a DCB-enabled host even if we're
1740 * not in DCB mode.
1741 */
1742 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
1743 ctrl |= IXGBE_VLNCTRL_VME;
1744 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1745 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1746
1735 if (grp) { 1747 if (grp) {
1736 /* enable VLAN tag insert/strip */ 1748 /* enable VLAN tag insert/strip */
1737 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 1749 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
@@ -1896,6 +1908,44 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
1896 } 1908 }
1897} 1909}
1898 1910
1911#ifdef CONFIG_IXGBE_DCBNL
1912/*
1913 * ixgbe_configure_dcb - Configure DCB hardware
1914 * @adapter: ixgbe adapter struct
1915 *
1916 * This is called by the driver on open to configure the DCB hardware.
 1917 * This is also called by the rtnetlink (dcbnl) interface when reconfiguring
1918 * the DCB state.
1919 */
1920static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
1921{
1922 struct ixgbe_hw *hw = &adapter->hw;
1923 u32 txdctl, vlnctrl;
1924 int i, j;
1925
1926 ixgbe_dcb_check_config(&adapter->dcb_cfg);
1927 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
1928 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
1929
1930 /* reconfigure the hardware */
1931 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
1932
1933 for (i = 0; i < adapter->num_tx_queues; i++) {
1934 j = adapter->tx_ring[i].reg_idx;
1935 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
1936 /* PThresh workaround for Tx hang with DFP enabled. */
1937 txdctl |= 32;
1938 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
1939 }
1940 /* Enable VLAN tag insert/strip */
1941 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1942 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
1943 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1944 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1945 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
1946}
1947
1948#endif
1899static void ixgbe_configure(struct ixgbe_adapter *adapter) 1949static void ixgbe_configure(struct ixgbe_adapter *adapter)
1900{ 1950{
1901 struct net_device *netdev = adapter->netdev; 1951 struct net_device *netdev = adapter->netdev;
@@ -1904,6 +1954,16 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
1904 ixgbe_set_rx_mode(netdev); 1954 ixgbe_set_rx_mode(netdev);
1905 1955
1906 ixgbe_restore_vlan(adapter); 1956 ixgbe_restore_vlan(adapter);
1957#ifdef CONFIG_IXGBE_DCBNL
1958 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1959 netif_set_gso_max_size(netdev, 32768);
1960 ixgbe_configure_dcb(adapter);
1961 } else {
1962 netif_set_gso_max_size(netdev, 65536);
1963 }
1964#else
1965 netif_set_gso_max_size(netdev, 65536);
1966#endif
1907 1967
1908 ixgbe_configure_tx(adapter); 1968 ixgbe_configure_tx(adapter);
1909 ixgbe_configure_rx(adapter); 1969 ixgbe_configure_rx(adapter);
@@ -1995,9 +2055,6 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1995 2055
1996 ixgbe_irq_enable(adapter); 2056 ixgbe_irq_enable(adapter);
1997 2057
1998 /* enable transmits */
1999 netif_tx_start_all_queues(netdev);
2000
2001 /* bring the link up in the watchdog, this could race with our first 2058 /* bring the link up in the watchdog, this could race with our first
2002 * link up interrupt but shouldn't be a problem */ 2059 * link up interrupt but shouldn't be a problem */
2003 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2060 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -2260,6 +2317,11 @@ static void ixgbe_reset_task(struct work_struct *work)
2260 struct ixgbe_adapter *adapter; 2317 struct ixgbe_adapter *adapter;
2261 adapter = container_of(work, struct ixgbe_adapter, reset_task); 2318 adapter = container_of(work, struct ixgbe_adapter, reset_task);
2262 2319
2320 /* If we're already down or resetting, just bail */
2321 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
2322 test_bit(__IXGBE_RESETTING, &adapter->state))
2323 return;
2324
2263 adapter->tx_timeout_count++; 2325 adapter->tx_timeout_count++;
2264 2326
2265 ixgbe_reinit_locked(adapter); 2327 ixgbe_reinit_locked(adapter);
@@ -2269,15 +2331,31 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2269{ 2331{
2270 int nrq = 1, ntq = 1; 2332 int nrq = 1, ntq = 1;
2271 int feature_mask = 0, rss_i, rss_m; 2333 int feature_mask = 0, rss_i, rss_m;
2334 int dcb_i, dcb_m;
2272 2335
2273 /* Number of supported queues */ 2336 /* Number of supported queues */
2274 switch (adapter->hw.mac.type) { 2337 switch (adapter->hw.mac.type) {
2275 case ixgbe_mac_82598EB: 2338 case ixgbe_mac_82598EB:
2339 dcb_i = adapter->ring_feature[RING_F_DCB].indices;
2340 dcb_m = 0;
2276 rss_i = adapter->ring_feature[RING_F_RSS].indices; 2341 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2277 rss_m = 0; 2342 rss_m = 0;
2278 feature_mask |= IXGBE_FLAG_RSS_ENABLED; 2343 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2344 feature_mask |= IXGBE_FLAG_DCB_ENABLED;
2279 2345
2280 switch (adapter->flags & feature_mask) { 2346 switch (adapter->flags & feature_mask) {
2347 case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
2348 dcb_m = 0x7 << 3;
2349 rss_i = min(8, rss_i);
2350 rss_m = 0x7;
2351 nrq = dcb_i * rss_i;
2352 ntq = min(MAX_TX_QUEUES, dcb_i * rss_i);
2353 break;
2354 case (IXGBE_FLAG_DCB_ENABLED):
2355 dcb_m = 0x7 << 3;
2356 nrq = dcb_i;
2357 ntq = dcb_i;
2358 break;
2281 case (IXGBE_FLAG_RSS_ENABLED): 2359 case (IXGBE_FLAG_RSS_ENABLED):
2282 rss_m = 0xF; 2360 rss_m = 0xF;
2283 nrq = rss_i; 2361 nrq = rss_i;
@@ -2285,6 +2363,8 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2285 break; 2363 break;
2286 case 0: 2364 case 0:
2287 default: 2365 default:
2366 dcb_i = 0;
2367 dcb_m = 0;
2288 rss_i = 0; 2368 rss_i = 0;
2289 rss_m = 0; 2369 rss_m = 0;
2290 nrq = 1; 2370 nrq = 1;
@@ -2292,6 +2372,12 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2292 break; 2372 break;
2293 } 2373 }
2294 2374
2375 /* Sanity check, we should never have zero queues */
2376 nrq = (nrq ?:1);
2377 ntq = (ntq ?:1);
2378
2379 adapter->ring_feature[RING_F_DCB].indices = dcb_i;
2380 adapter->ring_feature[RING_F_DCB].mask = dcb_m;
2295 adapter->ring_feature[RING_F_RSS].indices = rss_i; 2381 adapter->ring_feature[RING_F_RSS].indices = rss_i;
2296 adapter->ring_feature[RING_F_RSS].mask = rss_m; 2382 adapter->ring_feature[RING_F_RSS].mask = rss_m;
2297 break; 2383 break;
@@ -2343,6 +2429,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2343 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 2429 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2344 kfree(adapter->msix_entries); 2430 kfree(adapter->msix_entries);
2345 adapter->msix_entries = NULL; 2431 adapter->msix_entries = NULL;
2432 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2346 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 2433 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2347 ixgbe_set_num_queues(adapter); 2434 ixgbe_set_num_queues(adapter);
2348 } else { 2435 } else {
@@ -2362,15 +2449,42 @@ static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2362{ 2449{
2363 int feature_mask = 0, rss_i; 2450 int feature_mask = 0, rss_i;
2364 int i, txr_idx, rxr_idx; 2451 int i, txr_idx, rxr_idx;
2452 int dcb_i;
2365 2453
2366 /* Number of supported queues */ 2454 /* Number of supported queues */
2367 switch (adapter->hw.mac.type) { 2455 switch (adapter->hw.mac.type) {
2368 case ixgbe_mac_82598EB: 2456 case ixgbe_mac_82598EB:
2457 dcb_i = adapter->ring_feature[RING_F_DCB].indices;
2369 rss_i = adapter->ring_feature[RING_F_RSS].indices; 2458 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2370 txr_idx = 0; 2459 txr_idx = 0;
2371 rxr_idx = 0; 2460 rxr_idx = 0;
2461 feature_mask |= IXGBE_FLAG_DCB_ENABLED;
2372 feature_mask |= IXGBE_FLAG_RSS_ENABLED; 2462 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2373 switch (adapter->flags & feature_mask) { 2463 switch (adapter->flags & feature_mask) {
2464 case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
2465 for (i = 0; i < dcb_i; i++) {
2466 int j;
2467 /* Rx first */
2468 for (j = 0; j < adapter->num_rx_queues; j++) {
2469 adapter->rx_ring[rxr_idx].reg_idx =
2470 i << 3 | j;
2471 rxr_idx++;
2472 }
2473 /* Tx now */
2474 for (j = 0; j < adapter->num_tx_queues; j++) {
2475 adapter->tx_ring[txr_idx].reg_idx =
2476 i << 2 | (j >> 1);
2477 if (j & 1)
2478 txr_idx++;
2479 }
2480 }
2481 case (IXGBE_FLAG_DCB_ENABLED):
2482 /* the number of queues is assumed to be symmetric */
2483 for (i = 0; i < dcb_i; i++) {
2484 adapter->rx_ring[i].reg_idx = i << 3;
2485 adapter->tx_ring[i].reg_idx = i << 2;
2486 }
2487 break;
2374 case (IXGBE_FLAG_RSS_ENABLED): 2488 case (IXGBE_FLAG_RSS_ENABLED):
2375 for (i = 0; i < adapter->num_rx_queues; i++) 2489 for (i = 0; i < adapter->num_rx_queues; i++)
2376 adapter->rx_ring[i].reg_idx = i; 2490 adapter->rx_ring[i].reg_idx = i;
@@ -2395,7 +2509,7 @@ static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2395 * number of queues at compile-time. The polling_netdev array is 2509 * number of queues at compile-time. The polling_netdev array is
2396 * intended for Multiqueue, but should work fine with a single queue. 2510 * intended for Multiqueue, but should work fine with a single queue.
2397 **/ 2511 **/
2398static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter) 2512static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
2399{ 2513{
2400 int i; 2514 int i;
2401 2515
@@ -2465,6 +2579,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2465 adapter->msix_entries = kcalloc(v_budget, 2579 adapter->msix_entries = kcalloc(v_budget,
2466 sizeof(struct msix_entry), GFP_KERNEL); 2580 sizeof(struct msix_entry), GFP_KERNEL);
2467 if (!adapter->msix_entries) { 2581 if (!adapter->msix_entries) {
2582 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2468 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 2583 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2469 ixgbe_set_num_queues(adapter); 2584 ixgbe_set_num_queues(adapter);
2470 kfree(adapter->tx_ring); 2585 kfree(adapter->tx_ring);
@@ -2505,7 +2620,7 @@ out:
2505 return err; 2620 return err;
2506} 2621}
2507 2622
2508static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) 2623void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2509{ 2624{
2510 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2625 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2511 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 2626 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
@@ -2529,7 +2644,7 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2529 * - Hardware queue count (num_*_queues) 2644 * - Hardware queue count (num_*_queues)
2530 * - defined by miscellaneous hardware support/features (RSS, etc.) 2645 * - defined by miscellaneous hardware support/features (RSS, etc.)
2531 **/ 2646 **/
2532static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) 2647int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
2533{ 2648{
2534 int err; 2649 int err;
2535 2650
@@ -2577,6 +2692,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2577 struct ixgbe_hw *hw = &adapter->hw; 2692 struct ixgbe_hw *hw = &adapter->hw;
2578 struct pci_dev *pdev = adapter->pdev; 2693 struct pci_dev *pdev = adapter->pdev;
2579 unsigned int rss; 2694 unsigned int rss;
2695#ifdef CONFIG_IXGBE_DCBNL
2696 int j;
2697 struct tc_configuration *tc;
2698#endif
2580 2699
2581 /* PCI config space info */ 2700 /* PCI config space info */
2582 2701
@@ -2590,6 +2709,27 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2590 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); 2709 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
2591 adapter->ring_feature[RING_F_RSS].indices = rss; 2710 adapter->ring_feature[RING_F_RSS].indices = rss;
2592 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 2711 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
2712 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
2713
2714#ifdef CONFIG_IXGBE_DCBNL
2715 /* Configure DCB traffic classes */
2716 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
2717 tc = &adapter->dcb_cfg.tc_config[j];
2718 tc->path[DCB_TX_CONFIG].bwg_id = 0;
2719 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
2720 tc->path[DCB_RX_CONFIG].bwg_id = 0;
2721 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
2722 tc->dcb_pfc = pfc_disabled;
2723 }
2724 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
2725 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
2726 adapter->dcb_cfg.rx_pba_cfg = pba_equal;
2727 adapter->dcb_cfg.round_robin_enable = false;
2728 adapter->dcb_set_bitmap = 0x00;
2729 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
2730 adapter->ring_feature[RING_F_DCB].indices);
2731
2732#endif
2593 if (hw->mac.ops.get_media_type && 2733 if (hw->mac.ops.get_media_type &&
2594 (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) 2734 (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper))
2595 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 2735 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
@@ -2967,7 +3107,7 @@ static int ixgbe_close(struct net_device *netdev)
2967 * @adapter: private struct 3107 * @adapter: private struct
2968 * helper function to napi_add each possible q_vector->napi 3108 * helper function to napi_add each possible q_vector->napi
2969 */ 3109 */
2970static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) 3110void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
2971{ 3111{
2972 int q_idx, q_vectors; 3112 int q_idx, q_vectors;
2973 int (*poll)(struct napi_struct *, int); 3113 int (*poll)(struct napi_struct *, int);
@@ -2988,7 +3128,7 @@ static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
2988 } 3128 }
2989} 3129}
2990 3130
2991static void ixgbe_napi_del_all(struct ixgbe_adapter *adapter) 3131void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
2992{ 3132{
2993 int q_idx; 3133 int q_idx;
2994 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 3134 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -3109,6 +3249,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
3109 adapter->stats.mpc[i] += mpc; 3249 adapter->stats.mpc[i] += mpc;
3110 total_mpc += adapter->stats.mpc[i]; 3250 total_mpc += adapter->stats.mpc[i];
3111 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 3251 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3252 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3253 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
3254 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3255 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
3256 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
3257 IXGBE_PXONRXC(i));
3258 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
3259 IXGBE_PXONTXC(i));
3260 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
3261 IXGBE_PXOFFRXC(i));
3262 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
3263 IXGBE_PXOFFTXC(i));
3112 } 3264 }
3113 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 3265 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3114 /* work around hardware counting issue */ 3266 /* work around hardware counting issue */
@@ -3248,6 +3400,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
3248 (FLOW_TX ? "TX" : "None")))); 3400 (FLOW_TX ? "TX" : "None"))));
3249 3401
3250 netif_carrier_on(netdev); 3402 netif_carrier_on(netdev);
3403 netif_tx_wake_all_queues(netdev);
3251 } else { 3404 } else {
3252 /* Force detection of hung controller */ 3405 /* Force detection of hung controller */
3253 adapter->detect_tx_hung = true; 3406 adapter->detect_tx_hung = true;
@@ -3258,6 +3411,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
3258 if (netif_carrier_ok(netdev)) { 3411 if (netif_carrier_ok(netdev)) {
3259 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 3412 DPRINTK(LINK, INFO, "NIC Link is Down\n");
3260 netif_carrier_off(netdev); 3413 netif_carrier_off(netdev);
3414 netif_tx_stop_all_queues(netdev);
3261 } 3415 }
3262 } 3416 }
3263 3417
@@ -3604,6 +3758,14 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3604 3758
3605 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 3759 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3606 tx_flags |= vlan_tx_tag_get(skb); 3760 tx_flags |= vlan_tx_tag_get(skb);
3761 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3762 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
3763 tx_flags |= (skb->queue_mapping << 13);
3764 }
3765 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3766 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3767 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3768 tx_flags |= (skb->queue_mapping << 13);
3607 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3769 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3608 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3770 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3609 } 3771 }
@@ -3878,6 +4040,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3878 netdev->vlan_features |= NETIF_F_IP_CSUM; 4040 netdev->vlan_features |= NETIF_F_IP_CSUM;
3879 netdev->vlan_features |= NETIF_F_SG; 4041 netdev->vlan_features |= NETIF_F_SG;
3880 4042
4043 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
4044 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
4045
4046#ifdef CONFIG_IXGBE_DCBNL
4047 netdev->dcbnl_ops = &dcbnl_ops;
4048#endif
4049
3881 if (pci_using_dac) 4050 if (pci_using_dac)
3882 netdev->features |= NETIF_F_HIGHDMA; 4051 netdev->features |= NETIF_F_HIGHDMA;
3883 4052
@@ -3946,6 +4115,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3946 } 4115 }
3947 4116
3948 netif_carrier_off(netdev); 4117 netif_carrier_off(netdev);
4118 netif_tx_stop_all_queues(netdev);
3949 4119
3950 ixgbe_napi_add_all(adapter); 4120 ixgbe_napi_add_all(adapter);
3951 4121
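
The ixgbe_xmit_frame() hunk above folds the DCB traffic class into the 802.1p priority field of the outgoing VLAN tag: skb->queue_mapping lands in tag bits 13-15 (the bits covered by IXGBE_TX_FLAGS_VLAN_PRIO_MASK) before the whole tag is shifted into the upper half of tx_flags and IXGBE_TX_FLAGS_VLAN is set. A stand-alone sketch of that bit arithmetic (illustrative only; the ex_/EX_ names are invented here and simply mirror the ixgbe.h values added by this patch):

#define EX_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000	/* PCP: tag bits 13..15 */
#define EX_TX_FLAGS_VLAN_SHIFT		16		/* tag lives in bits 31..16 */

/* Combine a raw 802.1Q tag with a DCB traffic class, as the DCB branch of
 * ixgbe_xmit_frame() does when IXGBE_FLAG_DCB_ENABLED is set. */
static unsigned int ex_pack_tx_flags(unsigned int vlan_tag, unsigned int tc)
{
	unsigned int tx_flags = vlan_tag;

	tx_flags &= ~EX_TX_FLAGS_VLAN_PRIO_MASK;	/* drop the caller's priority */
	tx_flags |= tc << 13;				/* priority = traffic class   */
	return tx_flags << EX_TX_FLAGS_VLAN_SHIFT;	/* tag into the upper word    */
}

For tc = 5 and a tag carrying VLAN ID 100 (0x064), for instance, this returns 0xa0640000, i.e. PCP 5 and VID 100 in bits 31..16; the driver then ORs IXGBE_TX_FLAGS_VLAN into the low bits.
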
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
new file mode 100644
index 000000000000..32d32c1ee410
--- /dev/null
+++ b/include/linux/dcbnl.h
@@ -0,0 +1,230 @@
1/*
2 * Copyright (c) 2008, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Lucy Liu <lucy.liu@intel.com>
18 */
19
20#ifndef __LINUX_DCBNL_H__
21#define __LINUX_DCBNL_H__
22
23#define DCB_PROTO_VERSION 1
24
25struct dcbmsg {
26 unsigned char dcb_family;
27 __u8 cmd;
28 __u16 dcb_pad;
29};
30
31/**
32 * enum dcbnl_commands - supported DCB commands
33 *
34 * @DCB_CMD_UNDEFINED: unspecified command to catch errors
35 * @DCB_CMD_GSTATE: request the state of DCB in the device
36 * @DCB_CMD_SSTATE: set the state of DCB in the device
37 * @DCB_CMD_PGTX_GCFG: request the priority group configuration for Tx
38 * @DCB_CMD_PGTX_SCFG: set the priority group configuration for Tx
39 * @DCB_CMD_PGRX_GCFG: request the priority group configuration for Rx
40 * @DCB_CMD_PGRX_SCFG: set the priority group configuration for Rx
41 * @DCB_CMD_PFC_GCFG: request the priority flow control configuration
42 * @DCB_CMD_PFC_SCFG: set the priority flow control configuration
43 * @DCB_CMD_SET_ALL: apply all changes to the underlying device
44 * @DCB_CMD_GPERM_HWADDR: get the permanent MAC address of the underlying
45 * device. Only useful when using bonding.
46 */
47enum dcbnl_commands {
48 DCB_CMD_UNDEFINED,
49
50 DCB_CMD_GSTATE,
51 DCB_CMD_SSTATE,
52
53 DCB_CMD_PGTX_GCFG,
54 DCB_CMD_PGTX_SCFG,
55 DCB_CMD_PGRX_GCFG,
56 DCB_CMD_PGRX_SCFG,
57
58 DCB_CMD_PFC_GCFG,
59 DCB_CMD_PFC_SCFG,
60
61 DCB_CMD_SET_ALL,
62 DCB_CMD_GPERM_HWADDR,
63
64 __DCB_CMD_ENUM_MAX,
65 DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1,
66};
67
68
69/**
70 * enum dcbnl_attrs - DCB top-level netlink attributes
71 *
72 * @DCB_ATTR_UNDEFINED: unspecified attribute to catch errors
73 * @DCB_ATTR_IFNAME: interface name of the underlying device (NLA_STRING)
74 * @DCB_ATTR_STATE: enable state of DCB in the device (NLA_U8)
75 * @DCB_ATTR_PFC_STATE: enable state of PFC in the device (NLA_U8)
76 * @DCB_ATTR_PFC_CFG: priority flow control configuration (NLA_NESTED)
77 * @DCB_ATTR_NUM_TC: number of traffic classes supported in the device (NLA_U8)
78 * @DCB_ATTR_PG_CFG: priority group configuration (NLA_NESTED)
 79 * @DCB_ATTR_SET_ALL: flag indicating whether to commit changes to hardware (NLA_U8)
80 * @DCB_ATTR_PERM_HWADDR: MAC address of the physical device (NLA_NESTED)
81 */
82enum dcbnl_attrs {
83 DCB_ATTR_UNDEFINED,
84
85 DCB_ATTR_IFNAME,
86 DCB_ATTR_STATE,
87 DCB_ATTR_PFC_STATE,
88 DCB_ATTR_PFC_CFG,
89 DCB_ATTR_NUM_TC,
90 DCB_ATTR_PG_CFG,
91 DCB_ATTR_SET_ALL,
92 DCB_ATTR_PERM_HWADDR,
93
94 __DCB_ATTR_ENUM_MAX,
95 DCB_ATTR_MAX = __DCB_ATTR_ENUM_MAX - 1,
96};
97
98/**
99 * enum dcbnl_pfc_attrs - DCB Priority Flow Control user priority nested attrs
100 *
101 * @DCB_PFC_UP_ATTR_UNDEFINED: unspecified attribute to catch errors
102 * @DCB_PFC_UP_ATTR_0: Priority Flow Control value for User Priority 0 (NLA_U8)
103 * @DCB_PFC_UP_ATTR_1: Priority Flow Control value for User Priority 1 (NLA_U8)
104 * @DCB_PFC_UP_ATTR_2: Priority Flow Control value for User Priority 2 (NLA_U8)
105 * @DCB_PFC_UP_ATTR_3: Priority Flow Control value for User Priority 3 (NLA_U8)
106 * @DCB_PFC_UP_ATTR_4: Priority Flow Control value for User Priority 4 (NLA_U8)
107 * @DCB_PFC_UP_ATTR_5: Priority Flow Control value for User Priority 5 (NLA_U8)
108 * @DCB_PFC_UP_ATTR_6: Priority Flow Control value for User Priority 6 (NLA_U8)
109 * @DCB_PFC_UP_ATTR_7: Priority Flow Control value for User Priority 7 (NLA_U8)
110 * @DCB_PFC_UP_ATTR_MAX: highest attribute number currently defined
111 * @DCB_PFC_UP_ATTR_ALL: apply to all priority flow control attrs (NLA_FLAG)
112 *
113 */
114enum dcbnl_pfc_up_attrs {
115 DCB_PFC_UP_ATTR_UNDEFINED,
116
117 DCB_PFC_UP_ATTR_0,
118 DCB_PFC_UP_ATTR_1,
119 DCB_PFC_UP_ATTR_2,
120 DCB_PFC_UP_ATTR_3,
121 DCB_PFC_UP_ATTR_4,
122 DCB_PFC_UP_ATTR_5,
123 DCB_PFC_UP_ATTR_6,
124 DCB_PFC_UP_ATTR_7,
125 DCB_PFC_UP_ATTR_ALL,
126
127 __DCB_PFC_UP_ATTR_ENUM_MAX,
128 DCB_PFC_UP_ATTR_MAX = __DCB_PFC_UP_ATTR_ENUM_MAX - 1,
129};
130
131/**
132 * enum dcbnl_pg_attrs - DCB Priority Group attributes
133 *
134 * @DCB_PG_ATTR_UNDEFINED: unspecified attribute to catch errors
135 * @DCB_PG_ATTR_TC_0: Priority Group Traffic Class 0 configuration (NLA_NESTED)
136 * @DCB_PG_ATTR_TC_1: Priority Group Traffic Class 1 configuration (NLA_NESTED)
137 * @DCB_PG_ATTR_TC_2: Priority Group Traffic Class 2 configuration (NLA_NESTED)
138 * @DCB_PG_ATTR_TC_3: Priority Group Traffic Class 3 configuration (NLA_NESTED)
139 * @DCB_PG_ATTR_TC_4: Priority Group Traffic Class 4 configuration (NLA_NESTED)
140 * @DCB_PG_ATTR_TC_5: Priority Group Traffic Class 5 configuration (NLA_NESTED)
141 * @DCB_PG_ATTR_TC_6: Priority Group Traffic Class 6 configuration (NLA_NESTED)
142 * @DCB_PG_ATTR_TC_7: Priority Group Traffic Class 7 configuration (NLA_NESTED)
143 * @DCB_PG_ATTR_TC_MAX: highest attribute number currently defined
144 * @DCB_PG_ATTR_TC_ALL: apply to all traffic classes (NLA_NESTED)
145 * @DCB_PG_ATTR_BW_ID_0: Percent of link bandwidth for Priority Group 0 (NLA_U8)
146 * @DCB_PG_ATTR_BW_ID_1: Percent of link bandwidth for Priority Group 1 (NLA_U8)
147 * @DCB_PG_ATTR_BW_ID_2: Percent of link bandwidth for Priority Group 2 (NLA_U8)
148 * @DCB_PG_ATTR_BW_ID_3: Percent of link bandwidth for Priority Group 3 (NLA_U8)
149 * @DCB_PG_ATTR_BW_ID_4: Percent of link bandwidth for Priority Group 4 (NLA_U8)
150 * @DCB_PG_ATTR_BW_ID_5: Percent of link bandwidth for Priority Group 5 (NLA_U8)
151 * @DCB_PG_ATTR_BW_ID_6: Percent of link bandwidth for Priority Group 6 (NLA_U8)
152 * @DCB_PG_ATTR_BW_ID_7: Percent of link bandwidth for Priority Group 7 (NLA_U8)
153 * @DCB_PG_ATTR_BW_ID_MAX: highest attribute number currently defined
154 * @DCB_PG_ATTR_BW_ID_ALL: apply to all priority groups (NLA_FLAG)
155 *
156 */
157enum dcbnl_pg_attrs {
158 DCB_PG_ATTR_UNDEFINED,
159
160 DCB_PG_ATTR_TC_0,
161 DCB_PG_ATTR_TC_1,
162 DCB_PG_ATTR_TC_2,
163 DCB_PG_ATTR_TC_3,
164 DCB_PG_ATTR_TC_4,
165 DCB_PG_ATTR_TC_5,
166 DCB_PG_ATTR_TC_6,
167 DCB_PG_ATTR_TC_7,
168 DCB_PG_ATTR_TC_MAX,
169 DCB_PG_ATTR_TC_ALL,
170
171 DCB_PG_ATTR_BW_ID_0,
172 DCB_PG_ATTR_BW_ID_1,
173 DCB_PG_ATTR_BW_ID_2,
174 DCB_PG_ATTR_BW_ID_3,
175 DCB_PG_ATTR_BW_ID_4,
176 DCB_PG_ATTR_BW_ID_5,
177 DCB_PG_ATTR_BW_ID_6,
178 DCB_PG_ATTR_BW_ID_7,
179 DCB_PG_ATTR_BW_ID_MAX,
180 DCB_PG_ATTR_BW_ID_ALL,
181
182 __DCB_PG_ATTR_ENUM_MAX,
183 DCB_PG_ATTR_MAX = __DCB_PG_ATTR_ENUM_MAX - 1,
184};
185
186/**
187 * enum dcbnl_tc_attrs - DCB Traffic Class attributes
188 *
189 * @DCB_TC_ATTR_PARAM_UNDEFINED: unspecified attribute to catch errors
190 * @DCB_TC_ATTR_PARAM_PGID: (NLA_U8) Priority group the traffic class belongs to
191 * Valid values are: 0-7
192 * @DCB_TC_ATTR_PARAM_UP_MAPPING: (NLA_U8) Traffic class to user priority map
193 * Some devices may not support changing the
194 * user priority map of a TC.
195 * @DCB_TC_ATTR_PARAM_STRICT_PRIO: (NLA_U8) Strict priority setting
196 * 0 - none
197 * 1 - group strict
198 * 2 - link strict
199 * @DCB_TC_ATTR_PARAM_BW_PCT: optional - (NLA_U8) If supported by the device and
200 * not configured to use link strict priority,
201 * this is the percentage of bandwidth of the
202 * priority group this traffic class belongs to
203 * @DCB_TC_ATTR_PARAM_ALL: (NLA_FLAG) all traffic class parameters
204 *
205 */
206enum dcbnl_tc_attrs {
207 DCB_TC_ATTR_PARAM_UNDEFINED,
208
209 DCB_TC_ATTR_PARAM_PGID,
210 DCB_TC_ATTR_PARAM_UP_MAPPING,
211 DCB_TC_ATTR_PARAM_STRICT_PRIO,
212 DCB_TC_ATTR_PARAM_BW_PCT,
213 DCB_TC_ATTR_PARAM_ALL,
214
215 __DCB_TC_ATTR_PARAM_ENUM_MAX,
216 DCB_TC_ATTR_PARAM_MAX = __DCB_TC_ATTR_PARAM_ENUM_MAX - 1,
217};
218
219/**
220 * enum dcb_general_attr_values - general DCB attribute values
221 *
 222 * @DCB_ATTR_VALUE_UNDEFINED: value used to indicate an attribute is not supported
223 *
224 */
225enum dcb_general_attr_values {
226 DCB_ATTR_VALUE_UNDEFINED = 0xff
227};
228
229
230#endif /* __LINUX_DCBNL_H__ */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d8fb23679ee3..6095af572dfd 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -43,6 +43,9 @@
43 43
44#include <net/net_namespace.h> 44#include <net/net_namespace.h>
45#include <net/dsa.h> 45#include <net/dsa.h>
46#ifdef CONFIG_DCBNL
47#include <net/dcbnl.h>
48#endif
46 49
47struct vlan_group; 50struct vlan_group;
48struct ethtool_ops; 51struct ethtool_ops;
@@ -843,6 +846,11 @@ struct net_device
843#define GSO_MAX_SIZE 65536 846#define GSO_MAX_SIZE 65536
844 unsigned int gso_max_size; 847 unsigned int gso_max_size;
845 848
849#ifdef CONFIG_DCBNL
850 /* Data Center Bridging netlink ops */
851 struct dcbnl_rtnl_ops *dcbnl_ops;
852#endif
853
846#ifdef CONFIG_COMPAT_NET_DEV_OPS 854#ifdef CONFIG_COMPAT_NET_DEV_OPS
847 struct { 855 struct {
848 int (*init)(struct net_device *dev); 856 int (*init)(struct net_device *dev);
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2b3d51c6ec9c..e88f7058b3a1 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -107,6 +107,11 @@ enum {
107 RTM_GETADDRLABEL, 107 RTM_GETADDRLABEL,
108#define RTM_GETADDRLABEL RTM_GETADDRLABEL 108#define RTM_GETADDRLABEL RTM_GETADDRLABEL
109 109
110 RTM_GETDCB = 78,
111#define RTM_GETDCB RTM_GETDCB
112 RTM_SETDCB,
113#define RTM_SETDCB RTM_SETDCB
114
110 __RTM_MAX, 115 __RTM_MAX,
111#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) 116#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
112}; 117};
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
new file mode 100644
index 000000000000..0ef0c5a46d8b
--- /dev/null
+++ b/include/net/dcbnl.h
@@ -0,0 +1,44 @@
1/*
2 * Copyright (c) 2008, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Lucy Liu <lucy.liu@intel.com>
18 */
19
20#ifndef __NET_DCBNL_H__
21#define __NET_DCBNL_H__
22
23/*
24 * Ops struct for the netlink callbacks. Used by DCB-enabled drivers through
25 * the netdevice struct.
26 */
27struct dcbnl_rtnl_ops {
28 u8 (*getstate)(struct net_device *);
29 void (*setstate)(struct net_device *, u8);
30 void (*getpermhwaddr)(struct net_device *, u8 *);
31 void (*setpgtccfgtx)(struct net_device *, int, u8, u8, u8, u8);
32 void (*setpgbwgcfgtx)(struct net_device *, int, u8);
33 void (*setpgtccfgrx)(struct net_device *, int, u8, u8, u8, u8);
34 void (*setpgbwgcfgrx)(struct net_device *, int, u8);
35 void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);
36 void (*getpgbwgcfgtx)(struct net_device *, int, u8 *);
37 void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);
38 void (*getpgbwgcfgrx)(struct net_device *, int, u8 *);
39 void (*setpfccfg)(struct net_device *, int, u8);
40 void (*getpfccfg)(struct net_device *, int, u8 *);
41 u8 (*setall)(struct net_device *);
42};
43
44#endif /* __NET_DCBNL_H__ */
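
struct dcbnl_rtnl_ops is the only contract between a driver and the new dcbnl module: the driver fills in whichever callbacks it supports and publishes the table through netdev->dcbnl_ops, which is what the ixgbe hunks above do with the dcbnl_ops table from ixgbe_dcb_nl.c. A minimal sketch of that wiring for a hypothetical "foo" driver (names invented here; callbacks left NULL are simply skipped, since each dcbnl handler checks the pointer before calling it):

#include <linux/netdevice.h>
#include <net/dcbnl.h>

/* hypothetical state query: report DCB as enabled */
static u8 foo_dcb_getstate(struct net_device *dev)
{
	return 1;
}

/* hypothetical "commit" hook: push cached DCB config to hardware */
static u8 foo_dcb_setall(struct net_device *dev)
{
	return 0;	/* 0 == no error */
}

static struct dcbnl_rtnl_ops foo_dcbnl_ops = {
	.getstate = foo_dcb_getstate,
	.setall   = foo_dcb_setall,
	/* remaining callbacks left NULL; dcbnl tests each before use */
};

static void foo_attach_dcbnl(struct net_device *dev)
{
#ifdef CONFIG_DCBNL
	dev->dcbnl_ops = &foo_dcbnl_ops;	/* read by net/dcb/dcbnl.c */
#endif
}
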
diff --git a/net/Kconfig b/net/Kconfig
index 4e2e40ba8ba6..c7d01c3a23c5 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -194,6 +194,7 @@ source "net/lapb/Kconfig"
194source "net/econet/Kconfig" 194source "net/econet/Kconfig"
195source "net/wanrouter/Kconfig" 195source "net/wanrouter/Kconfig"
196source "net/sched/Kconfig" 196source "net/sched/Kconfig"
197source "net/dcb/Kconfig"
197 198
198menu "Network testing" 199menu "Network testing"
199 200
diff --git a/net/Makefile b/net/Makefile
index 27d1f10dc0e0..83b064651f1d 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -57,6 +57,9 @@ obj-$(CONFIG_NETLABEL) += netlabel/
57obj-$(CONFIG_IUCV) += iucv/ 57obj-$(CONFIG_IUCV) += iucv/
58obj-$(CONFIG_RFKILL) += rfkill/ 58obj-$(CONFIG_RFKILL) += rfkill/
59obj-$(CONFIG_NET_9P) += 9p/ 59obj-$(CONFIG_NET_9P) += 9p/
60ifeq ($(CONFIG_DCBNL),y)
61obj-$(CONFIG_DCB) += dcb/
62endif
60 63
61ifeq ($(CONFIG_NET),y) 64ifeq ($(CONFIG_NET),y)
62obj-$(CONFIG_SYSCTL) += sysctl_net.o 65obj-$(CONFIG_SYSCTL) += sysctl_net.o
diff --git a/net/dcb/Kconfig b/net/dcb/Kconfig
new file mode 100644
index 000000000000..bdf38802d339
--- /dev/null
+++ b/net/dcb/Kconfig
@@ -0,0 +1,12 @@
1config DCB
2 tristate "Data Center Bridging support"
3
4config DCBNL
5 bool "Data Center Bridging netlink interface support"
6 depends on DCB
7 default n
8 ---help---
9 This option turns on the netlink interface
10 (dcbnl) for Data Center Bridging capable devices.
11
12 If unsure, say N.
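
An illustrative configuration fragment showing one way to enable the whole path end to end (assumed values, not part of the patch); note that the net/Makefile hunk above only descends into net/dcb/ when CONFIG_DCBNL=y, and the driver-side CONFIG_IXGBE_DCBNL option additionally depends on CONFIG_IXGBE:

CONFIG_DCB=y
CONFIG_DCBNL=y
CONFIG_IXGBE=m
CONFIG_IXGBE_DCBNL=y
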
diff --git a/net/dcb/Makefile b/net/dcb/Makefile
new file mode 100644
index 000000000000..9930f4cde818
--- /dev/null
+++ b/net/dcb/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_DCB) += dcbnl.o
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
new file mode 100644
index 000000000000..516e8be83d72
--- /dev/null
+++ b/net/dcb/dcbnl.c
@@ -0,0 +1,704 @@
1/*
2 * Copyright (c) 2008, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Lucy Liu <lucy.liu@intel.com>
18 */
19
20#include <linux/netdevice.h>
21#include <linux/netlink.h>
22#include <net/netlink.h>
23#include <net/rtnetlink.h>
24#include <linux/dcbnl.h>
25#include <linux/rtnetlink.h>
26#include <net/sock.h>
27
28/**
29 * Data Center Bridging (DCB) is a collection of Ethernet enhancements
30 * intended to allow network traffic with differing requirements
31 * (highly reliable, no drops vs. best effort vs. low latency) to operate
32 * and co-exist on Ethernet. Current DCB features are:
33 *
34 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
35 * framework for assigning bandwidth guarantees to traffic classes.
36 *
37 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
38 * can work independently for each 802.1p priority.
39 *
40 * Congestion Notification - provides a mechanism for end-to-end congestion
41 * control for protocols which do not have built-in congestion management.
42 *
43 * More information about the emerging standards for these Ethernet features
44 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
45 *
46 * This file implements an rtnetlink interface to allow configuration of DCB
47 * features for capable devices.
48 */
49
50MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
 51MODULE_DESCRIPTION("Data Center Bridging netlink (rtnetlink) interface");
52MODULE_LICENSE("GPL");
53
54/**************** DCB attribute policies *************************************/
55
56/* DCB netlink attributes policy */
57static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
58 [DCB_ATTR_IFNAME] = {.type = NLA_STRING, .len = IFNAMSIZ - 1},
59 [DCB_ATTR_STATE] = {.type = NLA_U8},
60 [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
61 [DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
62 [DCB_ATTR_SET_ALL] = {.type = NLA_U8},
63 [DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
64};
65
66/* DCB priority flow control to User Priority nested attributes */
67static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
68 [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
69 [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
70 [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
71 [DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
72 [DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
73 [DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
74 [DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
75 [DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
76 [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
77};
78
79/* DCB priority grouping nested attributes */
80static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
81 [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
82 [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
83 [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
84 [DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
85 [DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
86 [DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
87 [DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
88 [DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
89 [DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
90 [DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
91 [DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
92 [DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
93 [DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
94 [DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
95 [DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
96 [DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
97 [DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
98 [DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
99};
100
101/* DCB traffic class nested attributes. */
102static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
103 [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
104 [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
105 [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
106 [DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
107 [DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
108};
109
110
111/* standard netlink reply call */
112static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
113 u32 seq, u16 flags)
114{
115 struct sk_buff *dcbnl_skb;
116 struct dcbmsg *dcb;
117 struct nlmsghdr *nlh;
118 int ret = -EINVAL;
119
120 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
121 if (!dcbnl_skb)
122 return ret;
123
124 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);
125
126 dcb = NLMSG_DATA(nlh);
127 dcb->dcb_family = AF_UNSPEC;
128 dcb->cmd = cmd;
129 dcb->dcb_pad = 0;
130
131 ret = nla_put_u8(dcbnl_skb, attr, value);
132 if (ret)
133 goto err;
134
135 /* end the message, assign the nlmsg_len. */
136 nlmsg_end(dcbnl_skb, nlh);
137 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
138 if (ret)
139 goto err;
140
141 return 0;
142nlmsg_failure:
143err:
144 kfree(dcbnl_skb);
145 return ret;
146}
147
148static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
149 u32 pid, u32 seq, u16 flags)
150{
151 int ret = -EINVAL;
152
153 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
154 if (!netdev->dcbnl_ops->getstate)
155 return ret;
156
157 ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
158 DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);
159
160 return ret;
161}
162
163static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
164 u32 pid, u32 seq, u16 flags)
165{
166 struct sk_buff *dcbnl_skb;
167 struct nlmsghdr *nlh;
168 struct dcbmsg *dcb;
169 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
170 u8 value;
171 int ret = -EINVAL;
172 int i;
173 int getall = 0;
174
175 if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
176 return ret;
177
178 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
179 tb[DCB_ATTR_PFC_CFG],
180 dcbnl_pfc_up_nest);
181 if (ret)
182 goto err_out;
183
184 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
185 if (!dcbnl_skb)
186 goto err_out;
187
188 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
189
190 dcb = NLMSG_DATA(nlh);
191 dcb->dcb_family = AF_UNSPEC;
192 dcb->cmd = DCB_CMD_PFC_GCFG;
193
194 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
195 if (!nest)
196 goto err;
197
198 if (data[DCB_PFC_UP_ATTR_ALL])
199 getall = 1;
200
201 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
202 if (!getall && !data[i])
203 continue;
204
205 netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
206 &value);
207 ret = nla_put_u8(dcbnl_skb, i, value);
208
209 if (ret) {
210 nla_nest_cancel(dcbnl_skb, nest);
211 goto err;
212 }
213 }
214 nla_nest_end(dcbnl_skb, nest);
215
216 nlmsg_end(dcbnl_skb, nlh);
217
218 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
219 if (ret)
220 goto err;
221
222 return 0;
223nlmsg_failure:
224err:
225 kfree(dcbnl_skb);
226err_out:
227 return -EINVAL;
228}
229
230static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
231 u32 pid, u32 seq, u16 flags)
232{
233 struct sk_buff *dcbnl_skb;
234 struct nlmsghdr *nlh;
235 struct dcbmsg *dcb;
236 u8 perm_addr[MAX_ADDR_LEN];
237 int ret = -EINVAL;
238
239 if (!netdev->dcbnl_ops->getpermhwaddr)
240 return ret;
241
242 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
243 if (!dcbnl_skb)
244 goto err_out;
245
246 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
247
248 dcb = NLMSG_DATA(nlh);
249 dcb->dcb_family = AF_UNSPEC;
250 dcb->cmd = DCB_CMD_GPERM_HWADDR;
251
252 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
253
254 ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
255 perm_addr);
256
257 nlmsg_end(dcbnl_skb, nlh);
258
259 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
260 if (ret)
261 goto err;
262
263 return 0;
264
265nlmsg_failure:
266err:
267 kfree(dcbnl_skb);
268err_out:
269 return -EINVAL;
270}
271
272static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
273 u32 pid, u32 seq, u16 flags, int dir)
274{
275 struct sk_buff *dcbnl_skb;
276 struct nlmsghdr *nlh;
277 struct dcbmsg *dcb;
278 struct nlattr *pg_nest, *param_nest, *data;
279 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
280 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
281 u8 prio, pgid, tc_pct, up_map;
282 int ret = -EINVAL;
283 int getall = 0;
284 int i;
285
286 if (!tb[DCB_ATTR_PG_CFG] ||
287 !netdev->dcbnl_ops->getpgtccfgtx ||
288 !netdev->dcbnl_ops->getpgtccfgrx ||
289 !netdev->dcbnl_ops->getpgbwgcfgtx ||
290 !netdev->dcbnl_ops->getpgbwgcfgrx)
291 return ret;
292
293 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
294 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
295
296 if (ret)
297 goto err_out;
298
299 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
300 if (!dcbnl_skb)
301 goto err_out;
302
303 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
304
305 dcb = NLMSG_DATA(nlh);
306 dcb->dcb_family = AF_UNSPEC;
307 dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;
308
309 pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
310 if (!pg_nest)
311 goto err;
312
313 if (pg_tb[DCB_PG_ATTR_TC_ALL])
314 getall = 1;
315
316 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
317 if (!getall && !pg_tb[i])
318 continue;
319
320 if (pg_tb[DCB_PG_ATTR_TC_ALL])
321 data = pg_tb[DCB_PG_ATTR_TC_ALL];
322 else
323 data = pg_tb[i];
324 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
325 data, dcbnl_tc_param_nest);
326 if (ret)
327 goto err_pg;
328
329 param_nest = nla_nest_start(dcbnl_skb, i);
330 if (!param_nest)
331 goto err_pg;
332
333 pgid = DCB_ATTR_VALUE_UNDEFINED;
334 prio = DCB_ATTR_VALUE_UNDEFINED;
335 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
336 up_map = DCB_ATTR_VALUE_UNDEFINED;
337
338 if (dir) {
339 /* Rx */
340 netdev->dcbnl_ops->getpgtccfgrx(netdev,
341 i - DCB_PG_ATTR_TC_0, &prio,
342 &pgid, &tc_pct, &up_map);
343 } else {
344 /* Tx */
345 netdev->dcbnl_ops->getpgtccfgtx(netdev,
346 i - DCB_PG_ATTR_TC_0, &prio,
347 &pgid, &tc_pct, &up_map);
348 }
349
350 if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
351 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
352 ret = nla_put_u8(dcbnl_skb,
353 DCB_TC_ATTR_PARAM_PGID, pgid);
354 if (ret)
355 goto err_param;
356 }
357 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
358 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
359 ret = nla_put_u8(dcbnl_skb,
360 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
361 if (ret)
362 goto err_param;
363 }
364 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
365 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
366 ret = nla_put_u8(dcbnl_skb,
367 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
368 if (ret)
369 goto err_param;
370 }
371 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
372 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
373 ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
374 tc_pct);
375 if (ret)
376 goto err_param;
377 }
378 nla_nest_end(dcbnl_skb, param_nest);
379 }
380
381 if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
382 getall = 1;
383 else
384 getall = 0;
385
386 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
387 if (!getall && !pg_tb[i])
388 continue;
389
390 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
391
392 if (dir) {
393 /* Rx */
394 netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
395 i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
396 } else {
397 /* Tx */
398 netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
399 i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
400 }
401 ret = nla_put_u8(dcbnl_skb, i, tc_pct);
402
403 if (ret)
404 goto err_pg;
405 }
406
407 nla_nest_end(dcbnl_skb, pg_nest);
408
409 nlmsg_end(dcbnl_skb, nlh);
410
411 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
412 if (ret)
413 goto err;
414
415 return 0;
416
417err_param:
418 nla_nest_cancel(dcbnl_skb, param_nest);
419err_pg:
420 nla_nest_cancel(dcbnl_skb, pg_nest);
421nlmsg_failure:
422err:
423 kfree(dcbnl_skb);
424err_out:
425 ret = -EINVAL;
426 return ret;
427}
428
429static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
430 u32 pid, u32 seq, u16 flags)
431{
432 return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
433}
434
435static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
436 u32 pid, u32 seq, u16 flags)
437{
438 return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
439}
440
441static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
442 u32 pid, u32 seq, u16 flags)
443{
444 int ret = -EINVAL;
445 u8 value;
446
447 if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
448 return ret;
449
450 value = nla_get_u8(tb[DCB_ATTR_STATE]);
451
452 netdev->dcbnl_ops->setstate(netdev, value);
453
454 ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
455 pid, seq, flags);
456
457 return ret;
458}
459
460static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
461 u32 pid, u32 seq, u16 flags)
462{
463 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
464 int i;
465 int ret = -EINVAL;
466 u8 value;
467
468 if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
469 return ret;
470
471 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
472 tb[DCB_ATTR_PFC_CFG],
473 dcbnl_pfc_up_nest);
474 if (ret)
475 goto err;
476
477 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
478 if (data[i] == NULL)
479 continue;
480 value = nla_get_u8(data[i]);
481 netdev->dcbnl_ops->setpfccfg(netdev,
482 data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
483 }
484
485 ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
486 pid, seq, flags);
487err:
488 return ret;
489}
490
491static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
492 u32 pid, u32 seq, u16 flags)
493{
494 int ret = -EINVAL;
495
496 if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
497 return ret;
498
499 ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
500 DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
501
502 return ret;
503}
504
505static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
506 u32 pid, u32 seq, u16 flags, int dir)
507{
508 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
509 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
510 int ret = -EINVAL;
511 int i;
512 u8 pgid;
513 u8 up_map;
514 u8 prio;
515 u8 tc_pct;
516
517 if (!tb[DCB_ATTR_PG_CFG] ||
518 !netdev->dcbnl_ops->setpgtccfgtx ||
519 !netdev->dcbnl_ops->setpgtccfgrx ||
520 !netdev->dcbnl_ops->setpgbwgcfgtx ||
521 !netdev->dcbnl_ops->setpgbwgcfgrx)
522 return ret;
523
524 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
525 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
526 if (ret)
527 goto err;
528
529 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
530 if (!pg_tb[i])
531 continue;
532
533 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
534 pg_tb[i], dcbnl_tc_param_nest);
535 if (ret)
536 goto err;
537
538 pgid = DCB_ATTR_VALUE_UNDEFINED;
539 prio = DCB_ATTR_VALUE_UNDEFINED;
540 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
541 up_map = DCB_ATTR_VALUE_UNDEFINED;
542
543 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
544 prio =
545 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);
546
547 if (param_tb[DCB_TC_ATTR_PARAM_PGID])
548 pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);
549
550 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
551 tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);
552
553 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
554 up_map =
555 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);
556
557 /* dir: Tx = 0, Rx = 1 */
558 if (dir) {
559 /* Rx */
560 netdev->dcbnl_ops->setpgtccfgrx(netdev,
561 i - DCB_PG_ATTR_TC_0,
562 prio, pgid, tc_pct, up_map);
563 } else {
564 /* Tx */
565 netdev->dcbnl_ops->setpgtccfgtx(netdev,
566 i - DCB_PG_ATTR_TC_0,
567 prio, pgid, tc_pct, up_map);
568 }
569 }
570
571 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
572 if (!pg_tb[i])
573 continue;
574
575 tc_pct = nla_get_u8(pg_tb[i]);
576
577 /* dir: Tx = 0, Rx = 1 */
578 if (dir) {
579 /* Rx */
580 netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
581 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
582 } else {
583 /* Tx */
584 netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
585 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
586 }
587 }
588
589 ret = dcbnl_reply(0, RTM_SETDCB,
590 (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
591 DCB_ATTR_PG_CFG, pid, seq, flags);
592
593err:
594 return ret;
595}
596
597static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
598 u32 pid, u32 seq, u16 flags)
599{
600 return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
601}
602
603static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
604 u32 pid, u32 seq, u16 flags)
605{
606 return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
607}
608
609static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
610{
611 struct net *net = sock_net(skb->sk);
612 struct net_device *netdev;
613 struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
614 struct nlattr *tb[DCB_ATTR_MAX + 1];
615 u32 pid = skb ? NETLINK_CB(skb).pid : 0;
616 int ret = -EINVAL;
617
618 if (net != &init_net)
619 return -EINVAL;
620
621 ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
622 dcbnl_rtnl_policy);
623 if (ret < 0)
624 return ret;
625
626 if (!tb[DCB_ATTR_IFNAME])
627 return -EINVAL;
628
629 netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
630 if (!netdev)
631 return -EINVAL;
632
633 if (!netdev->dcbnl_ops)
634 goto errout;
635
636 switch (dcb->cmd) {
637 case DCB_CMD_GSTATE:
638 ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
639 nlh->nlmsg_flags);
640 goto out;
641 case DCB_CMD_PFC_GCFG:
642 ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
643 nlh->nlmsg_flags);
644 goto out;
645 case DCB_CMD_GPERM_HWADDR:
646 ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
647 nlh->nlmsg_flags);
648 goto out;
649 case DCB_CMD_PGTX_GCFG:
650 ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
651 nlh->nlmsg_flags);
652 goto out;
653 case DCB_CMD_PGRX_GCFG:
654 ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
655 nlh->nlmsg_flags);
656 goto out;
657 case DCB_CMD_SSTATE:
658 ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
659 nlh->nlmsg_flags);
660 goto out;
661 case DCB_CMD_PFC_SCFG:
662 ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
663 nlh->nlmsg_flags);
664 goto out;
665
666 case DCB_CMD_SET_ALL:
667 ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
668 nlh->nlmsg_flags);
669 goto out;
670 case DCB_CMD_PGTX_SCFG:
671 ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
672 nlh->nlmsg_flags);
673 goto out;
674 case DCB_CMD_PGRX_SCFG:
675 ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
676 nlh->nlmsg_flags);
677 goto out;
678 default:
679 goto errout;
680 }
681errout:
682 ret = -EINVAL;
683out:
684 dev_put(netdev);
685 return ret;
686}
687
688static int __init dcbnl_init(void)
689{
690 rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL);
691 rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL);
692
693 return 0;
694}
695module_init(dcbnl_init);
696
697static void __exit dcbnl_exit(void)
698{
699 rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
700 rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
701}
702module_exit(dcbnl_exit);
703
704
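
Finally, a user-space sketch of how the dispatcher above can be exercised (illustrative only, not part of the patch; the interface name "eth0" is assumed and error handling is omitted). It sends an RTM_GETDCB / DCB_CMD_GSTATE request with a DCB_ATTR_IFNAME attribute over a NETLINK_ROUTE socket; dcb_doit() routes it to dcbnl_getstate(), and the reply built by dcbnl_reply() carries the state as a DCB_ATTR_STATE u8.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/dcbnl.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct dcbmsg dcb;
		char attrs[64];			/* room for DCB_ATTR_IFNAME */
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct rtattr *rta;
	char reply[1024];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct dcbmsg));
	req.nlh.nlmsg_type = RTM_GETDCB;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.dcb.dcb_family = AF_UNSPEC;
	req.dcb.cmd = DCB_CMD_GSTATE;

	/* append DCB_ATTR_IFNAME; dcb_doit() refuses requests without it */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = DCB_ATTR_IFNAME;
	rta->rta_len = RTA_LENGTH(strlen("eth0") + 1);
	strcpy(RTA_DATA(rta), "eth0");
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	sendto(fd, &req, req.nlh.nlmsg_len, 0,
	       (struct sockaddr *)&kernel, sizeof(kernel));
	recv(fd, reply, sizeof(reply), 0);	/* reply holds DCB_ATTR_STATE */

	close(fd);
	return 0;
}
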