author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/net/ixgbe
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/Makefile           |    2
-rw-r--r--  drivers/net/ixgbe/ixgbe.h            |  295
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c      |  219
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c      | 1449
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c     | 1692
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.h     |   29
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.c        |  383
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.h        |   36
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.c  |  203
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.h  |   40
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.c  |  313
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.h  |   50
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c     |  481
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c    | 1047
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c       |  188
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.h       |    9
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c       | 5110
-rw-r--r--  drivers/net/ixgbe/ixgbe_mbx.c        |   48
-rw-r--r--  drivers/net/ixgbe/ixgbe_mbx.h        |   13
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.c        |  622
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.h        |   12
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.c      |  286
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.h      |   11
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h       |  415
-rw-r--r--  drivers/net/ixgbe/ixgbe_x540.c       |  938
25 files changed, 8559 insertions(+), 5332 deletions(-)
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 8f81efb49169..7d7387fbdecd 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-              ixgbe_mbx.o
+              ixgbe_mbx.o ixgbe_x540.o
 
 ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
                              ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 9e15eb93860e..e467b20ed1f0 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -28,10 +28,13 @@
 #ifndef _IXGBE_H_
 #define _IXGBE_H_
 
+#include <linux/bitops.h>
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
+#include <linux/cpumask.h>
 #include <linux/aer.h>
+#include <linux/if_vlan.h>
 
 #include "ixgbe_type.h"
 #include "ixgbe_common.h"
@@ -58,10 +61,8 @@
 #define IXGBE_MIN_RXD       64
 
 /* flow control */
-#define IXGBE_DEFAULT_FCRTL 0x10000
 #define IXGBE_MIN_FCRTL     0x40
 #define IXGBE_MAX_FCRTL     0x7FF80
-#define IXGBE_DEFAULT_FCRTH 0x20000
 #define IXGBE_MIN_FCRTH     0x600
 #define IXGBE_MAX_FCRTH     0x7FFF0
 #define IXGBE_DEFAULT_FCPAUSE 0xFFFF
@@ -69,15 +70,20 @@
 #define IXGBE_MAX_FCPAUSE  0xFFFF
 
 /* Supported Rx Buffer Sizes */
-#define IXGBE_RXBUFFER_64    64   /* Used for packet split */
-#define IXGBE_RXBUFFER_128   128  /* Used for packet split */
-#define IXGBE_RXBUFFER_256   256  /* Used for packet split */
+#define IXGBE_RXBUFFER_512   512  /* Used for packet split */
 #define IXGBE_RXBUFFER_2048  2048
 #define IXGBE_RXBUFFER_4096  4096
 #define IXGBE_RXBUFFER_8192  8192
 #define IXGBE_MAX_RXBUFFER   16384  /* largest size for a single descriptor */
 
-#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
+/*
+ * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
+ * this adds up to 512 bytes of extra data meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
 
 #define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
 
@@ -100,6 +106,7 @@
 #define IXGBE_MAX_VF_FUNCTIONS   64
 #define IXGBE_MAX_VFTA_ENTRIES   128
 #define MAX_EMULATION_MAC_ADDRS  16
+#define IXGBE_MAX_PF_MACVLANS    15
 #define VMDQ_P(p) ((p) + adapter->num_vfs)
 
 struct vf_data_storage {
@@ -112,6 +119,16 @@ struct vf_data_storage {
 	bool pf_set_mac;
 	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
 	u16 pf_qos;
+	u16 tx_rate;
+};
+
+struct vf_macvlans {
+	struct list_head l;
+	int vf;
+	int rar_entry;
+	bool free;
+	bool is_macvlan;
+	u8 vf_macvlan[ETH_ALEN];
 };
 
 /* wrapper around a pointer to a socket buffer,
@@ -122,7 +139,9 @@ struct ixgbe_tx_buffer {
 	unsigned long time_stamp;
 	u16 length;
 	u16 next_to_watch;
-	u16 mapped_as_page;
+	unsigned int bytecount;
+	u16 gso_segs;
+	u8 mapped_as_page;
 };
 
 struct ixgbe_rx_buffer {
@@ -138,12 +157,56 @@ struct ixgbe_queue_stats {
 	u64 bytes;
 };
 
+struct ixgbe_tx_queue_stats {
+	u64 restart_queue;
+	u64 tx_busy;
+	u64 completed;
+	u64 tx_done_old;
+};
+
+struct ixgbe_rx_queue_stats {
+	u64 rsc_count;
+	u64 rsc_flush;
+	u64 non_eop_descs;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
+};
+
+enum ixbge_ring_state_t {
+	__IXGBE_TX_FDIR_INIT_DONE,
+	__IXGBE_TX_DETECT_HANG,
+	__IXGBE_HANG_CHECK_ARMED,
+	__IXGBE_RX_PS_ENABLED,
+	__IXGBE_RX_RSC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+	test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+	set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+	clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_rsc_enabled(ring) \
+	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define set_ring_rsc_enabled(ring) \
+	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define clear_ring_rsc_enabled(ring) \
+	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 struct ixgbe_ring {
 	void *desc;                 /* descriptor ring memory */
+	struct device *dev;         /* device for DMA mapping */
+	struct net_device *netdev;  /* netdev ring belongs to */
 	union {
 		struct ixgbe_tx_buffer *tx_buffer_info;
 		struct ixgbe_rx_buffer *rx_buffer_info;
 	};
+	unsigned long state;
 	u8 atr_sample_rate;
 	u8 atr_count;
 	u16 count; /* amount of descriptors */
@@ -152,37 +215,31 @@ struct ixgbe_ring {
 	u16 next_to_clean;
 
 	u8 queue_index; /* needed for multiqueue queue management */
-
-#define IXGBE_RING_RX_PS_ENABLED (u8)(1)
-	u8 flags; /* per ring feature flags */
-	u16 head;
-	u16 tail;
-
-	unsigned int total_bytes;
-	unsigned int total_packets;
-
-#ifdef CONFIG_IXGBE_DCA
-	/* cpu for tx queue */
-	int cpu;
-#endif
-
-	u16 work_limit; /* max work per interrupt */
-	u16 reg_idx; /* holds the special value that gets
+	u8 reg_idx; /* holds the special value that gets
 		      * the hardware register offset
 		      * associated with this ring, which is
 		      * different for DCB and RSS modes
 		      */
+	u8 dcb_tc;
+
+	u16 work_limit; /* max work per interrupt */
+
+	u8 __iomem *tail;
+
+	unsigned int total_bytes;
+	unsigned int total_packets;
 
 	struct ixgbe_queue_stats stats;
-	unsigned long reinit_state;
+	struct u64_stats_sync syncp;
+	union {
+		struct ixgbe_tx_queue_stats tx_stats;
+		struct ixgbe_rx_queue_stats rx_stats;
+	};
 	int numa_node;
-	u64 rsc_count;     /* stat for coalesced packets */
-	u64 rsc_flush;     /* stats for flushed packets */
-	u32 restart_queue; /* track tx queue restarts */
-	u32 non_eop_descs; /* track hardware descriptor chaining */
-
 	unsigned int size; /* length in bytes */
 	dma_addr_t dma;    /* phys. address of descriptor ring */
+	struct rcu_head rcu;
+	struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
 } ____cacheline_internodealigned_in_smp;
 
 enum ixgbe_ring_f_enum {
@@ -198,7 +255,7 @@ enum ixgbe_ring_f_enum {
 	RING_F_ARRAY_SIZE  /* must be last in enum set */
 };
 
-#define IXGBE_MAX_DCB_INDICES   8
+#define IXGBE_MAX_DCB_INDICES  64
 #define IXGBE_MAX_RSS_INDICES  16
 #define IXGBE_MAX_VMDQ_INDICES 64
 #define IXGBE_MAX_FDIR_INDICES 64
@@ -228,6 +285,9 @@ struct ixgbe_q_vector {
 	unsigned int v_idx; /* index of q_vector within array, also used for
 			     * finding the bit in EICR and friends that
 			     * represents the vector for this ring */
+#ifdef CONFIG_IXGBE_DCA
+	int cpu;	    /* CPU for DCA */
+#endif
 	struct napi_struct napi;
 	DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
 	DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
@@ -236,6 +296,8 @@ struct ixgbe_q_vector {
 	u8 tx_itr;
 	u8 rx_itr;
 	u32 eitr;
+	cpumask_var_t affinity_mask;
+	char name[IFNAMSIZ + 9];
 };
 
 /* Helper macros to switch between ints/sec and what the register uses.
@@ -251,11 +313,11 @@ struct ixgbe_q_vector {
 	(R)->next_to_clean - (R)->next_to_use - 1)
 
 #define IXGBE_RX_DESC_ADV(R, i) \
-	(&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
+	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
 #define IXGBE_TX_DESC_ADV(R, i) \
-	(&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
+	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
 #define IXGBE_TX_CTXTDESC_ADV(R, i) \
-	(&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
 
 #define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
 #ifdef IXGBE_FCOE
@@ -279,15 +341,61 @@ struct ixgbe_q_vector {
 
 /* board specific private data structure */
 struct ixgbe_adapter {
-	struct timer_list watchdog_timer;
-	struct vlan_group *vlgrp;
+	unsigned long state;
+
+	/* Some features need tri-state capability,
+	 * thus the additional *_CAPABLE flags.
+	 */
+	u32 flags;
+#define IXGBE_FLAG_RX_CSUM_ENABLED              (u32)(1)
+#define IXGBE_FLAG_MSI_CAPABLE                  (u32)(1 << 1)
+#define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 2)
+#define IXGBE_FLAG_MSIX_CAPABLE                 (u32)(1 << 3)
+#define IXGBE_FLAG_MSIX_ENABLED                 (u32)(1 << 4)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 6)
+#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 7)
+#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 8)
+#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 9)
+#define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 10)
+#define IXGBE_FLAG_DCA_CAPABLE                  (u32)(1 << 11)
+#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 12)
+#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 13)
+#define IXGBE_FLAG_DCB_ENABLED                  (u32)(1 << 14)
+#define IXGBE_FLAG_RSS_ENABLED                  (u32)(1 << 16)
+#define IXGBE_FLAG_RSS_CAPABLE                  (u32)(1 << 17)
+#define IXGBE_FLAG_VMDQ_CAPABLE                 (u32)(1 << 18)
+#define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 19)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 20)
+#define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 22)
+#define IXGBE_FLAG_NEED_LINK_CONFIG             (u32)(1 << 23)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE            (u32)(1 << 24)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE         (u32)(1 << 25)
+#define IXGBE_FLAG_FCOE_CAPABLE                 (u32)(1 << 26)
+#define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 27)
+#define IXGBE_FLAG_SRIOV_CAPABLE                (u32)(1 << 28)
+#define IXGBE_FLAG_SRIOV_ENABLED                (u32)(1 << 29)
+
+	u32 flags2;
+#define IXGBE_FLAG2_RSC_CAPABLE                 (u32)(1)
+#define IXGBE_FLAG2_RSC_ENABLED                 (u32)(1 << 1)
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE         (u32)(1 << 2)
+#define IXGBE_FLAG2_TEMP_SENSOR_EVENT           (u32)(1 << 3)
+#define IXGBE_FLAG2_SEARCH_FOR_SFP              (u32)(1 << 4)
+#define IXGBE_FLAG2_SFP_NEEDS_RESET             (u32)(1 << 5)
+#define IXGBE_FLAG2_RESET_REQUESTED             (u32)(1 << 6)
+#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT        (u32)(1 << 7)
+
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 	u16 bd_number;
-	struct work_struct reset_task;
 	struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
-	char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+
+	/* DCB parameters */
+	struct ieee_pfc *ixgbe_ieee_pfc;
+	struct ieee_ets *ixgbe_ieee_ets;
 	struct ixgbe_dcb_config dcb_cfg;
 	struct ixgbe_dcb_config temp_dcb_cfg;
 	u8 dcb_set_bitmap;
+	u8 dcbx_cap;
 	enum ixgbe_fc_mode last_lfc_mode;
 
 	/* Interrupt Throttle Rate */
@@ -321,43 +429,6 @@ struct ixgbe_adapter {
 	u32 alloc_rx_page_failed;
 	u32 alloc_rx_buff_failed;
 
-	/* Some features need tri-state capability,
-	 * thus the additional *_CAPABLE flags.
-	 */
-	u32 flags;
-#define IXGBE_FLAG_RX_CSUM_ENABLED              (u32)(1)
-#define IXGBE_FLAG_MSI_CAPABLE                  (u32)(1 << 1)
-#define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 2)
-#define IXGBE_FLAG_MSIX_CAPABLE                 (u32)(1 << 3)
-#define IXGBE_FLAG_MSIX_ENABLED                 (u32)(1 << 4)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 6)
-#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 7)
-#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 8)
-#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 9)
-#define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 10)
-#define IXGBE_FLAG_DCA_CAPABLE                  (u32)(1 << 11)
-#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 12)
-#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 13)
-#define IXGBE_FLAG_DCB_ENABLED                  (u32)(1 << 14)
-#define IXGBE_FLAG_RSS_ENABLED                  (u32)(1 << 16)
-#define IXGBE_FLAG_RSS_CAPABLE                  (u32)(1 << 17)
-#define IXGBE_FLAG_VMDQ_CAPABLE                 (u32)(1 << 18)
-#define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 19)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 20)
-#define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 22)
-#define IXGBE_FLAG_IN_SFP_LINK_TASK             (u32)(1 << 23)
-#define IXGBE_FLAG_IN_SFP_MOD_TASK              (u32)(1 << 24)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE            (u32)(1 << 25)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE         (u32)(1 << 26)
-#define IXGBE_FLAG_FCOE_CAPABLE                 (u32)(1 << 27)
-#define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 28)
-#define IXGBE_FLAG_SRIOV_CAPABLE                (u32)(1 << 29)
-#define IXGBE_FLAG_SRIOV_ENABLED                (u32)(1 << 30)
-
-	u32 flags2;
-#define IXGBE_FLAG2_RSC_CAPABLE                 (u32)(1)
-#define IXGBE_FLAG2_RSC_ENABLED                 (u32)(1 << 1)
-#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE         (u32)(1 << 2)
 /* default to trying for four seconds */
 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
 
@@ -378,7 +449,6 @@ struct ixgbe_adapter {
 	u32 rx_eitr_param;
 	u32 tx_eitr_param;
 
-	unsigned long state;
 	u64 tx_busy;
 	unsigned int tx_ring_count;
 	unsigned int rx_ring_count;
@@ -387,15 +457,12 @@ struct ixgbe_adapter {
 	bool link_up;
 	unsigned long link_check_timeout;
 
-	struct work_struct watchdog_task;
-	struct work_struct sfp_task;
-	struct timer_list sfp_timer;
-	struct work_struct multispeed_fiber_task;
-	struct work_struct sfp_config_module_task;
+	struct work_struct service_task;
+	struct timer_list service_timer;
 	u32 fdir_pballoc;
 	u32 atr_sample_rate;
+	unsigned long fdir_overflow; /* number of times ATR was backed off */
 	spinlock_t fdir_perfect_lock;
-	struct work_struct fdir_reinit_task;
 #ifdef IXGBE_FCOE
 	struct ixgbe_fcoe fcoe;
 #endif /* IXGBE_FCOE */
@@ -405,30 +472,44 @@ struct ixgbe_adapter {
 	u16 eeprom_version;
 
 	int node;
-	struct work_struct check_overtemp_task;
+	u32 led_reg;
 	u32 interrupt_event;
+	char lsc_int_name[IFNAMSIZ + 9];
 
 	/* SR-IOV */
 	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
 	unsigned int num_vfs;
 	struct vf_data_storage *vfinfo;
+	int vf_rate_link_speed;
+	struct vf_macvlans vf_mvs;
+	struct vf_macvlans *mv_list;
+	bool antispoofing_enabled;
 };
 
 enum ixbge_state_t {
 	__IXGBE_TESTING,
 	__IXGBE_RESETTING,
 	__IXGBE_DOWN,
-	__IXGBE_FDIR_INIT_DONE,
-	__IXGBE_SFP_MODULE_NOT_FOUND
+	__IXGBE_SERVICE_SCHED,
+	__IXGBE_IN_SFP_INIT,
+};
+
+struct ixgbe_rsc_cb {
+	dma_addr_t dma;
+	u16 skb_cnt;
+	bool delay_unmap;
 };
+#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
 
 enum ixgbe_boards {
 	board_82598,
 	board_82599,
+	board_X540,
 };
 
 extern struct ixgbe_info ixgbe_82598_info;
 extern struct ixgbe_info ixgbe_82599_info;
+extern struct ixgbe_info ixgbe_X540_info;
 #ifdef CONFIG_IXGBE_DCB
 extern const struct dcbnl_rtnl_ops dcbnl_ops;
 extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
@@ -444,40 +525,42 @@ extern void ixgbe_down(struct ixgbe_adapter *adapter);
 extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
 extern void ixgbe_reset(struct ixgbe_adapter *adapter);
 extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
+extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+				   struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
+					 struct ixgbe_adapter *,
+					 struct ixgbe_ring *);
+extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
+					     struct ixgbe_tx_buffer *);
+extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
 extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
 extern int ethtool_ioctl(struct ifreq *ifr);
 extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-						 struct ixgbe_atr_input *input,
+						 union ixgbe_atr_hash_dword input,
+						 union ixgbe_atr_hash_dword common,
 						 u8 queue);
 extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-					       struct ixgbe_atr_input *input,
+					       union ixgbe_atr_input *input,
 					       struct ixgbe_atr_input_masks *input_masks,
 					       u16 soft_id, u8 queue);
-extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
-				       u16 vlan_id);
-extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
-					u32 src_addr);
-extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input,
-					u32 dst_addr);
-extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input,
-					u16 src_port);
-extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input,
-					u16 dst_port);
-extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
-					 u16 flex_byte);
-extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
-				      u8 l4type);
+extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+				   struct ixgbe_ring *ring);
+extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+			       struct ixgbe_ring *ring);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
+extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fso(struct ixgbe_adapter *adapter,
@@ -489,6 +572,8 @@ extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 			 struct sk_buff *skb);
 extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 			      struct scatterlist *sgl, unsigned int sgc);
+extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+				 struct scatterlist *sgl, unsigned int sgc);
 extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
 extern int ixgbe_fcoe_enable(struct net_device *netdev);
 extern int ixgbe_fcoe_disable(struct net_device *netdev);
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 9c02d6014cc4..8179e5060a18 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -37,10 +37,8 @@
 #define IXGBE_82598_RAR_ENTRIES   16
 #define IXGBE_82598_MC_TBL_SIZE   128
 #define IXGBE_82598_VFT_TBL_SIZE  128
+#define IXGBE_82598_RX_PB_SIZE    512
 
-static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
-						    ixgbe_link_speed *speed,
-						    bool *autoneg);
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
 					 ixgbe_link_speed speed,
 					 bool autoneg,
@@ -156,11 +154,12 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
 		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
 		mac->ops.get_link_capabilities =
-			&ixgbe_get_copper_link_capabilities_82598;
+			&ixgbe_get_copper_link_capabilities_generic;
 	}
 
 	switch (hw->phy.type) {
 	case ixgbe_phy_tn:
+		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
 		phy->ops.get_firmware_version =
 			&ixgbe_get_phy_firmware_version_tnx;
@@ -199,14 +198,35 @@ out:
  * @hw: pointer to hardware structure
  *
  * Starts the hardware using the generic start_hw function.
- * Then set pcie completion timeout
+ * Disables relaxed ordering Then set pcie completion timeout
+ *
  **/
 static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
 {
+	u32 regval;
+	u32 i;
 	s32 ret_val = 0;
 
 	ret_val = ixgbe_start_hw_generic(hw);
 
+	/* Disable relaxed ordering */
+	for (i = 0; ((i < hw->mac.max_tx_queues) &&
+	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
+	}
+
+	for (i = 0; ((i < hw->mac.max_rx_queues) &&
+	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+			    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+	}
+
+	hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE;
+
 	/* set the completion timeout for interface */
 	if (ret_val == 0)
 		ixgbe_set_pcie_completion_timeout(hw);
@@ -274,37 +294,6 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
 }
 
 /**
- * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: boolean auto-negotiation value
- *
- * Determines the link capabilities by reading the AUTOC register.
- **/
-static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
-						    ixgbe_link_speed *speed,
-						    bool *autoneg)
-{
-	s32 status = IXGBE_ERR_LINK_SETUP;
-	u16 speed_ability;
-
-	*speed = 0;
-	*autoneg = true;
-
-	status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
-				      &speed_ability);
-
-	if (status == 0) {
-		if (speed_ability & MDIO_SPEED_10G)
-			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (speed_ability & MDIO_PMA_SPEED_1000)
-			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
-	}
-
-	return status;
-}
-
-/**
  * ixgbe_get_media_type_82598 - Determines media type
  * @hw: pointer to hardware structure
  *
@@ -314,10 +303,22 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
 {
 	enum ixgbe_media_type media_type;
 
+	/* Detect if there is a copper PHY attached. */
+	switch (hw->phy.type) {
+	case ixgbe_phy_cu_unknown:
+	case ixgbe_phy_tn:
+	case ixgbe_phy_aq:
+		media_type = ixgbe_media_type_copper;
+		goto out;
+	default:
+		break;
+	}
+
 	/* Media type for I82598 is based on device ID */
 	switch (hw->device_id) {
 	case IXGBE_DEV_ID_82598:
 	case IXGBE_DEV_ID_82598_BX:
+		/* Default device ID is mezzanine card KX/KX4 */
 		media_type = ixgbe_media_type_backplane;
 		break;
 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
@@ -340,7 +341,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
 		media_type = ixgbe_media_type_unknown;
 		break;
 	}
-
+out:
 	return media_type;
 }
 
@@ -357,6 +358,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 	u32 fctrl_reg;
 	u32 rmcs_reg;
 	u32 reg;
+	u32 rx_pba_size;
 	u32 link_speed = 0;
 	bool link_up;
 
@@ -387,7 +389,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 
 	/* Negotiate the fc mode to use */
 	ret_val = ixgbe_fc_autoneg(hw);
-	if (ret_val)
+	if (ret_val == IXGBE_ERR_FLOW_CONTROL)
 		goto out;
 
 	/* Disable any previous flow control settings */
@@ -405,10 +407,10 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 	 * 2: Tx flow control is enabled (we can send pause frames but
 	 *    we do not support receiving pause frames).
 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
-	 * other: Invalid.
 #ifdef CONFIG_DCB
 	 * 4: Priority Flow Control is enabled.
 #endif
+	 * other: Invalid.
 	 */
 	switch (hw->fc.current_mode) {
 	case ixgbe_fc_none:
@@ -459,16 +461,19 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 
 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
 	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
-		if (hw->fc.send_xon) {
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
-					(hw->fc.low_water | IXGBE_FCRTL_XONE));
-		} else {
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
-					hw->fc.low_water);
-		}
+		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+
+		reg = (rx_pba_size - hw->fc.low_water) << 6;
+		if (hw->fc.send_xon)
+			reg |= IXGBE_FCRTL_XONE;
+
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
 
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
-				(hw->fc.high_water | IXGBE_FCRTH_FCEN));
+		reg = (rx_pba_size - hw->fc.high_water) << 6;
+		reg |= IXGBE_FCRTH_FCEN;
+
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
 	}
 
 	/* Configure pause time (2 TCs per register) */
@@ -658,13 +663,12 @@ out:
 	return 0;
 }
 
-
 /**
  * ixgbe_setup_mac_link_82598 - Set MAC link speed
  * @hw: pointer to hardware structure
  * @speed: new link speed
  * @autoneg: true if auto-negotiation enabled
- * @autoneg_wait_to_complete: true if waiting is needed to complete
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
  *
  * Set the link speed in the AUTOC register and restarts link.
  **/
@@ -703,7 +707,8 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
 	 * ixgbe_hw This will write the AUTOC register based on the new
 	 * stored values
 	 */
-	status = ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
+	status = ixgbe_start_mac_link_82598(hw,
+					    autoneg_wait_to_complete);
 	}
 
 	return status;
@@ -729,7 +734,6 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
 	/* Setup the PHY according to input speed */
 	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
 					      autoneg_wait_to_complete);
-
 	/* Set up MAC */
 	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
 
@@ -801,7 +805,6 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
 	else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
 		goto no_phy_reset;
 
-
 		hw->phy.ops.reset(hw);
 	}
 
@@ -810,12 +813,9 @@ no_phy_reset:
 	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
 	 * access and verify no pending requests before reset
 	 */
-	status = ixgbe_disable_pcie_master(hw);
-	if (status != 0) {
-		status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
-		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
-	}
+	ixgbe_disable_pcie_master(hw);
 
+mac_reset_top:
 	/*
 	 * Issue global reset to the MAC. This needs to be a SW reset.
 	 * If link reset is used, it might reset the MAC when mng is using it
@@ -836,6 +836,19 @@ no_phy_reset:
 		hw_dbg(hw, "Reset polling failed to complete.\n");
 	}
 
+	/*
+	 * Double resets are required for recovery from certain error
+	 * conditions. Between resets, it is necessary to stall to allow time
+	 * for any pending HW events to complete. We use 1usec since that is
+	 * what is needed for ixgbe_disable_pcie_master(). The second reset
+	 * then clears out any effects of those events.
+	 */
+	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+		udelay(1);
+		goto mac_reset_top;
+	}
+
 	msleep(50);
 
 	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
@@ -855,15 +868,15 @@ no_phy_reset:
 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
 	}
 
+	/* Store the permanent mac address */
+	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
 	/*
 	 * Store MAC address from RAR0, clear receive address registers, and
 	 * clear the multicast table
 	 */
 	hw->mac.ops.init_rx_addrs(hw);
 
-	/* Store the permanent mac address */
-	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
 reset_hw_out:
 	if (phy_status)
 		status = phy_status;
@@ -880,6 +893,13 @@ reset_hw_out:
 static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 {
 	u32 rar_high;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
 
 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
 	rar_high &= ~IXGBE_RAH_VIND_MASK;
@@ -899,14 +919,17 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 	u32 rar_high;
 	u32 rar_entries = hw->mac.num_rar_entries;
 
-	if (rar < rar_entries) {
-		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
-		if (rar_high & IXGBE_RAH_VIND_MASK) {
-			rar_high &= ~IXGBE_RAH_VIND_MASK;
-			IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
-		}
-	} else {
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
 		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+	if (rar_high & IXGBE_RAH_VIND_MASK) {
+		rar_high &= ~IXGBE_RAH_VIND_MASK;
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
 	}
 
 	return 0;
@@ -1025,13 +1048,12 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
 }
 
 /**
- * ixgbe_read_i2c_eeprom_82598 - Read 8 bit EEPROM word of an SFP+ module
- * over I2C interface through an intermediate phy.
+ * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
  * @hw: pointer to hardware structure
  * @byte_offset: EEPROM byte offset to read
  * @eeprom_data: value read
  *
- * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
  **/
 static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
 				       u8 *eeprom_data)
@@ -1064,7 +1086,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
 		sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
 		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
 			break;
-		msleep(10);
+		usleep_range(10000, 20000);
 	}
 
 	if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
@@ -1105,10 +1127,12 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
 
 	/* Copper PHY must be checked before AUTOC LMS to determine correct
 	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
-	if (hw->phy.type == ixgbe_phy_tn ||
-	    hw->phy.type == ixgbe_phy_cu_unknown) {
-		hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
-				     &ext_ability);
+	switch (hw->phy.type) {
+	case ixgbe_phy_tn:
+	case ixgbe_phy_aq:
+	case ixgbe_phy_cu_unknown:
+		hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE,
+				     MDIO_MMD_PMAPMD, &ext_ability);
 		if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
 		if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
@@ -1116,6 +1140,8 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
 		if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
 		goto out;
+	default:
+		break;
 	}
 
 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -1184,6 +1210,38 @@ out:
 	return physical_layer;
 }
 
+/**
+ * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
+ * port devices.
+ * @hw: pointer to the HW structure
+ *
+ * Calls common function and corrects issue with some single port devices
+ * that enable LAN1 but not LAN0.
+ **/
+static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
+{
+	struct ixgbe_bus_info *bus = &hw->bus;
+	u16 pci_gen = 0;
+	u16 pci_ctrl2 = 0;
+
+	ixgbe_set_lan_id_multi_port_pcie(hw);
+
+	/* check if LAN0 is disabled */
+	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
+	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
+
+		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
+
+		/* if LAN0 is completely disabled force function to 0 */
+		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
+		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
+		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
+
+			bus->func = 0;
+		}
+	}
+}
+
 static struct ixgbe_mac_operations mac_ops_82598 = {
 	.init_hw = &ixgbe_init_hw_generic,
 	.reset_hw = &ixgbe_reset_hw_82598,
@@ -1195,7 +1253,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
 	.get_mac_addr = &ixgbe_get_mac_addr_generic,
 	.stop_adapter = &ixgbe_stop_adapter_generic,
 	.get_bus_info = &ixgbe_get_bus_info_generic,
-	.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
+	.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598,
 	.read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
 	.write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
 	.setup_link = &ixgbe_setup_mac_link_82598,
@@ -1210,18 +1268,21 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
 	.set_vmdq = &ixgbe_set_vmdq_82598,
 	.clear_vmdq = &ixgbe_clear_vmdq_82598,
 	.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
-	.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
 	.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
 	.enable_mc = &ixgbe_enable_mc_generic,
 	.disable_mc = &ixgbe_disable_mc_generic,
 	.clear_vfta = &ixgbe_clear_vfta_82598,
 	.set_vfta = &ixgbe_set_vfta_82598,
 	.fc_enable = &ixgbe_fc_enable_82598,
+	.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
+	.release_swfw_sync = &ixgbe_release_swfw_sync,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
 	.init_params = &ixgbe_init_eeprom_params_generic,
 	.read = &ixgbe_read_eerd_generic,
+	.read_buffer = &ixgbe_read_eerd_buffer_generic,
+	.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
 	.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
 	.update_checksum = &ixgbe_update_eeprom_checksum_generic,
 };
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 3e06a61da921..8ee661245af3 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -38,39 +38,38 @@
 #define IXGBE_82599_RAR_ENTRIES   128
 #define IXGBE_82599_MC_TBL_SIZE   128
 #define IXGBE_82599_VFT_TBL_SIZE  128
+#define IXGBE_82599_RX_PB_SIZE    512
 
-void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
-					  ixgbe_link_speed speed,
-					  bool autoneg,
-					  bool autoneg_wait_to_complete);
+static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+						 ixgbe_link_speed speed,
+						 bool autoneg,
+						 bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 					   ixgbe_link_speed speed,
 					   bool autoneg,
 					   bool autoneg_wait_to_complete);
-s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
-			       bool autoneg_wait_to_complete);
-s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
-			       ixgbe_link_speed speed,
-			       bool autoneg,
-			       bool autoneg_wait_to_complete);
-static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
-						    ixgbe_link_speed *speed,
-						    bool *autoneg);
+static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+				      bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+				      ixgbe_link_speed speed,
+				      bool autoneg,
+				      bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
 					 ixgbe_link_speed speed,
 					 bool autoneg,
 					 bool autoneg_wait_to_complete);
 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
 
 static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
-	if (hw->phy.multispeed_fiber) {
-		/* Set up dual speed SFP+ support */
-		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+
+	/* enable the laser control functions for SFP+ fiber */
+	if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
 		mac->ops.disable_tx_laser =
 			&ixgbe_disable_tx_laser_multispeed_fiber;
 		mac->ops.enable_tx_laser =
@@ -80,10 +79,17 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
 		mac->ops.disable_tx_laser = NULL;
 		mac->ops.enable_tx_laser = NULL;
 		mac->ops.flap_tx_laser = NULL;
+	}
+
+	if (hw->phy.multispeed_fiber) {
+		/* Set up dual speed SFP+ support */
+		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+	} else {
 		if ((mac->ops.get_media_type(hw) ==
 		     ixgbe_media_type_backplane) &&
 		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
-		     hw->phy.smart_speed == ixgbe_smart_speed_on))
+		     hw->phy.smart_speed == ixgbe_smart_speed_on) &&
+		    !ixgbe_verify_lesm_fw_enabled_82599(hw))
 			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
 		else
 			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
@@ -93,6 +99,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
 static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 {
 	s32 ret_val = 0;
+	u32 reg_anlp1 = 0;
+	u32 i = 0;
 	u16 list_offset, data_offset, data_value;
 
 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
@@ -102,12 +110,12 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 
 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
 							      &data_offset);
-
 		if (ret_val != 0)
 			goto setup_sfp_out;
 
 		/* PHY config will finish before releasing the semaphore */
-		ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+							IXGBE_GSSR_MAC_CSR_SM);
 		if (ret_val != 0) {
 			ret_val = IXGBE_ERR_SWFW_SYNC;
 			goto setup_sfp_out;
@@ -119,14 +127,38 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
119 IXGBE_WRITE_FLUSH(hw); 127 IXGBE_WRITE_FLUSH(hw);
120 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 128 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
121 } 129 }
122 /* Now restart DSP by setting Restart_AN */
123 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
124 (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
125 130
126 /* Release the semaphore */ 131 /* Release the semaphore */
127 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 132 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
128 /* Delay obtaining semaphore again to allow FW access */ 133 /*
129 msleep(hw->eeprom.semaphore_delay); 134 * Delay obtaining semaphore again to allow FW access,
135 * semaphore_delay is in ms usleep_range needs us.
136 */
137 usleep_range(hw->eeprom.semaphore_delay * 1000,
138 hw->eeprom.semaphore_delay * 2000);
139
140 /* Now restart DSP by setting Restart_AN and clearing LMS */
141 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
142 IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
143 IXGBE_AUTOC_AN_RESTART));
144
145 /* Wait for AN to leave state 0 */
146 for (i = 0; i < 10; i++) {
147 usleep_range(4000, 8000);
148 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
149 if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
150 break;
151 }
152 if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
153 hw_dbg(hw, "sfp module setup not complete\n");
154 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
155 goto setup_sfp_out;
156 }
157
158 /* Restart DSP by setting Restart_AN and return to SFI mode */
159 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
160 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
161 IXGBE_AUTOC_AN_RESTART));
130 } 162 }
131 163
132setup_sfp_out: 164setup_sfp_out:
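The AN-state wait added in this hunk follows the driver's usual poll-with-timeout idiom: sleep, read the register, test a mask, and treat loop exhaustion as an error. A minimal standalone sketch of that idiom, assuming hypothetical read_reg() and sleep_range_us() helpers in place of IXGBE_READ_REG() and usleep_range():

    #include <stdbool.h>
    #include <stdint.h>

    /* hypothetical stand-ins for IXGBE_READ_REG() and usleep_range() */
    extern uint32_t read_reg(uint32_t offset);
    extern void sleep_range_us(unsigned long min_us, unsigned long max_us);

    /* Poll 'offset' until a bit under 'mask' is set or 'tries' runs out. */
    static bool poll_reg_for_mask(uint32_t offset, uint32_t mask, int tries)
    {
            uint32_t val = 0;
            int i;

            for (i = 0; i < tries; i++) {
                    sleep_range_us(4000, 8000);     /* same 4-8 ms window */
                    val = read_reg(offset);
                    if (val & mask)
                            break;
            }
            /* mirrors the post-loop re-check above; false maps to an error */
            return (val & mask) != 0;
    }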
@@ -174,7 +206,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
174 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 206 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
175 mac->ops.setup_link = &ixgbe_setup_copper_link_82599; 207 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
176 mac->ops.get_link_capabilities = 208 mac->ops.get_link_capabilities =
177 &ixgbe_get_copper_link_capabilities_82599; 209 &ixgbe_get_copper_link_capabilities_generic;
178 } 210 }
179 211
180 /* Set necessary function pointers based on phy type */ 212 /* Set necessary function pointers based on phy type */
@@ -184,6 +216,10 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
184 phy->ops.get_firmware_version = 216 phy->ops.get_firmware_version =
185 &ixgbe_get_phy_firmware_version_tnx; 217 &ixgbe_get_phy_firmware_version_tnx;
186 break; 218 break;
219 case ixgbe_phy_aq:
220 phy->ops.get_firmware_version =
221 &ixgbe_get_phy_firmware_version_generic;
222 break;
187 default: 223 default:
188 break; 224 break;
189 } 225 }
@@ -290,37 +326,6 @@ out:
290} 326}
291 327
292/** 328/**
293 * ixgbe_get_copper_link_capabilities_82599 - Determines link capabilities
294 * @hw: pointer to hardware structure
295 * @speed: pointer to link speed
296 * @autoneg: boolean auto-negotiation value
297 *
298 * Determines the link capabilities by reading the AUTOC register.
299 **/
300static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
301 ixgbe_link_speed *speed,
302 bool *autoneg)
303{
304 s32 status = IXGBE_ERR_LINK_SETUP;
305 u16 speed_ability;
306
307 *speed = 0;
308 *autoneg = true;
309
310 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
311 &speed_ability);
312
313 if (status == 0) {
314 if (speed_ability & MDIO_SPEED_10G)
315 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
316 if (speed_ability & MDIO_PMA_SPEED_1000)
317 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
318 }
319
320 return status;
321}
322
323/**
324 * ixgbe_get_media_type_82599 - Get media type 329 * ixgbe_get_media_type_82599 - Get media type
325 * @hw: pointer to hardware structure 330 * @hw: pointer to hardware structure
326 * 331 *
@@ -331,10 +336,14 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
331 enum ixgbe_media_type media_type; 336 enum ixgbe_media_type media_type;
332 337
333 /* Detect if there is a copper PHY attached. */ 338 /* Detect if there is a copper PHY attached. */
334 if (hw->phy.type == ixgbe_phy_cu_unknown || 339 switch (hw->phy.type) {
335 hw->phy.type == ixgbe_phy_tn) { 340 case ixgbe_phy_cu_unknown:
341 case ixgbe_phy_tn:
342 case ixgbe_phy_aq:
336 media_type = ixgbe_media_type_copper; 343 media_type = ixgbe_media_type_copper;
337 goto out; 344 goto out;
345 default:
346 break;
338 } 347 }
339 348
340 switch (hw->device_id) { 349 switch (hw->device_id) {
@@ -342,17 +351,26 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
342 case IXGBE_DEV_ID_82599_KX4_MEZZ: 351 case IXGBE_DEV_ID_82599_KX4_MEZZ:
343 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: 352 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
344 case IXGBE_DEV_ID_82599_KR: 353 case IXGBE_DEV_ID_82599_KR:
354 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
345 case IXGBE_DEV_ID_82599_XAUI_LOM: 355 case IXGBE_DEV_ID_82599_XAUI_LOM:
346 /* Default device ID is mezzanine card KX/KX4 */ 356 /* Default device ID is mezzanine card KX/KX4 */
347 media_type = ixgbe_media_type_backplane; 357 media_type = ixgbe_media_type_backplane;
348 break; 358 break;
349 case IXGBE_DEV_ID_82599_SFP: 359 case IXGBE_DEV_ID_82599_SFP:
360 case IXGBE_DEV_ID_82599_SFP_FCOE:
350 case IXGBE_DEV_ID_82599_SFP_EM: 361 case IXGBE_DEV_ID_82599_SFP_EM:
362 case IXGBE_DEV_ID_82599_SFP_SF2:
351 media_type = ixgbe_media_type_fiber; 363 media_type = ixgbe_media_type_fiber;
352 break; 364 break;
353 case IXGBE_DEV_ID_82599_CX4: 365 case IXGBE_DEV_ID_82599_CX4:
354 media_type = ixgbe_media_type_cx4; 366 media_type = ixgbe_media_type_cx4;
355 break; 367 break;
368 case IXGBE_DEV_ID_82599_T3_LOM:
369 media_type = ixgbe_media_type_copper;
370 break;
371 case IXGBE_DEV_ID_82599_LS:
372 media_type = ixgbe_media_type_fiber_lco;
373 break;
356 default: 374 default:
357 media_type = ixgbe_media_type_unknown; 375 media_type = ixgbe_media_type_unknown;
358 break; 376 break;
@@ -369,7 +387,7 @@ out:
369 * Configures link settings based on values in the ixgbe_hw struct. 387 * Configures link settings based on values in the ixgbe_hw struct.
370 * Restarts the link. Performs autonegotiation if needed. 388 * Restarts the link. Performs autonegotiation if needed.
371 **/ 389 **/
372s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 390static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
373 bool autoneg_wait_to_complete) 391 bool autoneg_wait_to_complete)
374{ 392{
375 u32 autoc_reg; 393 u32 autoc_reg;
@@ -410,15 +428,15 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
410 return status; 428 return status;
411} 429}
412 430
413 /** 431/**
414 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser 432 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
415 * @hw: pointer to hardware structure 433 * @hw: pointer to hardware structure
416 * 434 *
417 * The base drivers may require better control over SFP+ module 435 * The base drivers may require better control over SFP+ module
418 * PHY states. This includes selectively shutting down the Tx 436 * PHY states. This includes selectively shutting down the Tx
419 * laser on the PHY, effectively halting physical link. 437 * laser on the PHY, effectively halting physical link.
420 **/ 438 **/
421void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 439static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
422{ 440{
423 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 441 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
424 442
@@ -437,7 +455,7 @@ void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
437 * PHY states. This includes selectively turning on the Tx 455 * PHY states. This includes selectively turning on the Tx
438 * laser on the PHY, effectively starting physical link. 456 * laser on the PHY, effectively starting physical link.
439 **/ 457 **/
440void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 458static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
441{ 459{
442 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 460 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
443 461
@@ -460,10 +478,8 @@ void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
460 * end. This is consistent with true clause 37 autoneg, which also 478 * end. This is consistent with true clause 37 autoneg, which also
461 * involves a loss of signal. 479 * involves a loss of signal.
462 **/ 480 **/
463void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 481static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
464{ 482{
465 hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
466
467 if (hw->mac.autotry_restart) { 483 if (hw->mac.autotry_restart) {
468 ixgbe_disable_tx_laser_multispeed_fiber(hw); 484 ixgbe_disable_tx_laser_multispeed_fiber(hw);
469 ixgbe_enable_tx_laser_multispeed_fiber(hw); 485 ixgbe_enable_tx_laser_multispeed_fiber(hw);
@@ -480,23 +496,27 @@ void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
480 * 496 *
481 * Set the link speed in the AUTOC register and restarts link. 497 * Set the link speed in the AUTOC register and restarts link.
482 **/ 498 **/
483s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 499static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
484 ixgbe_link_speed speed, 500 ixgbe_link_speed speed,
485 bool autoneg, 501 bool autoneg,
486 bool autoneg_wait_to_complete) 502 bool autoneg_wait_to_complete)
487{ 503{
488 s32 status = 0; 504 s32 status = 0;
489 ixgbe_link_speed phy_link_speed; 505 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
490 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; 506 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
491 u32 speedcnt = 0; 507 u32 speedcnt = 0;
492 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 508 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
509 u32 i = 0;
493 bool link_up = false; 510 bool link_up = false;
494 bool negotiation; 511 bool negotiation;
495 int i;
496 512
497 /* Mask off requested but non-supported speeds */ 513 /* Mask off requested but non-supported speeds */
498 hw->mac.ops.get_link_capabilities(hw, &phy_link_speed, &negotiation); 514 status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
499 speed &= phy_link_speed; 515 &negotiation);
516 if (status != 0)
517 return status;
518
519 speed &= link_speed;
500 520
501 /* 521 /*
502 * Try each speed one by one, highest priority first. We do this in 522 * Try each speed one by one, highest priority first. We do this in
@@ -507,9 +527,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
507 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; 527 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
508 528
509 /* If we already have link at this speed, just jump out */ 529 /* If we already have link at this speed, just jump out */
510 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); 530 status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
531 false);
532 if (status != 0)
533 return status;
511 534
512 if ((phy_link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) 535 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
513 goto out; 536 goto out;
514 537
515 /* Set the module link speed */ 538 /* Set the module link speed */
@@ -521,9 +544,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
521 msleep(40); 544 msleep(40);
522 545
523 status = ixgbe_setup_mac_link_82599(hw, 546 status = ixgbe_setup_mac_link_82599(hw,
524 IXGBE_LINK_SPEED_10GB_FULL, 547 IXGBE_LINK_SPEED_10GB_FULL,
525 autoneg, 548 autoneg,
526 autoneg_wait_to_complete); 549 autoneg_wait_to_complete);
527 if (status != 0) 550 if (status != 0)
528 return status; 551 return status;
529 552
@@ -535,14 +558,16 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
535 * Section 73.10.2, we may have to wait up to 500ms if KR is 558 * Section 73.10.2, we may have to wait up to 500ms if KR is
536 * attempted. 82599 uses the same timing for 10g SFI. 559 * attempted. 82599 uses the same timing for 10g SFI.
537 */ 560 */
538
539 for (i = 0; i < 5; i++) { 561 for (i = 0; i < 5; i++) {
540 /* Wait for the link partner to also set speed */ 562 /* Wait for the link partner to also set speed */
541 msleep(100); 563 msleep(100);
542 564
543 /* If we have link, just jump out */ 565 /* If we have link, just jump out */
544 hw->mac.ops.check_link(hw, &phy_link_speed, 566 status = hw->mac.ops.check_link(hw, &link_speed,
545 &link_up, false); 567 &link_up, false);
568 if (status != 0)
569 return status;
570
546 if (link_up) 571 if (link_up)
547 goto out; 572 goto out;
548 } 573 }
@@ -554,9 +579,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
554 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; 579 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
555 580
556 /* If we already have link at this speed, just jump out */ 581 /* If we already have link at this speed, just jump out */
557 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); 582 status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
583 false);
584 if (status != 0)
585 return status;
558 586
559 if ((phy_link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) 587 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
560 goto out; 588 goto out;
561 589
562 /* Set the module link speed */ 590 /* Set the module link speed */
@@ -569,9 +597,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
569 msleep(40); 597 msleep(40);
570 598
571 status = ixgbe_setup_mac_link_82599(hw, 599 status = ixgbe_setup_mac_link_82599(hw,
572 IXGBE_LINK_SPEED_1GB_FULL, 600 IXGBE_LINK_SPEED_1GB_FULL,
573 autoneg, 601 autoneg,
574 autoneg_wait_to_complete); 602 autoneg_wait_to_complete);
575 if (status != 0) 603 if (status != 0)
576 return status; 604 return status;
577 605
@@ -582,7 +610,11 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
582 msleep(100); 610 msleep(100);
583 611
584 /* If we have link, just jump out */ 612 /* If we have link, just jump out */
585 hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); 613 status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
614 false);
615 if (status != 0)
616 return status;
617
586 if (link_up) 618 if (link_up)
587 goto out; 619 goto out;
588 } 620 }
@@ -625,13 +657,10 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
625 bool autoneg_wait_to_complete) 657 bool autoneg_wait_to_complete)
626{ 658{
627 s32 status = 0; 659 s32 status = 0;
628 ixgbe_link_speed link_speed; 660 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
629 s32 i, j; 661 s32 i, j;
630 bool link_up = false; 662 bool link_up = false;
631 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 663 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
632 struct ixgbe_adapter *adapter = hw->back;
633
634 hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
635 664
636 /* Set autoneg_advertised value based on input link speed */ 665 /* Set autoneg_advertised value based on input link speed */
637 hw->phy.autoneg_advertised = 0; 666 hw->phy.autoneg_advertised = 0;
@@ -657,7 +686,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
657 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { 686 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
658 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, 687 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
659 autoneg_wait_to_complete); 688 autoneg_wait_to_complete);
660 if (status) 689 if (status != 0)
661 goto out; 690 goto out;
662 691
663 /* 692 /*
@@ -670,8 +699,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
670 mdelay(100); 699 mdelay(100);
671 700
672 /* If we have link, just jump out */ 701 /* If we have link, just jump out */
673 hw->mac.ops.check_link(hw, &link_speed, 702 status = hw->mac.ops.check_link(hw, &link_speed,
674 &link_up, false); 703 &link_up, false);
704 if (status != 0)
705 goto out;
706
675 if (link_up) 707 if (link_up)
676 goto out; 708 goto out;
677 } 709 }
@@ -689,7 +721,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
689 hw->phy.smart_speed_active = true; 721 hw->phy.smart_speed_active = true;
690 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, 722 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
691 autoneg_wait_to_complete); 723 autoneg_wait_to_complete);
692 if (status) 724 if (status != 0)
693 goto out; 725 goto out;
694 726
695 /* 727 /*
@@ -702,8 +734,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
702 mdelay(100); 734 mdelay(100);
703 735
704 /* If we have link, just jump out */ 736 /* If we have link, just jump out */
705 hw->mac.ops.check_link(hw, &link_speed, 737 status = hw->mac.ops.check_link(hw, &link_speed,
706 &link_up, false); 738 &link_up, false);
739 if (status != 0)
740 goto out;
741
707 if (link_up) 742 if (link_up)
708 goto out; 743 goto out;
709 } 744 }
@@ -715,7 +750,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
715 750
716out: 751out:
717 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) 752 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
718 e_info(hw, "Smartspeed has downgraded the link speed from " 753 hw_dbg(hw, "Smartspeed has downgraded the link speed from "
719 "the maximum advertised\n"); 754 "the maximum advertised\n");
720 return status; 755 return status;
721} 756}
@@ -729,7 +764,7 @@ out:
729 * 764 *
730 * Set the link speed in the AUTOC register and restarts link. 765 * Set the link speed in the AUTOC register and restarts link.
731 **/ 766 **/
732s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, 767static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
733 ixgbe_link_speed speed, bool autoneg, 768 ixgbe_link_speed speed, bool autoneg,
734 bool autoneg_wait_to_complete) 769 bool autoneg_wait_to_complete)
735{ 770{
@@ -747,6 +782,9 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
747 782
748 /* Check to see if speed passed in is supported. */ 783 /* Check to see if speed passed in is supported. */
749 hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg); 784 hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg);
785 if (status != 0)
786 goto out;
787
750 speed &= link_capabilities; 788 speed &= link_capabilities;
751 789
752 if (speed == IXGBE_LINK_SPEED_UNKNOWN) { 790 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
@@ -760,7 +798,6 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
760 else 798 else
761 orig_autoc = autoc; 799 orig_autoc = autoc;
762 800
763
764 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 801 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
765 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 802 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
766 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 803 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
@@ -877,7 +914,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
877 914
878 /* PHY ops must be identified and initialized prior to reset */ 915 /* PHY ops must be identified and initialized prior to reset */
879 916
880 /* Init PHY and function pointers, perform SFP setup */ 917 /* Identify PHY and related function pointers */
881 status = hw->phy.ops.init(hw); 918 status = hw->phy.ops.init(hw);
882 919
883 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) 920 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
@@ -889,6 +926,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
889 hw->phy.sfp_setup_needed = false; 926 hw->phy.sfp_setup_needed = false;
890 } 927 }
891 928
929 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
930 goto reset_hw_out;
931
892 /* Reset PHY */ 932 /* Reset PHY */
893 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) 933 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
894 hw->phy.ops.reset(hw); 934 hw->phy.ops.reset(hw);
@@ -897,12 +937,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
897 * Prevent the PCI-E bus from hanging by disabling PCI-E master 937
898 * access and verify no pending requests before reset 938 * access and verify no pending requests before reset
899 */ 939 */
900 status = ixgbe_disable_pcie_master(hw); 940 ixgbe_disable_pcie_master(hw);
901 if (status != 0) {
902 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
903 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
904 }
905 941
942mac_reset_top:
906 /* 943 /*
907 * Issue global reset to the MAC. This needs to be a SW reset. 944 * Issue global reset to the MAC. This needs to be a SW reset.
908 * If link reset is used, it might reset the MAC when mng is using it 945 * If link reset is used, it might reset the MAC when mng is using it
@@ -923,6 +960,19 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
923 hw_dbg(hw, "Reset polling failed to complete.\n"); 960 hw_dbg(hw, "Reset polling failed to complete.\n");
924 } 961 }
925 962
963 /*
964 * Double resets are required for recovery from certain error
965 * conditions. Between resets, it is necessary to stall to allow time
966 * for any pending HW events to complete. We use 1usec since that is
967 * what is needed for ixgbe_disable_pcie_master(). The second reset
968 * then clears out any effects of those events.
969 */
970 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
971 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
972 udelay(1);
973 goto mac_reset_top;
974 }
975
926 msleep(50); 976 msleep(50);
927 977
928 /* 978 /*
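The double reset added in this hunk is a one-shot retry: the flag is cleared on the first pass, so the jump back to mac_reset_top fires at most once, with a 1 us stall in between so pending hardware events can drain. A sketch of the equivalent control flow as a bounded loop, with hypothetical mac_reset() and delay_us() helpers rather than the driver's API:

    #include <stdbool.h>

    extern void mac_reset(void);   /* stands in for the reset write + poll */
    extern void delay_us(int us);  /* stands in for udelay() */

    static void reset_mac(bool double_reset_required)
    {
            int passes = double_reset_required ? 2 : 1;

            while (passes--) {
                    mac_reset();
                    if (passes)
                            delay_us(1);  /* stall between the two resets */
            }
    }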
@@ -950,6 +1000,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
950 } 1000 }
951 } 1001 }
952 1002
1003 /* Store the permanent mac address */
1004 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1005
953 /* 1006 /*
954 * Store MAC address from RAR0, clear receive address registers, and 1007 * Store MAC address from RAR0, clear receive address registers, and
955 * clear the multicast table. Also reset num_rar_entries to 128, 1008 * clear the multicast table. Also reset num_rar_entries to 128,
@@ -958,9 +1011,6 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
958 hw->mac.num_rar_entries = 128; 1011 hw->mac.num_rar_entries = 128;
959 hw->mac.ops.init_rx_addrs(hw); 1012 hw->mac.ops.init_rx_addrs(hw);
960 1013
961 /* Store the permanent mac address */
962 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
963
964 /* Store the permanent SAN mac address */ 1014 /* Store the permanent SAN mac address */
965 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); 1015 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
966 1016
@@ -1002,7 +1052,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1002 udelay(10); 1052 udelay(10);
1003 } 1053 }
1004 if (i >= IXGBE_FDIRCMD_CMD_POLL) { 1054 if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1005 hw_dbg(hw ,"Flow Director previous command isn't complete, " 1055 hw_dbg(hw, "Flow Director previous command isn't complete, "
1006 "aborting table re-initialization.\n"); 1056 "aborting table re-initialization.\n");
1007 return IXGBE_ERR_FDIR_REINIT_FAILED; 1057 return IXGBE_ERR_FDIR_REINIT_FAILED;
1008 } 1058 }
@@ -1078,7 +1128,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
1078 1128
1079 /* 1129 /*
1080 * The defaults in the HW for RX PB 1-7 are not zero and so should be 1130 * The defaults in the HW for RX PB 1-7 are not zero and so should be
1081 * intialized to zero for non DCB mode otherwise actual total RX PB 1131 * initialized to zero for non DCB mode otherwise actual total RX PB
1082 * would be bigger than programmed and filter space would run into 1132 * would be bigger than programmed and filter space would run into
1083 * the PB 0 region. 1133 * the PB 0 region.
1084 */ 1134 */
@@ -1112,13 +1162,10 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
1112 /* Move the flexible bytes to use the ethertype - shift 6 words */ 1162 /* Move the flexible bytes to use the ethertype - shift 6 words */
1113 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1163 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1114 1164
1115 fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
1116 1165
1117 /* Prime the keys for hashing */ 1166 /* Prime the keys for hashing */
1118 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, 1167 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1119 htonl(IXGBE_ATR_BUCKET_HASH_KEY)); 1168 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1120 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1121 htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
1122 1169
1123 /* 1170 /*
1124 * Poll init-done after we write the register. Estimated times: 1171 * Poll init-done after we write the register. Estimated times:
@@ -1139,7 +1186,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
1139 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1186 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1140 IXGBE_FDIRCTRL_INIT_DONE) 1187 IXGBE_FDIRCTRL_INIT_DONE)
1141 break; 1188 break;
1142 msleep(1); 1189 usleep_range(1000, 2000);
1143 } 1190 }
1144 if (i >= IXGBE_FDIR_INIT_DONE_POLL) 1191 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1145 hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); 1192 hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
@@ -1169,7 +1216,7 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1169 1216
1170 /* 1217 /*
1171 * The defaults in the HW for RX PB 1-7 are not zero and so should be 1218 * The defaults in the HW for RX PB 1-7 are not zero and so should be
1172 * intialized to zero for non DCB mode otherwise actual total RX PB 1219 * initialized to zero for non DCB mode otherwise actual total RX PB
1173 * would be bigger than programmed and filter space would run into 1220 * would be bigger than programmed and filter space would run into
1174 * the PB 0 region. 1221 * the PB 0 region.
1175 */ 1222 */
@@ -1208,10 +1255,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1208 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1255 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1209 1256
1210 /* Prime the keys for hashing */ 1257 /* Prime the keys for hashing */
1211 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, 1258 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1212 htonl(IXGBE_ATR_BUCKET_HASH_KEY)); 1259 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1213 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1214 htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
1215 1260
1216 /* 1261 /*
1217 * Poll init-done after we write the register. Estimated times: 1262 * Poll init-done after we write the register. Estimated times:
@@ -1236,7 +1281,7 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1236 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1281 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1237 IXGBE_FDIRCTRL_INIT_DONE) 1282 IXGBE_FDIRCTRL_INIT_DONE)
1238 break; 1283 break;
1239 msleep(1); 1284 usleep_range(1000, 2000);
1240 } 1285 }
1241 if (i >= IXGBE_FDIR_INIT_DONE_POLL) 1286 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1242 hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n"); 1287 hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n");
@@ -1250,8 +1295,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1250 * @stream: input bitstream to compute the hash on 1295 * @stream: input bitstream to compute the hash on
1251 * @key: 32-bit hash key 1296 * @key: 32-bit hash key
1252 **/ 1297 **/
1253static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, 1298static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
1254 u32 key) 1299 u32 key)
1255{ 1300{
1256 /* 1301 /*
1257 * The algorithm is as follows: 1302 * The algorithm is as follows:
@@ -1271,557 +1316,250 @@ static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
1271 * To simplify for programming, the algorithm is implemented 1316 * To simplify for programming, the algorithm is implemented
1272 * in software this way: 1317 * in software this way:
1273 * 1318 *
1274 * Key[31:0], Stream[335:0] 1319 * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
1320 *
1321 * for (i = 0; i < 352; i+=32)
1322 * hi_hash_dword[31:0] ^= Stream[(i+31):i];
1323 *
1324 * lo_hash_dword[15:0] ^= Stream[15:0];
1325 * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
1326 * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
1327 *
1328 * hi_hash_dword[31:0] ^= Stream[351:320];
1275 * 1329 *
1276 * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times 1330 * if(key[0])
1277 * int_key[350:0] = tmp_key[351:1] 1331 * hash[15:0] ^= Stream[15:0];
1278 * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
1279 * 1332 *
1280 * hash[15:0] = 0; 1333 * for (i = 0; i < 16; i++) {
1281 * for (i = 0; i < 351; i++) { 1334 * if (key[i])
1282 * if (int_key[i]) 1335 * hash[15:0] ^= lo_hash_dword[(i+15):i];
1283 * hash ^= int_stream[(i + 15):i]; 1336 * if (key[i + 16])
1337 * hash[15:0] ^= hi_hash_dword[(i+15):i];
1284 * } 1338 * }
1339 *
1285 */ 1340 */
1341 __be32 common_hash_dword = 0;
1342 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1343 u32 hash_result = 0;
1344 u8 i;
1286 1345
1287 union { 1346 /* record the flow_vm_vlan bits as they are a key part to the hash */
1288 u64 fill[6]; 1347 flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
1289 u32 key[11];
1290 u8 key_stream[44];
1291 } tmp_key;
1292 1348
1293 u8 *stream = (u8 *)atr_input; 1349 /* generate common hash dword */
1294 u8 int_key[44]; /* upper-most bit unused */ 1350 for (i = 10; i; i -= 2)
1295 u8 hash_str[46]; /* upper-most 2 bits unused */ 1351 common_hash_dword ^= atr_input->dword_stream[i] ^
1296 u16 hash_result = 0; 1352 atr_input->dword_stream[i - 1];
1297 int i, j, k, h;
1298 1353
1299 /* 1354 hi_hash_dword = ntohl(common_hash_dword);
1300 * Initialize the fill member to prevent warnings
1301 * on some compilers
1302 */
1303 tmp_key.fill[0] = 0;
1304 1355
1305 /* First load the temporary key stream */ 1356 /* low dword is word swapped version of common */
1306 for (i = 0; i < 6; i++) { 1357 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1307 u64 fillkey = ((u64)key << 32) | key;
1308 tmp_key.fill[i] = fillkey;
1309 }
1310 1358
1311 /* 1359 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1312 * Set the interim key for the hashing. Bit 352 is unused, so we must 1360 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1313 * shift and compensate when building the key.
1314 */
1315
1316 int_key[0] = tmp_key.key_stream[0] >> 1;
1317 for (i = 1, j = 0; i < 44; i++) {
1318 unsigned int this_key = tmp_key.key_stream[j] << 7;
1319 j++;
1320 int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
1321 }
1322 1361
1323 /* 1362 /* Process bits 0 and 16 */
1324 * Set the interim bit string for the hashing. Bits 368 and 367 are 1363 if (key & 0x0001) hash_result ^= lo_hash_dword;
1325 * unused, so shift and compensate when building the string. 1364 if (key & 0x00010000) hash_result ^= hi_hash_dword;
1326 */
1327 hash_str[0] = (stream[40] & 0x7f) >> 1;
1328 for (i = 1, j = 40; i < 46; i++) {
1329 unsigned int this_str = stream[j] << 7;
1330 j++;
1331 if (j > 41)
1332 j = 0;
1333 hash_str[i] = (u8)(this_str | (stream[j] >> 1));
1334 }
1335 1365
1336 /* 1366 /*
1337 * Now compute the hash. i is the index into hash_str, j is into our 1367 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1338 * key stream, k is counting the number of bits, and h interates within 1368 * delay this because bit 0 of the stream should not be processed
1339 * each byte. 1369 * so we do not add the vlan until after bit 0 was processed
1340 */ 1370 */
1341 for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) { 1371 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1342 for (h = 0; h < 8 && k < 351; h++, k++) {
1343 if (int_key[j] & (1 << h)) {
1344 /*
1345 * Key bit is set, XOR in the current 16-bit
1346 * string. Example of processing:
1347 * h = 0,
1348 * tmp = (hash_str[i - 2] & 0 << 16) |
1349 * (hash_str[i - 1] & 0xff << 8) |
1350 * (hash_str[i] & 0xff >> 0)
1351 * So tmp = hash_str[15 + k:k], since the
1352 * i + 2 clause rolls off the 16-bit value
1353 * h = 7,
1354 * tmp = (hash_str[i - 2] & 0x7f << 9) |
1355 * (hash_str[i - 1] & 0xff << 1) |
1356 * (hash_str[i] & 0x80 >> 7)
1357 */
1358 int tmp = (hash_str[i] >> h);
1359 tmp |= (hash_str[i - 1] << (8 - h));
1360 tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
1361 << (16 - h);
1362 hash_result ^= (u16)tmp;
1363 }
1364 }
1365 }
1366
1367 return hash_result;
1368}
1369
1370/**
1371 * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
1372 * @input: input stream to modify
1373 * @vlan: the VLAN id to load
1374 **/
1375s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
1376{
1377 input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
1378 input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
1379
1380 return 0;
1381}
1382
1383/**
1384 * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
1385 * @input: input stream to modify
1386 * @src_addr: the IP address to load
1387 **/
1388s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
1389{
1390 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
1391 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
1392 (src_addr >> 16) & 0xff;
1393 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
1394 (src_addr >> 8) & 0xff;
1395 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
1396
1397 return 0;
1398}
1399
1400/**
1401 * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
1402 * @input: input stream to modify
1403 * @dst_addr: the IP address to load
1404 **/
1405s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
1406{
1407 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
1408 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
1409 (dst_addr >> 16) & 0xff;
1410 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
1411 (dst_addr >> 8) & 0xff;
1412 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
1413
1414 return 0;
1415}
1416
1417/**
1418 * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
1419 * @input: input stream to modify
1420 * @src_addr_1: the first 4 bytes of the IP address to load
1421 * @src_addr_2: the second 4 bytes of the IP address to load
1422 * @src_addr_3: the third 4 bytes of the IP address to load
1423 * @src_addr_4: the fourth 4 bytes of the IP address to load
1424 **/
1425s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
1426 u32 src_addr_1, u32 src_addr_2,
1427 u32 src_addr_3, u32 src_addr_4)
1428{
1429 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
1430 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
1431 (src_addr_4 >> 8) & 0xff;
1432 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
1433 (src_addr_4 >> 16) & 0xff;
1434 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
1435
1436 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
1437 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
1438 (src_addr_3 >> 8) & 0xff;
1439 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
1440 (src_addr_3 >> 16) & 0xff;
1441 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
1442
1443 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
1444 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
1445 (src_addr_2 >> 8) & 0xff;
1446 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
1447 (src_addr_2 >> 16) & 0xff;
1448 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
1449
1450 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
1451 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
1452 (src_addr_1 >> 8) & 0xff;
1453 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
1454 (src_addr_1 >> 16) & 0xff;
1455 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
1456
1457 return 0;
1458}
1459
1460/**
1461 * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
1462 * @input: input stream to modify
1463 * @dst_addr_1: the first 4 bytes of the IP address to load
1464 * @dst_addr_2: the second 4 bytes of the IP address to load
1465 * @dst_addr_3: the third 4 bytes of the IP address to load
1466 * @dst_addr_4: the fourth 4 bytes of the IP address to load
1467 **/
1468s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
1469 u32 dst_addr_1, u32 dst_addr_2,
1470 u32 dst_addr_3, u32 dst_addr_4)
1471{
1472 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
1473 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
1474 (dst_addr_4 >> 8) & 0xff;
1475 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
1476 (dst_addr_4 >> 16) & 0xff;
1477 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
1478
1479 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
1480 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
1481 (dst_addr_3 >> 8) & 0xff;
1482 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
1483 (dst_addr_3 >> 16) & 0xff;
1484 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
1485
1486 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
1487 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
1488 (dst_addr_2 >> 8) & 0xff;
1489 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
1490 (dst_addr_2 >> 16) & 0xff;
1491 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
1492
1493 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
1494 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
1495 (dst_addr_1 >> 8) & 0xff;
1496 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
1497 (dst_addr_1 >> 16) & 0xff;
1498 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
1499
1500 return 0;
1501}
1502
1503/**
1504 * ixgbe_atr_set_src_port_82599 - Sets the source port
1505 * @input: input stream to modify
1506 * @src_port: the source port to load
1507 **/
1508s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
1509{
1510 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
1511 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
1512
1513 return 0;
1514}
1515
1516/**
1517 * ixgbe_atr_set_dst_port_82599 - Sets the destination port
1518 * @input: input stream to modify
1519 * @dst_port: the destination port to load
1520 **/
1521s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
1522{
1523 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
1524 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
1525
1526 return 0;
1527}
1528
1529/**
1530 * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
1531 * @input: input stream to modify
1532 * @flex_bytes: the flexible bytes to load
1533 **/
1534s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
1535{
1536 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
1537 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
1538
1539 return 0;
1540}
1541
1542/**
1543 * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
1544 * @input: input stream to modify
1545 * @vm_pool: the Virtual Machine pool to load
1546 **/
1547s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input,
1548 u8 vm_pool)
1549{
1550 input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
1551
1552 return 0;
1553}
1554
1555/**
1556 * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
1557 * @input: input stream to modify
1558 * @l4type: the layer 4 type value to load
1559 **/
1560s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
1561{
1562 input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
1563
1564 return 0;
1565}
1566
1567/**
1568 * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
1569 * @input: input stream to search
1570 * @vlan: the VLAN id to load
1571 **/
1572static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
1573{
1574 *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
1575 *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
1576
1577 return 0;
1578}
1579 1372
1580/**
1581 * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
1582 * @input: input stream to search
1583 * @src_addr: the IP address to load
1584 **/
1585static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
1586 u32 *src_addr)
1587{
1588 *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
1589 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
1590 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
1591 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
1592 1373
1593 return 0; 1374 /* process the remaining 30 bits in the key 2 bits at a time */
1594} 1375 for (i = 15; i; i-- ) {
1595 1376 if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
1596/** 1377 if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
1597 * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address 1378 }
1598 * @input: input stream to search
1599 * @dst_addr: the IP address to load
1600 **/
1601static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input,
1602 u32 *dst_addr)
1603{
1604 *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
1605 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
1606 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
1607 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
1608
1609 return 0;
1610}
1611
1612/**
1613 * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
1614 * @input: input stream to search
1615 * @src_addr_1: the first 4 bytes of the IP address to load
1616 * @src_addr_2: the second 4 bytes of the IP address to load
1617 * @src_addr_3: the third 4 bytes of the IP address to load
1618 * @src_addr_4: the fourth 4 bytes of the IP address to load
1619 **/
1620static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
1621 u32 *src_addr_1, u32 *src_addr_2,
1622 u32 *src_addr_3, u32 *src_addr_4)
1623{
1624 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
1625 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
1626 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
1627 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
1628
1629 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
1630 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
1631 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
1632 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
1633
1634 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
1635 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
1636 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
1637 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
1638
1639 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
1640 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
1641 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
1642 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
1643 1379
1644 return 0; 1380 return hash_result & IXGBE_ATR_HASH_MASK;
1645} 1381}
1646 1382
1647/** 1383/*
1648 * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address 1384 * These defines allow us to quickly generate all of the necessary instructions
1649 * @input: input stream to search 1385 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1650 * @dst_addr_1: the first 4 bytes of the IP address to load 1386 * for values 0 through 15
1651 * @dst_addr_2: the second 4 bytes of the IP address to load 1387 */
1652 * @dst_addr_3: the third 4 bytes of the IP address to load 1388#define IXGBE_ATR_COMMON_HASH_KEY \
1653 * @dst_addr_4: the fourth 4 bytes of the IP address to load 1389 (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
1654 **/ 1390#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1655s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input, 1391do { \
1656 u32 *dst_addr_1, u32 *dst_addr_2, 1392 u32 n = (_n); \
1657 u32 *dst_addr_3, u32 *dst_addr_4) 1393 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
1658{ 1394 common_hash ^= lo_hash_dword >> n; \
1659 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12]; 1395 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1660 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8; 1396 bucket_hash ^= lo_hash_dword >> n; \
1661 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16; 1397 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
1662 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24; 1398 sig_hash ^= lo_hash_dword << (16 - n); \
1663 1399 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
1664 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8]; 1400 common_hash ^= hi_hash_dword >> n; \
1665 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8; 1401 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1666 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16; 1402 bucket_hash ^= hi_hash_dword >> n; \
1667 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24; 1403 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1668 1404 sig_hash ^= hi_hash_dword << (16 - n); \
1669 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4]; 1405} while (0);
1670 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
1671 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
1672 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
1673
1674 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
1675 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
1676 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
1677 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
1678
1679 return 0;
1680}
1681 1406
1682/** 1407/**
1683 * ixgbe_atr_get_src_port_82599 - Gets the source port 1408 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1684 * @input: input stream to modify 1409 * @stream: input bitstream to compute the hash on
1685 * @src_port: the source port to load
1686 * 1410 *
1687 * Even though the input is given in big-endian, the FDIRPORT registers 1411 * This function is almost identical to the function above but contains
1688 * expect the ports to be programmed in little-endian. Hence the need to swap 1412 * several optimizations such as unwinding all of the loops, letting the
1689 * endianness when retrieving the data. This can be confusing since the 1413 * compiler work out all of the conditional ifs since the keys are static
1690 * internal hash engine expects it to be big-endian. 1414 * defines, and computing both hashes at once since the hashed dword stream
1415 * will be the same for both keys.
1691 **/ 1416 **/
1692static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, 1417static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1693 u16 *src_port) 1418 union ixgbe_atr_hash_dword common)
1694{ 1419{
1695 *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8; 1420 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1696 *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1]; 1421 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1697 1422
1698 return 0; 1423 /* record the flow_vm_vlan bits as they are a key part to the hash */
1699} 1424 flow_vm_vlan = ntohl(input.dword);
1700 1425
1701/** 1426 /* generate common hash dword */
1702 * ixgbe_atr_get_dst_port_82599 - Gets the destination port 1427 hi_hash_dword = ntohl(common.dword);
1703 * @input: input stream to modify
1704 * @dst_port: the destination port to load
1705 *
1706 * Even though the input is given in big-endian, the FDIRPORT registers
1707 * expect the ports to be programmed in little-endian. Hence the need to swap
1708 * endianness when retrieving the data. This can be confusing since the
1709 * internal hash engine expects it to be big-endian.
1710 **/
1711static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
1712 u16 *dst_port)
1713{
1714 *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
1715 *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
1716 1428
1717 return 0; 1429 /* low dword is word swapped version of common */
1718} 1430 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1719 1431
1720/** 1432 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1721 * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes 1433 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1722 * @input: input stream to modify
1723 * @flex_bytes: the flexible bytes to load
1724 **/
1725static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
1726 u16 *flex_byte)
1727{
1728 *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
1729 *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
1730
1731 return 0;
1732}
1733 1434
1734/** 1435 /* Process bits 0 and 16 */
1735 * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool 1436 IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1736 * @input: input stream to modify
1737 * @vm_pool: the Virtual Machine pool to load
1738 **/
1739s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input,
1740 u8 *vm_pool)
1741{
1742 *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
1743 1437
1744 return 0; 1438 /*
1745} 1439 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1440 * delay this because bit 0 of the stream should not be processed
1441 * so we do not add the vlan until after bit 0 was processed
1442 */
1443 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1444
1445 /* Process the remaining 30 bits of the key */
1446 IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1447 IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1448 IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1449 IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1450 IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1451 IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1452 IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1453 IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1454 IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1455 IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1456 IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1457 IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1458 IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1459 IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1460 IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1461
1462 /* combine common_hash result with signature and bucket hashes */
1463 bucket_hash ^= common_hash;
1464 bucket_hash &= IXGBE_ATR_HASH_MASK;
1746 1465
1747/** 1466 sig_hash ^= common_hash << 16;
1748 * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type 1467 sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1749 * @input: input stream to modify
1750 * @l4type: the layer 4 type value to load
1751 **/
1752static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input,
1753 u8 *l4type)
1754{
1755 *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
1756 1468
1757 return 0; 1469 /* return completed signature hash */
1470 return sig_hash ^ bucket_hash;
1758} 1471}
1759 1472
1760/** 1473/**
1761 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter 1474 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1762 * @hw: pointer to hardware structure 1475 * @hw: pointer to hardware structure
1763 * @stream: input bitstream 1476 * @input: unique input dword
1477 * @common: compressed common input dword
1764 * @queue: queue index to direct traffic to 1478 * @queue: queue index to direct traffic to
1765 **/ 1479 **/
1766s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 1480s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1767 struct ixgbe_atr_input *input, 1481 union ixgbe_atr_hash_dword input,
1482 union ixgbe_atr_hash_dword common,
1768 u8 queue) 1483 u8 queue)
1769{ 1484{
1770 u64 fdirhashcmd; 1485 u64 fdirhashcmd;
1771 u64 fdircmd; 1486 u32 fdircmd;
1772 u32 fdirhash;
1773 u16 bucket_hash, sig_hash;
1774 u8 l4type;
1775
1776 bucket_hash = ixgbe_atr_compute_hash_82599(input,
1777 IXGBE_ATR_BUCKET_HASH_KEY);
1778
1779 /* bucket_hash is only 15 bits */
1780 bucket_hash &= IXGBE_ATR_HASH_MASK;
1781
1782 sig_hash = ixgbe_atr_compute_hash_82599(input,
1783 IXGBE_ATR_SIGNATURE_HASH_KEY);
1784
1785 /* Get the l4type in order to program FDIRCMD properly */
1786 /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
1787 ixgbe_atr_get_l4type_82599(input, &l4type);
1788 1487
1789 /* 1488 /*
1790 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits 1489 * Get the flow_type in order to program FDIRCMD properly
1791 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 1490 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1792 */ 1491 */
1793 fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; 1492 switch (input.formatted.flow_type) {
1794 1493 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1795 fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1494 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1796 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN); 1495 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1797 1496 case IXGBE_ATR_FLOW_TYPE_TCPV6:
1798 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1497 case IXGBE_ATR_FLOW_TYPE_UDPV6:
1799 case IXGBE_ATR_L4TYPE_TCP: 1498 case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1800 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
1801 break;
1802 case IXGBE_ATR_L4TYPE_UDP:
1803 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
1804 break;
1805 case IXGBE_ATR_L4TYPE_SCTP:
1806 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
1807 break; 1499 break;
1808 default: 1500 default:
1809 hw_dbg(hw, "Error on l4type input\n"); 1501 hw_dbg(hw, " Error on flow type input\n");
1810 return IXGBE_ERR_CONFIG; 1502 return IXGBE_ERR_CONFIG;
1811 } 1503 }
1812 1504
1813 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) 1505 /* configure FDIRCMD register */
1814 fdircmd |= IXGBE_FDIRCMD_IPV6; 1506 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1507 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1508 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1509 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1815 1510
1816 fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT); 1511 /*
1817 fdirhashcmd = ((fdircmd << 32) | fdirhash); 1512 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1513 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1514 */
1515 fdirhashcmd = (u64)fdircmd << 32;
1516 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1818 1517
1819 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); 1518 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1820 1519
1520 hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1521
1821 return 0; 1522 return 0;
1822} 1523}
1823 1524
1824/** 1525/**
1526 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
1527 * @input_mask: mask to be bit swapped
1528 *
1529 * The source and destination port masks for flow director are bit swapped
1530 * in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, etc. In order to
1531 * generate a correctly swapped value we need to bit swap the mask and that
1532 * is what is accomplished by this function.
1533 **/
1534static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
1535{
1536 u32 mask = ntohs(input_masks->dst_port_mask);
1537 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1538 mask |= ntohs(input_masks->src_port_mask);
1539 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1540 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1541 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1542 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1543}
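The four mask/shift steps above are the classic O(log n) bit reversal, applied within each 16-bit half so the destination-port mask stays in the upper halfword. A standalone check that bit 15 of each half lands on bit 0 of that half, and vice versa:

    #include <assert.h>
    #include <stdint.h>

    /* same four steps as ixgbe_get_fdirtcpm_82599(): each 16-bit half is
     * bit-reversed in place; the halves themselves are not exchanged */
    static uint32_t reverse_each_half(uint32_t m)
    {
            m = ((m & 0x55555555u) << 1) | ((m & 0xAAAAAAAAu) >> 1);
            m = ((m & 0x33333333u) << 2) | ((m & 0xCCCCCCCCu) >> 2);
            m = ((m & 0x0F0F0F0Fu) << 4) | ((m & 0xF0F0F0F0u) >> 4);
            return ((m & 0x00FF00FFu) << 8) | ((m & 0xFF00FF00u) >> 8);
    }

    int main(void)
    {
            assert(reverse_each_half(0x00010001u) == 0x80008000u);
            assert(reverse_each_half(0x80008000u) == 0x00010001u);
            return 0;
    }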
1544
1545/*
1546 * These two macros are meant to address the fact that we have registers
1547 * that are either all or in part big-endian. As a result on big-endian
1548 * systems we will end up byte swapping the value to little-endian before
1549 * it is byte swapped again and written to the hardware in the original
1550 * big-endian format.
1551 */
1552#define IXGBE_STORE_AS_BE32(_value) \
1553 (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1554 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1555
1556#define IXGBE_WRITE_REG_BE32(a, reg, value) \
1557 IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
1558
1559#define IXGBE_STORE_AS_BE16(_value) \
1560 (((u16)(_value) >> 8) | ((u16)(_value) << 8))
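IXGBE_STORE_AS_BE32() is an unconditional byte swap; paired with ntohl(), the swap cancels on little-endian hosts and combines with the I/O layer's own swap on big-endian ones, so the register sees the bytes in big-endian order either way. A host-side sketch of the two properties the macros rely on, using a local copy of the store macro:

    #include <assert.h>
    #include <stdint.h>

    /* local copy of IXGBE_STORE_AS_BE32(): a plain 32-bit byte swap */
    #define STORE_AS_BE32(v) \
            (((uint32_t)(v) >> 24) | (((uint32_t)(v) & 0x00FF0000u) >> 8) | \
             (((uint32_t)(v) & 0x0000FF00u) << 8) | ((uint32_t)(v) << 24))

    int main(void)
    {
            uint32_t v = 0x11223344u;

            assert(STORE_AS_BE32(v) == 0x44332211u);        /* byte swap */
            assert(STORE_AS_BE32(STORE_AS_BE32(v)) == v);   /* involution */
            return 0;
    }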
1561
1562/**
1825 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter 1563 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1826 * @hw: pointer to hardware structure 1564 * @hw: pointer to hardware structure
1827 * @input: input bitstream 1565 * @input: input bitstream
@@ -1833,82 +1571,39 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1833 * hardware writes must be protected from one another. 1571 * hardware writes must be protected from one another.
1834 **/ 1572 **/
1835s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, 1573s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1836 struct ixgbe_atr_input *input, 1574 union ixgbe_atr_input *input,
1837 struct ixgbe_atr_input_masks *input_masks, 1575 struct ixgbe_atr_input_masks *input_masks,
1838 u16 soft_id, u8 queue) 1576 u16 soft_id, u8 queue)
1839{ 1577{
1840 u32 fdircmd = 0;
1841 u32 fdirhash; 1578 u32 fdirhash;
1842 u32 src_ipv4 = 0, dst_ipv4 = 0; 1579 u32 fdircmd;
1843 u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4; 1580 u32 fdirport, fdirtcpm;
1844 u16 src_port, dst_port, vlan_id, flex_bytes; 1581 u32 fdirvlan;
1845 u16 bucket_hash; 1582 /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
1846 u8 l4type; 1583 u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
1847 u8 fdirm = 0; 1584 IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
1848
1849 /* Get our input values */
1850 ixgbe_atr_get_l4type_82599(input, &l4type);
1851 1585
1852 /* 1586 /*
1853 * Check l4type formatting, and bail out before we touch the hardware 1587 * Check flow_type formatting, and bail out before we touch the hardware
1854 * if there's a configuration issue 1588 * if there's a configuration issue
1855 */ 1589 */
1856 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1590 switch (input->formatted.flow_type) {
1857 case IXGBE_ATR_L4TYPE_TCP: 1591 case IXGBE_ATR_FLOW_TYPE_IPV4:
1858 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; 1592 /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
1859 break; 1593 fdirm |= IXGBE_FDIRM_L4P;
1860 case IXGBE_ATR_L4TYPE_UDP: 1594 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1861 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; 1595 if (input_masks->dst_port_mask || input_masks->src_port_mask) {
1862 break; 1596 hw_dbg(hw, " Error on src/dst port mask\n");
1863 case IXGBE_ATR_L4TYPE_SCTP: 1597 return IXGBE_ERR_CONFIG;
1864 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; 1598 }
1599 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1600 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1865 break; 1601 break;
1866 default: 1602 default:
1867 hw_dbg(hw, "Error on l4type input\n"); 1603 hw_dbg(hw, " Error on flow type input\n");
1868 return IXGBE_ERR_CONFIG; 1604 return IXGBE_ERR_CONFIG;
1869 } 1605 }
1870 1606
1871 bucket_hash = ixgbe_atr_compute_hash_82599(input,
1872 IXGBE_ATR_BUCKET_HASH_KEY);
1873
1874 /* bucket_hash is only 15 bits */
1875 bucket_hash &= IXGBE_ATR_HASH_MASK;
1876
1877 ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
1878 ixgbe_atr_get_src_port_82599(input, &src_port);
1879 ixgbe_atr_get_dst_port_82599(input, &dst_port);
1880 ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
1881
1882 fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
1883
1884 /* Now figure out if we're IPv4 or IPv6 */
1885 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
1886 /* IPv6 */
1887 ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
1888 &src_ipv6_3, &src_ipv6_4);
1889
1890 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
1891 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
1892 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
1893 /* The last 4 bytes is the same register as IPv4 */
1894 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
1895
1896 fdircmd |= IXGBE_FDIRCMD_IPV6;
1897 fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
1898 } else {
1899 /* IPv4 */
1900 ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
1901 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
1902 }
1903
1904 ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
1905 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
1906
1907 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
1908 (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
1909 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
1910 (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
1911
1912 /* 1607 /*
1913 * Program the relevant mask registers. If src/dst_port or src/dst_addr 1608 * Program the relevant mask registers. If src/dst_port or src/dst_addr
1914 * are zero, then assume a full mask for that field. Also assume that 1609 * are zero, then assume a full mask for that field. Also assume that
@@ -1918,79 +1613,97 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1918 * This also assumes IPv4 only. IPv6 masking isn't supported at this 1613 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1919 * point in time. 1614 * point in time.
1920 */ 1615 */
1921 if (src_ipv4 == 0)
1922 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
1923 else
1924 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
1925 1616
1926 if (dst_ipv4 == 0) 1617 /* Program FDIRM */
1927 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff); 1618 switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
1928 else 1619 case 0xEFFF:
1929 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask); 1620 /* Unmask VLAN ID - bit 0 and fall through to unmask prio */
1930 1621 fdirm &= ~IXGBE_FDIRM_VLANID;
1931 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1622 case 0xE000:
1932 case IXGBE_ATR_L4TYPE_TCP: 1623 /* Unmask VLAN prio - bit 1 */
1933 if (src_port == 0) 1624 fdirm &= ~IXGBE_FDIRM_VLANP;
1934 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
1935 else
1936 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
1937 input_masks->src_port_mask);
1938
1939 if (dst_port == 0)
1940 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
1941 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
1942 (0xffff << 16)));
1943 else
1944 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
1945 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
1946 (input_masks->dst_port_mask << 16)));
1947 break; 1625 break;
1948 case IXGBE_ATR_L4TYPE_UDP: 1626 case 0x0FFF:
1949 if (src_port == 0) 1627 /* Unmask VLAN ID - bit 0 */
1950 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff); 1628 fdirm &= ~IXGBE_FDIRM_VLANID;
1951 else
1952 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
1953 input_masks->src_port_mask);
1954
1955 if (dst_port == 0)
1956 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
1957 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
1958 (0xffff << 16)));
1959 else
1960 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
1961 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
1962 (input_masks->src_port_mask << 16)));
1963 break; 1629 break;
1964 default: 1630 case 0x0000:
1965 /* this already would have failed above */ 1631 /* do nothing, vlans already masked */
1966 break; 1632 break;
1633 default:
1634 hw_dbg(hw, " Error on VLAN mask\n");
1635 return IXGBE_ERR_CONFIG;
1967 } 1636 }
1968 1637
1969 /* Program the last mask register, FDIRM */ 1638 if (input_masks->flex_mask & 0xFFFF) {
1970 if (input_masks->vlan_id_mask || !vlan_id) 1639 if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
1971 /* Mask both VLAN and VLANP - bits 0 and 1 */ 1640 hw_dbg(hw, " Error on flexible byte mask\n");
1972 fdirm |= 0x3; 1641 return IXGBE_ERR_CONFIG;
1973 1642 }
1974 if (input_masks->data_mask || !flex_bytes) 1643 /* Unmask Flex Bytes - bit 4 */
1975 /* Flex bytes need masking, so mask the whole thing - bit 4 */ 1644 fdirm &= ~IXGBE_FDIRM_FLEX;
1976 fdirm |= 0x10; 1645 }
1977 1646
1978 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ 1647 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1979 fdirm |= 0x24;
1980
1981 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); 1648 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1982 1649
1983 fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW; 1650 /* store the TCP/UDP port masks, bit reversed from port layout */
1984 fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE; 1651 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
1985 fdircmd |= IXGBE_FDIRCMD_LAST; 1652
1986 fdircmd |= IXGBE_FDIRCMD_QUEUE_EN; 1653 /* write both the same so that UDP and TCP use the same mask */
1987 fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1654 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1655 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1656
 1657 /* store source and destination IP masks (big-endian) */
1658 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1659 ~input_masks->src_ip_mask[0]);
1660 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1661 ~input_masks->dst_ip_mask[0]);
1662
1663 /* Apply masks to input data */
1664 input->formatted.vlan_id &= input_masks->vlan_id_mask;
1665 input->formatted.flex_bytes &= input_masks->flex_mask;
1666 input->formatted.src_port &= input_masks->src_port_mask;
1667 input->formatted.dst_port &= input_masks->dst_port_mask;
1668 input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
1669 input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
1670
 1671 /* record vlan (little-endian) and flex_bytes (big-endian) */
1672 fdirvlan =
1673 IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
1674 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1675 fdirvlan |= ntohs(input->formatted.vlan_id);
1676 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1677
 1678 /* record source and destination port (little-endian) */
1679 fdirport = ntohs(input->formatted.dst_port);
1680 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1681 fdirport |= ntohs(input->formatted.src_port);
1682 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1683
1684 /* record the first 32 bits of the destination address (big-endian) */
1685 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
1686
1687 /* record the source address (big-endian) */
1688 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
1689
1690 /* configure FDIRCMD register */
1691 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1692 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1693 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1694 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1695
1696 /* we only want the bucket hash so drop the upper 16 bits */
1697 fdirhash = ixgbe_atr_compute_hash_82599(input,
1698 IXGBE_ATR_BUCKET_HASH_KEY);
1699 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1988 1700
1989 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); 1701 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1990 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); 1702 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1991 1703
1992 return 0; 1704 return 0;
1993} 1705}
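
To make the little-endian register packing steps above concrete, here is a hosted-C illustration of the FDIRPORT layout: both ports arrive in network byte order, and the destination port is assumed to occupy the upper 16 bits (taking IXGBE_FDIRPORT_DESTINATION_SHIFT as 16). The port numbers are arbitrary examples.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t src_be = htons(1024); /* network order, as in the input struct */
            uint16_t dst_be = htons(80);
            uint32_t fdirport = ((uint32_t)ntohs(dst_be) << 16) | ntohs(src_be);

            printf("FDIRPORT = 0x%08x\n", fdirport); /* prints 0x00500400 */
            return 0;
    }
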
1706
1994/** 1707/**
1995 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register 1708 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
1996 * @hw: pointer to hardware structure 1709 * @hw: pointer to hardware structure
@@ -2037,30 +1750,29 @@ static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2037 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx 1750 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2038 * @hw: pointer to hardware structure 1751 * @hw: pointer to hardware structure
2039 * 1752 *
2040 * Starts the hardware using the generic start_hw function. 1753 * Starts the hardware using the generic start_hw function
 2041 * Then performs device-specific: 1754 * and the gen2 start_hw function.
2042 * Clears the rate limiter registers. 1755 * Then performs revision-specific operations, if any.
2043 **/ 1756 **/
2044static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) 1757static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2045{ 1758{
2046 u32 q_num; 1759 s32 ret_val = 0;
2047 s32 ret_val;
2048 1760
2049 ret_val = ixgbe_start_hw_generic(hw); 1761 ret_val = ixgbe_start_hw_generic(hw);
1762 if (ret_val != 0)
1763 goto out;
2050 1764
2051 /* Clear the rate limiters */ 1765 ret_val = ixgbe_start_hw_gen2(hw);
2052 for (q_num = 0; q_num < hw->mac.max_tx_queues; q_num++) { 1766 if (ret_val != 0)
2053 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, q_num); 1767 goto out;
2054 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
2055 }
2056 IXGBE_WRITE_FLUSH(hw);
2057 1768
2058 /* We need to run link autotry after the driver loads */ 1769 /* We need to run link autotry after the driver loads */
2059 hw->mac.autotry_restart = true; 1770 hw->mac.autotry_restart = true;
1771 hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE;
2060 1772
2061 if (ret_val == 0) 1773 if (ret_val == 0)
2062 ret_val = ixgbe_verify_fw_version_82599(hw); 1774 ret_val = ixgbe_verify_fw_version_82599(hw);
2063 1775out:
2064 return ret_val; 1776 return ret_val;
2065} 1777}
2066 1778
@@ -2069,13 +1781,34 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2069 * @hw: pointer to hardware structure 1781 * @hw: pointer to hardware structure
2070 * 1782 *
2071 * Determines the physical layer module found on the current adapter. 1783 * Determines the physical layer module found on the current adapter.
1784 * If PHY already detected, maintains current PHY type in hw struct,
1785 * otherwise executes the PHY detection routine.
2072 **/ 1786 **/
2073static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) 1787static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2074{ 1788{
2075 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 1789 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
1790
 1791 /* Detect the PHY if not done already - returns success if already detected. */
2076 status = ixgbe_identify_phy_generic(hw); 1792 status = ixgbe_identify_phy_generic(hw);
2077 if (status != 0) 1793 if (status != 0) {
2078 status = ixgbe_identify_sfp_module_generic(hw); 1794 /* 82599 10GBASE-T requires an external PHY */
1795 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1796 goto out;
1797 else
1798 status = ixgbe_identify_sfp_module_generic(hw);
1799 }
1800
1801 /* Set PHY type none if no PHY detected */
1802 if (hw->phy.type == ixgbe_phy_unknown) {
1803 hw->phy.type = ixgbe_phy_none;
1804 status = 0;
1805 }
1806
1807 /* Return error if SFP module has been detected but is not supported */
1808 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
1809 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1810
1811out:
2079 return status; 1812 return status;
2080} 1813}
2081 1814
@@ -2099,10 +1832,12 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2099 1832
2100 hw->phy.ops.identify(hw); 1833 hw->phy.ops.identify(hw);
2101 1834
2102 if (hw->phy.type == ixgbe_phy_tn || 1835 switch (hw->phy.type) {
2103 hw->phy.type == ixgbe_phy_cu_unknown) { 1836 case ixgbe_phy_tn:
1837 case ixgbe_phy_aq:
1838 case ixgbe_phy_cu_unknown:
2104 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, 1839 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
2105 &ext_ability); 1840 &ext_ability);
2106 if (ext_ability & MDIO_PMA_EXTABLE_10GBT) 1841 if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
2107 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 1842 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2108 if (ext_ability & MDIO_PMA_EXTABLE_1000BT) 1843 if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
@@ -2110,6 +1845,8 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2110 if (ext_ability & MDIO_PMA_EXTABLE_100BTX) 1845 if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
2111 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; 1846 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2112 goto out; 1847 goto out;
1848 default:
1849 break;
2113 } 1850 }
2114 1851
2115 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 1852 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -2221,6 +1958,7 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2221 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) 1958 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2222 break; 1959 break;
2223 else 1960 else
1961 /* Use interrupt-safe sleep just in case */
2224 udelay(10); 1962 udelay(10);
2225 } 1963 }
2226 1964
@@ -2239,21 +1977,6 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2239} 1977}
2240 1978
2241/** 1979/**
2242 * ixgbe_get_device_caps_82599 - Get additional device capabilities
2243 * @hw: pointer to hardware structure
2244 * @device_caps: the EEPROM word with the extra device capabilities
2245 *
2246 * This function will read the EEPROM location for the device capabilities,
2247 * and return the word through device_caps.
2248 **/
2249static s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
2250{
2251 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
2252
2253 return 0;
2254}
2255
2256/**
2257 * ixgbe_verify_fw_version_82599 - verify fw version for 82599 1980 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
2258 * @hw: pointer to hardware structure 1981 * @hw: pointer to hardware structure
2259 * 1982 *
@@ -2302,48 +2025,107 @@ fw_version_out:
2302} 2025}
2303 2026
2304/** 2027/**
2305 * ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from 2028 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2306 * the EEPROM
2307 * @hw: pointer to hardware structure 2029 * @hw: pointer to hardware structure
2308 * @wwnn_prefix: the alternative WWNN prefix
2309 * @wwpn_prefix: the alternative WWPN prefix
2310 * 2030 *
2311 * This function will read the EEPROM from the alternative SAN MAC address 2031 * Returns true if the LESM FW module is present and enabled. Otherwise
2312 * block to check the support for the alternative WWNN/WWPN prefix support. 2032 * returns false. Smart Speed must be disabled if LESM FW module is enabled.
2313 **/ 2033 **/
2314static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix, 2034static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2315 u16 *wwpn_prefix)
2316{ 2035{
2317 u16 offset, caps; 2036 bool lesm_enabled = false;
2318 u16 alt_san_mac_blk_offset; 2037 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2038 s32 status;
2319 2039
2320 /* clear output first */ 2040 /* get the offset to the Firmware Module block */
2321 *wwnn_prefix = 0xFFFF; 2041 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2322 *wwpn_prefix = 0xFFFF;
2323 2042
2324 /* check if alternative SAN MAC is supported */ 2043 if ((status != 0) ||
2325 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, 2044 (fw_offset == 0) || (fw_offset == 0xFFFF))
2326 &alt_san_mac_blk_offset); 2045 goto out;
2327 2046
2328 if ((alt_san_mac_blk_offset == 0) || 2047 /* get the offset to the LESM Parameters block */
2329 (alt_san_mac_blk_offset == 0xFFFF)) 2048 status = hw->eeprom.ops.read(hw, (fw_offset +
2330 goto wwn_prefix_out; 2049 IXGBE_FW_LESM_PARAMETERS_PTR),
2050 &fw_lesm_param_offset);
2331 2051
2332 /* check capability in alternative san mac address block */ 2052 if ((status != 0) ||
2333 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; 2053 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2334 hw->eeprom.ops.read(hw, offset, &caps); 2054 goto out;
2335 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
2336 goto wwn_prefix_out;
2337 2055
2338 /* get the corresponding prefix for WWNN/WWPN */ 2056 /* get the lesm state word */
2339 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; 2057 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2340 hw->eeprom.ops.read(hw, offset, wwnn_prefix); 2058 IXGBE_FW_LESM_STATE_1),
2059 &fw_lesm_state);
2341 2060
2342 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; 2061 if ((status == 0) &&
2343 hw->eeprom.ops.read(hw, offset, wwpn_prefix); 2062 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2063 lesm_enabled = true;
2344 2064
2345wwn_prefix_out: 2065out:
2346 return 0; 2066 return lesm_enabled;
2067}
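
A toy model of the EEPROM pointer chasing above, in hosted C: each read returns a word that points at the next block, and 0x0000 or 0xFFFF at any step means the block is not programmed. All offsets and the state flag below are invented for the sketch; the driver uses IXGBE_FW_PTR, IXGBE_FW_LESM_PARAMETERS_PTR and IXGBE_FW_LESM_STATE_1/ENABLED.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FW_PTR_WORD        0x0F    /* invented offsets for the sketch */
    #define LESM_PARAMS_OFFSET 2
    #define LESM_STATE_OFFSET  1
    #define LESM_STATE_ENABLED 0x8000u

    static const uint16_t eeprom[32] = {
            [0x0F] = 0x0010, /* FW pointer -> firmware module block */
            [0x12] = 0x0014, /* module block + 2 -> parameter block */
            [0x15] = 0x8001, /* parameter block + 1 -> state word   */
    };

    static bool valid_ptr(uint16_t p)
    {
            return p != 0x0000 && p != 0xFFFF;
    }

    int main(void)
    {
            uint16_t fw = eeprom[FW_PTR_WORD];

            if (!valid_ptr(fw))
                    return 1;

            uint16_t params = eeprom[fw + LESM_PARAMS_OFFSET];

            if (!valid_ptr(params))
                    return 1;

            uint16_t state = eeprom[params + LESM_STATE_OFFSET];

            printf("LESM %s\n",
                   (state & LESM_STATE_ENABLED) ? "enabled" : "disabled");
            return 0;
    }
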
2068
2069/**
2070 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2071 * fastest available method
2072 *
2073 * @hw: pointer to hardware structure
2074 * @offset: offset of word in EEPROM to read
2075 * @words: number of words
2076 * @data: word(s) read from the EEPROM
2077 *
2078 * Retrieves 16 bit word(s) read from EEPROM
2079 **/
2080static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2081 u16 words, u16 *data)
2082{
2083 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2084 s32 ret_val = IXGBE_ERR_CONFIG;
2085
2086 /*
2087 * If EEPROM is detected and can be addressed using 14 bits,
 2088 * use EERD, otherwise use bit bang
2089 */
2090 if ((eeprom->type == ixgbe_eeprom_spi) &&
2091 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2092 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2093 data);
2094 else
2095 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2096 words,
2097 data);
2098
2099 return ret_val;
2100}
2101
2102/**
2103 * ixgbe_read_eeprom_82599 - Read EEPROM word using
2104 * fastest available method
2105 *
2106 * @hw: pointer to hardware structure
2107 * @offset: offset of word in the EEPROM to read
2108 * @data: word read from the EEPROM
2109 *
2110 * Reads a 16 bit word from the EEPROM
2111 **/
2112static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2113 u16 offset, u16 *data)
2114{
2115 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2116 s32 ret_val = IXGBE_ERR_CONFIG;
2117
2118 /*
2119 * If EEPROM is detected and can be addressed using 14 bits,
2120 * use EERD otherwise use bit bang
2121 */
2122 if ((eeprom->type == ixgbe_eeprom_spi) &&
2123 (offset <= IXGBE_EERD_MAX_ADDR))
2124 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2125 else
2126 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2127
2128 return ret_val;
2347} 2129}
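
Both 82599 read helpers above implement the same dispatch: take the fast EERD register path only when every requested word fits the register's address field, otherwise fall back to bit-banging. A hosted-C sketch of that decision, with 0x3FFF standing in for IXGBE_EERD_MAX_ADDR and stub leaf functions:

    #include <stdint.h>
    #include <stdio.h>

    #define EERD_MAX_ADDR 0x3FFFu /* stand-in for IXGBE_EERD_MAX_ADDR */

    static int read_eerd(uint16_t off, uint16_t n)
    {
            printf("EERD read of %u word(s) at 0x%04x\n", n, off);
            return 0;
    }

    static int read_bit_bang(uint16_t off, uint16_t n)
    {
            printf("bit-bang read of %u word(s) at 0x%04x\n", n, off);
            return 0;
    }

    static int read_buffer(uint16_t offset, uint16_t words)
    {
            /* the last word addressed must still be reachable via EERD */
            if (words != 0 && (uint32_t)offset + (words - 1u) <= EERD_MAX_ADDR)
                    return read_eerd(offset, words);
            return read_bit_bang(offset, words);
    }

    int main(void)
    {
            read_buffer(0x0010, 8);  /* fast path */
            read_buffer(0x3FFF, 2);  /* word 0x4000 unreachable: falls back */
            return 0;
    }
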
2348 2130
2349static struct ixgbe_mac_operations mac_ops_82599 = { 2131static struct ixgbe_mac_operations mac_ops_82599 = {
@@ -2356,8 +2138,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2356 .enable_rx_dma = &ixgbe_enable_rx_dma_82599, 2138 .enable_rx_dma = &ixgbe_enable_rx_dma_82599,
2357 .get_mac_addr = &ixgbe_get_mac_addr_generic, 2139 .get_mac_addr = &ixgbe_get_mac_addr_generic,
2358 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, 2140 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
2359 .get_device_caps = &ixgbe_get_device_caps_82599, 2141 .get_device_caps = &ixgbe_get_device_caps_generic,
2360 .get_wwn_prefix = &ixgbe_get_wwn_prefix_82599, 2142 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
2361 .stop_adapter = &ixgbe_stop_adapter_generic, 2143 .stop_adapter = &ixgbe_stop_adapter_generic,
2362 .get_bus_info = &ixgbe_get_bus_info_generic, 2144 .get_bus_info = &ixgbe_get_bus_info_generic,
2363 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, 2145 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
@@ -2375,7 +2157,6 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2375 .set_vmdq = &ixgbe_set_vmdq_generic, 2157 .set_vmdq = &ixgbe_set_vmdq_generic,
2376 .clear_vmdq = &ixgbe_clear_vmdq_generic, 2158 .clear_vmdq = &ixgbe_clear_vmdq_generic,
2377 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 2159 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
2378 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
2379 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 2160 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
2380 .enable_mc = &ixgbe_enable_mc_generic, 2161 .enable_mc = &ixgbe_enable_mc_generic,
2381 .disable_mc = &ixgbe_disable_mc_generic, 2162 .disable_mc = &ixgbe_disable_mc_generic,
@@ -2384,30 +2165,38 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2384 .fc_enable = &ixgbe_fc_enable_generic, 2165 .fc_enable = &ixgbe_fc_enable_generic,
2385 .init_uta_tables = &ixgbe_init_uta_tables_generic, 2166 .init_uta_tables = &ixgbe_init_uta_tables_generic,
2386 .setup_sfp = &ixgbe_setup_sfp_modules_82599, 2167 .setup_sfp = &ixgbe_setup_sfp_modules_82599,
2168 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
2169 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
2170 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
2171 .release_swfw_sync = &ixgbe_release_swfw_sync,
2172
2387}; 2173};
2388 2174
2389static struct ixgbe_eeprom_operations eeprom_ops_82599 = { 2175static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
2390 .init_params = &ixgbe_init_eeprom_params_generic, 2176 .init_params = &ixgbe_init_eeprom_params_generic,
2391 .read = &ixgbe_read_eerd_generic, 2177 .read = &ixgbe_read_eeprom_82599,
2392 .write = &ixgbe_write_eeprom_generic, 2178 .read_buffer = &ixgbe_read_eeprom_buffer_82599,
2393 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 2179 .write = &ixgbe_write_eeprom_generic,
2394 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 2180 .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic,
2181 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
2182 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
2183 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
2395}; 2184};
2396 2185
2397static struct ixgbe_phy_operations phy_ops_82599 = { 2186static struct ixgbe_phy_operations phy_ops_82599 = {
2398 .identify = &ixgbe_identify_phy_82599, 2187 .identify = &ixgbe_identify_phy_82599,
2399 .identify_sfp = &ixgbe_identify_sfp_module_generic, 2188 .identify_sfp = &ixgbe_identify_sfp_module_generic,
2400 .init = &ixgbe_init_phy_ops_82599, 2189 .init = &ixgbe_init_phy_ops_82599,
2401 .reset = &ixgbe_reset_phy_generic, 2190 .reset = &ixgbe_reset_phy_generic,
2402 .read_reg = &ixgbe_read_phy_reg_generic, 2191 .read_reg = &ixgbe_read_phy_reg_generic,
2403 .write_reg = &ixgbe_write_phy_reg_generic, 2192 .write_reg = &ixgbe_write_phy_reg_generic,
2404 .setup_link = &ixgbe_setup_phy_link_generic, 2193 .setup_link = &ixgbe_setup_phy_link_generic,
2405 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, 2194 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
2406 .read_i2c_byte = &ixgbe_read_i2c_byte_generic, 2195 .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
2407 .write_i2c_byte = &ixgbe_write_i2c_byte_generic, 2196 .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
2408 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, 2197 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
2409 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, 2198 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
2410 .check_overtemp = &ixgbe_tn_check_overtemp, 2199 .check_overtemp = &ixgbe_tn_check_overtemp,
2411}; 2200};
2412 2201
2413struct ixgbe_info ixgbe_82599_info = { 2202struct ixgbe_info ixgbe_82599_info = {
@@ -2416,5 +2205,5 @@ struct ixgbe_info ixgbe_82599_info = {
2416 .mac_ops = &mac_ops_82599, 2205 .mac_ops = &mac_ops_82599,
2417 .eeprom_ops = &eeprom_ops_82599, 2206 .eeprom_ops = &eeprom_ops_82599,
2418 .phy_ops = &phy_ops_82599, 2207 .phy_ops = &phy_ops_82599,
2419 .mbx_ops = &mbx_ops_82599, 2208 .mbx_ops = &mbx_ops_generic,
2420}; 2209};
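
The tables above follow the kernel's usual ops-structure pattern: shared code calls through function pointers, and each device variant fills the slots with generic or device-specific implementations (here, the 82599 table now points read at the EERD-capable wrapper). A minimal hosted-C sketch of the idea, with invented names rather than the driver's real types:

    #include <stdio.h>

    struct eeprom_ops {
            int (*read)(unsigned int offset, unsigned short *data);
    };

    static int read_eerd_fast(unsigned int offset, unsigned short *data)
    {
            (void)offset;
            *data = 0x1234; /* pretend register read */
            return 0;
    }

    static const struct eeprom_ops ops_82599 = {
            .read = read_eerd_fast, /* another device's table could differ */
    };

    int main(void)
    {
            unsigned short word;

            if (ops_82599.read(0x10, &word) == 0)
                    printf("word = 0x%04x\n", word);
            return 0;
    }
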
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 9595b1bfb8dd..b894b42a741c 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -45,13 +45,22 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
45static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 45static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 47static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
48static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
49 48
50static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
51static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
52static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 49static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
53static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); 50static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
51static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
52static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
53static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
54static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
55 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
54static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); 56static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
57static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
58static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
59 u16 words, u16 *data);
60static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
61 u16 words, u16 *data);
62static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
63 u16 offset);
55 64
56/** 65/**
57 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx 66 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -94,6 +103,45 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
94} 103}
95 104
96/** 105/**
106 * ixgbe_start_hw_gen2 - Init sequence for common device family
107 * @hw: pointer to hw structure
108 *
109 * Performs the init sequence common to the second generation
110 * of 10 GbE devices.
111 * Devices in the second generation:
112 * 82599
113 * X540
114 **/
115s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
116{
117 u32 i;
118 u32 regval;
119
120 /* Clear the rate limiters */
121 for (i = 0; i < hw->mac.max_tx_queues; i++) {
122 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
123 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
124 }
125 IXGBE_WRITE_FLUSH(hw);
126
127 /* Disable relaxed ordering */
128 for (i = 0; i < hw->mac.max_tx_queues; i++) {
129 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
130 regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
131 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
132 }
133
134 for (i = 0; i < hw->mac.max_rx_queues; i++) {
135 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
136 regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
137 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
138 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
139 }
140
141 return 0;
142}
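
The rate-limiter loop above uses a select-then-write idiom: a write to RTTDQSEL picks the queue, and the following RTTBCNRC write is routed to that queue's slot. A toy hosted-C model of the routing (queue count and register offsets invented for the sketch):

    #include <assert.h>
    #include <stdint.h>

    #define RTTDQSEL   0
    #define RTTBCNRC   1
    #define NUM_QUEUES 128 /* stand-in for hw->mac.max_tx_queues */

    static uint32_t regs[2];
    static uint32_t rate_cfg[NUM_QUEUES]; /* per-queue backing store */

    static void write_reg(unsigned int reg, uint32_t val)
    {
            regs[reg] = val;
            if (reg == RTTBCNRC) /* routed via the current selector */
                    rate_cfg[regs[RTTDQSEL] % NUM_QUEUES] = val;
    }

    int main(void)
    {
            uint32_t q;

            for (q = 0; q < NUM_QUEUES; q++)
                    rate_cfg[q] = 0xDEADBEEFu; /* pretend stale limits */

            for (q = 0; q < NUM_QUEUES; q++) {
                    write_reg(RTTDQSEL, q);
                    write_reg(RTTBCNRC, 0); /* clear this queue's limiter */
            }

            for (q = 0; q < NUM_QUEUES; q++)
                    assert(rate_cfg[q] == 0);
            return 0;
    }
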
143
144/**
97 * ixgbe_init_hw_generic - Generic hardware initialization 145 * ixgbe_init_hw_generic - Generic hardware initialization
98 * @hw: pointer to hardware structure 146 * @hw: pointer to hardware structure
99 * 147 *
@@ -140,17 +188,29 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
140 IXGBE_READ_REG(hw, IXGBE_MRFC); 188 IXGBE_READ_REG(hw, IXGBE_MRFC);
141 IXGBE_READ_REG(hw, IXGBE_RLEC); 189 IXGBE_READ_REG(hw, IXGBE_RLEC);
142 IXGBE_READ_REG(hw, IXGBE_LXONTXC); 190 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
143 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
144 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 191 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
145 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 192 if (hw->mac.type >= ixgbe_mac_82599EB) {
193 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
194 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
195 } else {
196 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
197 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
198 }
146 199
147 for (i = 0; i < 8; i++) { 200 for (i = 0; i < 8; i++) {
148 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 201 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
149 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
150 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 202 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
151 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 203 if (hw->mac.type >= ixgbe_mac_82599EB) {
204 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
205 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
206 } else {
207 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
208 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
209 }
152 } 210 }
153 211 if (hw->mac.type >= ixgbe_mac_82599EB)
212 for (i = 0; i < 8; i++)
213 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
154 IXGBE_READ_REG(hw, IXGBE_PRC64); 214 IXGBE_READ_REG(hw, IXGBE_PRC64);
155 IXGBE_READ_REG(hw, IXGBE_PRC127); 215 IXGBE_READ_REG(hw, IXGBE_PRC127);
156 IXGBE_READ_REG(hw, IXGBE_PRC255); 216 IXGBE_READ_REG(hw, IXGBE_PRC255);
@@ -188,39 +248,136 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
188 IXGBE_READ_REG(hw, IXGBE_BPTC); 248 IXGBE_READ_REG(hw, IXGBE_BPTC);
189 for (i = 0; i < 16; i++) { 249 for (i = 0; i < 16; i++) {
190 IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 250 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
191 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
192 IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 251 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
193 IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 252 if (hw->mac.type >= ixgbe_mac_82599EB) {
253 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
254 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
255 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
256 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
257 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
258 } else {
259 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
260 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
261 }
262 }
263
264 if (hw->mac.type == ixgbe_mac_X540) {
265 if (hw->phy.id == 0)
266 hw->phy.ops.identify(hw);
267 hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
268 hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
269 hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
270 hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
194 } 271 }
195 272
196 return 0; 273 return 0;
197} 274}
198 275
199/** 276/**
200 * ixgbe_read_pba_num_generic - Reads part number from EEPROM 277 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
201 * @hw: pointer to hardware structure 278 * @hw: pointer to hardware structure
202 * @pba_num: stores the part number from the EEPROM 279 * @pba_num: stores the part number string from the EEPROM
280 * @pba_num_size: part number string buffer length
203 * 281 *
204 * Reads the part number from the EEPROM. 282 * Reads the part number string from the EEPROM.
205 **/ 283 **/
206s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) 284s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
285 u32 pba_num_size)
207{ 286{
208 s32 ret_val; 287 s32 ret_val;
209 u16 data; 288 u16 data;
289 u16 pba_ptr;
290 u16 offset;
291 u16 length;
292
293 if (pba_num == NULL) {
294 hw_dbg(hw, "PBA string buffer was null\n");
295 return IXGBE_ERR_INVALID_ARGUMENT;
296 }
210 297
211 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 298 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
212 if (ret_val) { 299 if (ret_val) {
213 hw_dbg(hw, "NVM Read Error\n"); 300 hw_dbg(hw, "NVM Read Error\n");
214 return ret_val; 301 return ret_val;
215 } 302 }
216 *pba_num = (u32)(data << 16);
217 303
218 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); 304 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
219 if (ret_val) { 305 if (ret_val) {
220 hw_dbg(hw, "NVM Read Error\n"); 306 hw_dbg(hw, "NVM Read Error\n");
221 return ret_val; 307 return ret_val;
222 } 308 }
223 *pba_num |= data; 309
310 /*
 311 * if data is not the pointer guard, the PBA must be in legacy format,
 312 * which means pba_ptr is actually our second data word for the PBA
 313 * number and we can decode it into an ascii string
314 */
315 if (data != IXGBE_PBANUM_PTR_GUARD) {
316 hw_dbg(hw, "NVM PBA number is not stored as string\n");
317
318 /* we will need 11 characters to store the PBA */
319 if (pba_num_size < 11) {
320 hw_dbg(hw, "PBA string buffer too small\n");
321 return IXGBE_ERR_NO_SPACE;
322 }
323
324 /* extract hex string from data and pba_ptr */
325 pba_num[0] = (data >> 12) & 0xF;
326 pba_num[1] = (data >> 8) & 0xF;
327 pba_num[2] = (data >> 4) & 0xF;
328 pba_num[3] = data & 0xF;
329 pba_num[4] = (pba_ptr >> 12) & 0xF;
330 pba_num[5] = (pba_ptr >> 8) & 0xF;
331 pba_num[6] = '-';
332 pba_num[7] = 0;
333 pba_num[8] = (pba_ptr >> 4) & 0xF;
334 pba_num[9] = pba_ptr & 0xF;
335
336 /* put a null character on the end of our string */
337 pba_num[10] = '\0';
338
339 /* switch all the data but the '-' to hex char */
340 for (offset = 0; offset < 10; offset++) {
341 if (pba_num[offset] < 0xA)
342 pba_num[offset] += '0';
343 else if (pba_num[offset] < 0x10)
344 pba_num[offset] += 'A' - 0xA;
345 }
346
347 return 0;
348 }
349
350 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
351 if (ret_val) {
352 hw_dbg(hw, "NVM Read Error\n");
353 return ret_val;
354 }
355
356 if (length == 0xFFFF || length == 0) {
357 hw_dbg(hw, "NVM PBA number section invalid length\n");
358 return IXGBE_ERR_PBA_SECTION;
359 }
360
361 /* check if pba_num buffer is big enough */
362 if (pba_num_size < (((u32)length * 2) - 1)) {
363 hw_dbg(hw, "PBA string buffer too small\n");
364 return IXGBE_ERR_NO_SPACE;
365 }
366
367 /* trim pba length from start of string */
368 pba_ptr++;
369 length--;
370
371 for (offset = 0; offset < length; offset++) {
372 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
373 if (ret_val) {
374 hw_dbg(hw, "NVM Read Error\n");
375 return ret_val;
376 }
377 pba_num[offset * 2] = (u8)(data >> 8);
378 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
379 }
380 pba_num[offset * 2] = '\0';
224 381
225 return 0; 382 return 0;
226} 383}
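
The legacy branch above prints each nibble by adding '0' for 0-9 and 'A' - 0xA for 0xA-0xF; the same step isolated as a hosted-C helper:

    #include <assert.h>

    static char hex_char(unsigned char nibble)
    {
            if (nibble < 0xA)
                    return (char)('0' + nibble);
            return (char)('A' + (nibble - 0xA));
    }

    int main(void)
    {
            assert(hex_char(0x3) == '3');
            assert(hex_char(0xC) == 'C');
            return 0;
    }
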
@@ -353,7 +510,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
353 reg_val &= ~(IXGBE_RXCTRL_RXEN); 510 reg_val &= ~(IXGBE_RXCTRL_RXEN);
354 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); 511 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
355 IXGBE_WRITE_FLUSH(hw); 512 IXGBE_WRITE_FLUSH(hw);
356 msleep(2); 513 usleep_range(2000, 4000);
357 514
358 /* Clear interrupt mask to stop from interrupts being generated */ 515 /* Clear interrupt mask to stop from interrupts being generated */
359 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); 516 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
@@ -375,8 +532,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
375 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 532 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
376 * access and verify no pending requests 533 * access and verify no pending requests
377 */ 534 */
378 if (ixgbe_disable_pcie_master(hw) != 0) 535 ixgbe_disable_pcie_master(hw);
379 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
380 536
381 return 0; 537 return 0;
382} 538}
@@ -435,6 +591,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
435 /* Set default semaphore delay to 10ms which is a well 591 /* Set default semaphore delay to 10ms which is a well
436 * tested value */ 592 * tested value */
437 eeprom->semaphore_delay = 10; 593 eeprom->semaphore_delay = 10;
594 /* Clear EEPROM page size, it will be initialized as needed */
595 eeprom->word_page_size = 0;
438 596
439 /* 597 /*
440 * Check for EEPROM present first. 598 * Check for EEPROM present first.
@@ -467,26 +625,78 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
467} 625}
468 626
469/** 627/**
470 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM 628 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
471 * @hw: pointer to hardware structure 629 * @hw: pointer to hardware structure
472 * @offset: offset within the EEPROM to be written to 630 * @offset: offset within the EEPROM to write
473 * @data: 16 bit word to be written to the EEPROM 631 * @words: number of words
632 * @data: 16 bit word(s) to write to EEPROM
474 * 633 *
 475 * If ixgbe_eeprom_update_checksum is not called after this function, the 634 * Writes 16 bit word(s) to EEPROM through bit-bang method
476 * EEPROM will most likely contain an invalid checksum.
477 **/ 635 **/
478s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) 636s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
637 u16 words, u16 *data)
479{ 638{
480 s32 status; 639 s32 status = 0;
481 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; 640 u16 i, count;
482 641
483 hw->eeprom.ops.init_params(hw); 642 hw->eeprom.ops.init_params(hw);
484 643
485 if (offset >= hw->eeprom.word_size) { 644 if (words == 0) {
645 status = IXGBE_ERR_INVALID_ARGUMENT;
646 goto out;
647 }
648
649 if (offset + words > hw->eeprom.word_size) {
486 status = IXGBE_ERR_EEPROM; 650 status = IXGBE_ERR_EEPROM;
487 goto out; 651 goto out;
488 } 652 }
489 653
654 /*
655 * The EEPROM page size cannot be queried from the chip. We do lazy
 656 * initialization. It is only worth doing when we write a large buffer.
657 */
658 if ((hw->eeprom.word_page_size == 0) &&
659 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
660 ixgbe_detect_eeprom_page_size_generic(hw, offset);
661
662 /*
663 * We cannot hold synchronization semaphores for too long
 664 * or we would starve other entities. However it is more efficient
 665 * to write in bursts than to synchronize access for each word.
666 */
667 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
668 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
669 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
670 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
671 count, &data[i]);
672
673 if (status != 0)
674 break;
675 }
676
677out:
678 return status;
679}
680
681/**
682 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
683 * @hw: pointer to hardware structure
684 * @offset: offset within the EEPROM to be written to
685 * @words: number of word(s)
686 * @data: 16 bit word(s) to be written to the EEPROM
687 *
688 * If ixgbe_eeprom_update_checksum is not called after this function, the
689 * EEPROM will most likely contain an invalid checksum.
690 **/
691static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
692 u16 words, u16 *data)
693{
694 s32 status;
695 u16 word;
696 u16 page_size;
697 u16 i;
698 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
699
490 /* Prepare the EEPROM for writing */ 700 /* Prepare the EEPROM for writing */
491 status = ixgbe_acquire_eeprom(hw); 701 status = ixgbe_acquire_eeprom(hw);
492 702
@@ -498,63 +708,147 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
498 } 708 }
499 709
500 if (status == 0) { 710 if (status == 0) {
501 ixgbe_standby_eeprom(hw); 711 for (i = 0; i < words; i++) {
712 ixgbe_standby_eeprom(hw);
502 713
503 /* Send the WRITE ENABLE command (8 bit opcode ) */ 714 /* Send the WRITE ENABLE command (8 bit opcode ) */
504 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI, 715 ixgbe_shift_out_eeprom_bits(hw,
505 IXGBE_EEPROM_OPCODE_BITS); 716 IXGBE_EEPROM_WREN_OPCODE_SPI,
717 IXGBE_EEPROM_OPCODE_BITS);
506 718
507 ixgbe_standby_eeprom(hw); 719 ixgbe_standby_eeprom(hw);
508 720
509 /* 721 /*
510 * Some SPI eeproms use the 8th address bit embedded in the 722 * Some SPI eeproms use the 8th address bit embedded
511 * opcode 723 * in the opcode
512 */ 724 */
513 if ((hw->eeprom.address_bits == 8) && (offset >= 128)) 725 if ((hw->eeprom.address_bits == 8) &&
514 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; 726 ((offset + i) >= 128))
727 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
728
729 /* Send the Write command (8-bit opcode + addr) */
730 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
731 IXGBE_EEPROM_OPCODE_BITS);
732 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
733 hw->eeprom.address_bits);
734
735 page_size = hw->eeprom.word_page_size;
736
 737 /* Send the data in bursts via SPI */
738 do {
739 word = data[i];
740 word = (word >> 8) | (word << 8);
741 ixgbe_shift_out_eeprom_bits(hw, word, 16);
742
743 if (page_size == 0)
744 break;
745
746 /* do not wrap around page */
747 if (((offset + i) & (page_size - 1)) ==
748 (page_size - 1))
749 break;
750 } while (++i < words);
751
752 ixgbe_standby_eeprom(hw);
753 usleep_range(10000, 20000);
754 }
755 /* Done with writing - release the EEPROM */
756 ixgbe_release_eeprom(hw);
757 }
515 758
516 /* Send the Write command (8-bit opcode + addr) */ 759 return status;
517 ixgbe_shift_out_eeprom_bits(hw, write_opcode, 760}
518 IXGBE_EEPROM_OPCODE_BITS);
519 ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
520 hw->eeprom.address_bits);
521 761
522 /* Send the data */ 762/**
523 data = (data >> 8) | (data << 8); 763 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
524 ixgbe_shift_out_eeprom_bits(hw, data, 16); 764 * @hw: pointer to hardware structure
525 ixgbe_standby_eeprom(hw); 765 * @offset: offset within the EEPROM to be written to
766 * @data: 16 bit word to be written to the EEPROM
767 *
768 * If ixgbe_eeprom_update_checksum is not called after this function, the
769 * EEPROM will most likely contain an invalid checksum.
770 **/
771s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
772{
773 s32 status;
526 774
527 msleep(hw->eeprom.semaphore_delay); 775 hw->eeprom.ops.init_params(hw);
528 /* Done with writing - release the EEPROM */ 776
529 ixgbe_release_eeprom(hw); 777 if (offset >= hw->eeprom.word_size) {
778 status = IXGBE_ERR_EEPROM;
779 goto out;
530 } 780 }
531 781
782 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
783
532out: 784out:
533 return status; 785 return status;
534} 786}
535 787
536/** 788/**
537 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang 789 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
538 * @hw: pointer to hardware structure 790 * @hw: pointer to hardware structure
539 * @offset: offset within the EEPROM to be read 791 * @offset: offset within the EEPROM to be read
540 * @data: read 16 bit value from EEPROM 792 * @words: number of word(s)
793 * @data: read 16 bit words(s) from EEPROM
541 * 794 *
542 * Reads 16 bit value from EEPROM through bit-bang method 795 * Reads 16 bit word(s) from EEPROM through bit-bang method
543 **/ 796 **/
544s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 797s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
545 u16 *data) 798 u16 words, u16 *data)
546{ 799{
547 s32 status; 800 s32 status = 0;
548 u16 word_in; 801 u16 i, count;
549 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
550 802
551 hw->eeprom.ops.init_params(hw); 803 hw->eeprom.ops.init_params(hw);
552 804
553 if (offset >= hw->eeprom.word_size) { 805 if (words == 0) {
806 status = IXGBE_ERR_INVALID_ARGUMENT;
807 goto out;
808 }
809
810 if (offset + words > hw->eeprom.word_size) {
554 status = IXGBE_ERR_EEPROM; 811 status = IXGBE_ERR_EEPROM;
555 goto out; 812 goto out;
556 } 813 }
557 814
815 /*
816 * We cannot hold synchronization semaphores for too long
 817 * or we would starve other entities. However it is more efficient
 818 * to read in bursts than to synchronize access for each word.
819 */
820 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
821 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
822 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
823
824 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
825 count, &data[i]);
826
827 if (status != 0)
828 break;
829 }
830
831out:
832 return status;
833}
834
835/**
836 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
837 * @hw: pointer to hardware structure
838 * @offset: offset within the EEPROM to be read
839 * @words: number of word(s)
840 * @data: read 16 bit word(s) from EEPROM
841 *
842 * Reads 16 bit word(s) from EEPROM through bit-bang method
843 **/
844static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
845 u16 words, u16 *data)
846{
847 s32 status;
848 u16 word_in;
849 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
850 u16 i;
851
558 /* Prepare the EEPROM for reading */ 852 /* Prepare the EEPROM for reading */
559 status = ixgbe_acquire_eeprom(hw); 853 status = ixgbe_acquire_eeprom(hw);
560 854
@@ -566,29 +860,145 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
566 } 860 }
567 861
568 if (status == 0) { 862 if (status == 0) {
569 ixgbe_standby_eeprom(hw); 863 for (i = 0; i < words; i++) {
864 ixgbe_standby_eeprom(hw);
865 /*
866 * Some SPI eeproms use the 8th address bit embedded
867 * in the opcode
868 */
869 if ((hw->eeprom.address_bits == 8) &&
870 ((offset + i) >= 128))
871 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
872
873 /* Send the READ command (opcode + addr) */
874 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
875 IXGBE_EEPROM_OPCODE_BITS);
876 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
877 hw->eeprom.address_bits);
878
879 /* Read the data. */
880 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
881 data[i] = (word_in >> 8) | (word_in << 8);
882 }
570 883
571 /* 884 /* End this read operation */
572 * Some SPI eeproms use the 8th address bit embedded in the 885 ixgbe_release_eeprom(hw);
573 * opcode 886 }
574 */
575 if ((hw->eeprom.address_bits == 8) && (offset >= 128))
576 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
577 887
578 /* Send the READ command (opcode + addr) */ 888 return status;
579 ixgbe_shift_out_eeprom_bits(hw, read_opcode, 889}
580 IXGBE_EEPROM_OPCODE_BITS);
581 ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
582 hw->eeprom.address_bits);
583 890
584 /* Read the data. */ 891/**
585 word_in = ixgbe_shift_in_eeprom_bits(hw, 16); 892 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
586 *data = (word_in >> 8) | (word_in << 8); 893 * @hw: pointer to hardware structure
894 * @offset: offset within the EEPROM to be read
895 * @data: read 16 bit value from EEPROM
896 *
897 * Reads 16 bit value from EEPROM through bit-bang method
898 **/
899s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
900 u16 *data)
901{
902 s32 status;
587 903
588 /* End this read operation */ 904 hw->eeprom.ops.init_params(hw);
589 ixgbe_release_eeprom(hw); 905
906 if (offset >= hw->eeprom.word_size) {
907 status = IXGBE_ERR_EEPROM;
908 goto out;
590 } 909 }
591 910
911 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
912
913out:
914 return status;
915}
916
917/**
918 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
919 * @hw: pointer to hardware structure
920 * @offset: offset of word in the EEPROM to read
921 * @words: number of word(s)
922 * @data: 16 bit word(s) from the EEPROM
923 *
 924 * Reads 16 bit word(s) from the EEPROM using the EERD register.
925 **/
926s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
927 u16 words, u16 *data)
928{
929 u32 eerd;
930 s32 status = 0;
931 u32 i;
932
933 hw->eeprom.ops.init_params(hw);
934
935 if (words == 0) {
936 status = IXGBE_ERR_INVALID_ARGUMENT;
937 goto out;
938 }
939
940 if (offset >= hw->eeprom.word_size) {
941 status = IXGBE_ERR_EEPROM;
942 goto out;
943 }
944
945 for (i = 0; i < words; i++) {
946 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
947 IXGBE_EEPROM_RW_REG_START;
948
949 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
950 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
951
952 if (status == 0) {
953 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
954 IXGBE_EEPROM_RW_REG_DATA);
955 } else {
956 hw_dbg(hw, "Eeprom read timed out\n");
957 goto out;
958 }
959 }
960out:
961 return status;
962}
963
964/**
965 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
966 * @hw: pointer to hardware structure
967 * @offset: offset within the EEPROM to be used as a scratch pad
968 *
969 * Discover EEPROM page size by writing marching data at given offset.
970 * This function is called only when we are writing a new large buffer
971 * at given offset so the data would be overwritten anyway.
972 **/
973static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
974 u16 offset)
975{
976 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
977 s32 status = 0;
978 u16 i;
979
980 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
981 data[i] = i;
982
983 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
984 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
985 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
986 hw->eeprom.word_page_size = 0;
987 if (status != 0)
988 goto out;
989
990 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
991 if (status != 0)
992 goto out;
993
994 /*
995 * When writing in burst more than the actual page size
996 * EEPROM address wraps around current page.
997 */
998 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
999
1000 hw_dbg(hw, "Detected EEPROM page size = %d words.",
1001 hw->eeprom.word_page_size);
592out: 1002out:
593 return status; 1003 return status;
594} 1004}
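
A hosted-C simulation of the wrap-around trick above: bursting IXGBE_EEPROM_PAGE_SIZE_MAX marching words into a part whose real page is smaller makes the tail of the burst wrap to the page start, so the word read back at the start offset equals MAX minus the page size. The 128-word maximum and 32-word device page are example values.

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE_MAX 128 /* stand-in for IXGBE_EEPROM_PAGE_SIZE_MAX */

    int main(void)
    {
            const uint16_t real_page = 32; /* hidden device property */
            uint16_t mem[PAGE_SIZE_MAX] = { 0 };
            uint16_t i;

            /* a burst write wraps within the page, as the SPI part would */
            for (i = 0; i < PAGE_SIZE_MAX; i++)
                    mem[i % real_page] = i;

            assert(PAGE_SIZE_MAX - mem[0] == real_page);
            return 0;
    }
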
@@ -603,33 +1013,75 @@ out:
603 **/ 1013 **/
604s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) 1014s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
605{ 1015{
606 u32 eerd; 1016 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
607 s32 status; 1017}
1018
1019/**
1020 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1021 * @hw: pointer to hardware structure
1022 * @offset: offset of word in the EEPROM to write
1023 * @words: number of words
 1024 * @data: word(s) to write to the EEPROM
 1025 *
 1026 * Writes 16 bit word(s) to the EEPROM using the EEWR register.
1027 **/
1028s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1029 u16 words, u16 *data)
1030{
1031 u32 eewr;
1032 s32 status = 0;
1033 u16 i;
608 1034
609 hw->eeprom.ops.init_params(hw); 1035 hw->eeprom.ops.init_params(hw);
610 1036
1037 if (words == 0) {
1038 status = IXGBE_ERR_INVALID_ARGUMENT;
1039 goto out;
1040 }
1041
611 if (offset >= hw->eeprom.word_size) { 1042 if (offset >= hw->eeprom.word_size) {
612 status = IXGBE_ERR_EEPROM; 1043 status = IXGBE_ERR_EEPROM;
613 goto out; 1044 goto out;
614 } 1045 }
615 1046
616 eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) + 1047 for (i = 0; i < words; i++) {
617 IXGBE_EEPROM_RW_REG_START; 1048 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1049 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1050 IXGBE_EEPROM_RW_REG_START;
618 1051
619 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); 1052 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
620 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); 1053 if (status != 0) {
1054 hw_dbg(hw, "Eeprom write EEWR timed out\n");
1055 goto out;
1056 }
621 1057
622 if (status == 0) 1058 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
623 *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >> 1059
624 IXGBE_EEPROM_RW_REG_DATA); 1060 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
625 else 1061 if (status != 0) {
626 hw_dbg(hw, "Eeprom read timed out\n"); 1062 hw_dbg(hw, "Eeprom write EEWR timed out\n");
1063 goto out;
1064 }
1065 }
627 1066
628out: 1067out:
629 return status; 1068 return status;
630} 1069}
631 1070
632/** 1071/**
1072 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1073 * @hw: pointer to hardware structure
1074 * @offset: offset of word in the EEPROM to write
 1075 * @data: word to write to the EEPROM
1076 *
1077 * Write a 16 bit word to the EEPROM using the EEWR register.
1078 **/
1079s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1080{
1081 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1082}
1083
1084/**
633 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status 1085 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
634 * @hw: pointer to hardware structure 1086 * @hw: pointer to hardware structure
635 * @ee_reg: EEPROM flag for polling 1087 * @ee_reg: EEPROM flag for polling
@@ -637,7 +1089,7 @@ out:
637 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the 1089 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
638 * read or write is done respectively. 1090 * read or write is done respectively.
639 **/ 1091 **/
640s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) 1092static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
641{ 1093{
642 u32 i; 1094 u32 i;
643 u32 reg; 1095 u32 reg;
@@ -668,10 +1120,10 @@ s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
668static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) 1120static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
669{ 1121{
670 s32 status = 0; 1122 s32 status = 0;
671 u32 eec = 0; 1123 u32 eec;
672 u32 i; 1124 u32 i;
673 1125
674 if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) 1126 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
675 status = IXGBE_ERR_SWFW_SYNC; 1127 status = IXGBE_ERR_SWFW_SYNC;
676 1128
677 if (status == 0) { 1129 if (status == 0) {
@@ -694,18 +1146,18 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
694 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1146 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
695 hw_dbg(hw, "Could not acquire EEPROM grant\n"); 1147 hw_dbg(hw, "Could not acquire EEPROM grant\n");
696 1148
697 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1149 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
698 status = IXGBE_ERR_EEPROM; 1150 status = IXGBE_ERR_EEPROM;
699 } 1151 }
700 }
701 1152
702 /* Setup EEPROM for Read/Write */ 1153 /* Setup EEPROM for Read/Write */
703 if (status == 0) { 1154 if (status == 0) {
704 /* Clear CS and SK */ 1155 /* Clear CS and SK */
705 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); 1156 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
706 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1157 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
707 IXGBE_WRITE_FLUSH(hw); 1158 IXGBE_WRITE_FLUSH(hw);
708 udelay(1); 1159 udelay(1);
1160 }
709 } 1161 }
710 return status; 1162 return status;
711} 1163}
@@ -719,13 +1171,10 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
719static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) 1171static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
720{ 1172{
721 s32 status = IXGBE_ERR_EEPROM; 1173 s32 status = IXGBE_ERR_EEPROM;
722 u32 timeout; 1174 u32 timeout = 2000;
723 u32 i; 1175 u32 i;
724 u32 swsm; 1176 u32 swsm;
725 1177
726 /* Set timeout value based on size of EEPROM */
727 timeout = hw->eeprom.word_size + 1;
728
729 /* Get SMBI software semaphore between device drivers first */ 1178 /* Get SMBI software semaphore between device drivers first */
730 for (i = 0; i < timeout; i++) { 1179 for (i = 0; i < timeout; i++) {
731 /* 1180 /*
@@ -737,7 +1186,29 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
737 status = 0; 1186 status = 0;
738 break; 1187 break;
739 } 1188 }
740 msleep(1); 1189 udelay(50);
1190 }
1191
1192 if (i == timeout) {
1193 hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
1194 "not granted.\n");
1195 /*
1196 * this release is particularly important because our attempts
1197 * above to get the semaphore may have succeeded, and if there
1198 * was a timeout, we should unconditionally clear the semaphore
1199 * bits to free the driver to make progress
1200 */
1201 ixgbe_release_eeprom_semaphore(hw);
1202
1203 udelay(50);
1204 /*
1205 * one last try
1206 * If the SMBI bit is 0 when we read it, then the bit will be
1207 * set and we have the semaphore
1208 */
1209 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1210 if (!(swsm & IXGBE_SWSM_SMBI))
1211 status = 0;
741 } 1212 }
742 1213
743 /* Now get the semaphore between SW/FW through the SWESMBI bit */ 1214 /* Now get the semaphore between SW/FW through the SWESMBI bit */
@@ -765,11 +1236,14 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
765 * was not granted because we don't have access to the EEPROM 1236 * was not granted because we don't have access to the EEPROM
766 */ 1237 */
767 if (i >= timeout) { 1238 if (i >= timeout) {
768 hw_dbg(hw, "Driver can't access the Eeprom - Semaphore " 1239 hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
769 "not granted.\n"); 1240 "not granted.\n");
770 ixgbe_release_eeprom_semaphore(hw); 1241 ixgbe_release_eeprom_semaphore(hw);
771 status = IXGBE_ERR_EEPROM; 1242 status = IXGBE_ERR_EEPROM;
772 } 1243 }
1244 } else {
1245 hw_dbg(hw, "Software semaphore SMBI between device drivers "
1246 "not granted.\n");
773 } 1247 }
774 1248
775 return status; 1249 return status;
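The SMBI/SWESMBI dance above is a two-stage lock: SMBI arbitrates between device drivers (hardware sets it as a side effect of reading SWSM, so reading it back clear means you now own it), and SWESMBI then arbitrates between software and firmware. On timeout the code releases unconditionally, since a stuck bit would otherwise wedge every later caller. A condensed model, with assumed bit positions and hypothetical accessors:

#include <stdbool.h>
#include <stdint.h>

#define SWSM_SMBI    (1u << 1)   /* driver-vs-driver lock (assumed bit) */
#define SWSM_SWESMBI (1u << 2)   /* software-vs-firmware lock (assumed bit) */

extern uint32_t read_swsm(void);     /* hypothetical SWSM accessors */
extern void write_swsm(uint32_t v);

static bool acquire_nvm_semaphores(int timeout)
{
	int i;

	/* Stage 1: hardware sets SMBI on read, so seeing it clear == owning it */
	for (i = 0; i < timeout; i++)
		if (!(read_swsm() & SWSM_SMBI))
			break;
	if (i == timeout)
		return false;

	/* Stage 2: request SWESMBI and read back to see if FW yielded it */
	write_swsm(read_swsm() | SWSM_SWESMBI);
	if (read_swsm() & SWSM_SWESMBI)
		return true;

	write_swsm(read_swsm() & ~SWSM_SMBI);	/* back off stage 1 */
	return false;
}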
@@ -1001,14 +1475,21 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1001 eec &= ~IXGBE_EEC_REQ; 1475 eec &= ~IXGBE_EEC_REQ;
1002 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1476 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1003 1477
1004 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1478 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1479
1480 /*
1481 * Delay before attempt to obtain semaphore again to allow FW
 1482 * access. semaphore_delay is in ms; usleep_range needs microseconds.
1483 */
1484 usleep_range(hw->eeprom.semaphore_delay * 1000,
1485 hw->eeprom.semaphore_delay * 2000);
1005} 1486}
1006 1487
1007/** 1488/**
1008 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum 1489 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1009 * @hw: pointer to hardware structure 1490 * @hw: pointer to hardware structure
1010 **/ 1491 **/
1011static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) 1492u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1012{ 1493{
1013 u16 i; 1494 u16 i;
1014 u16 j; 1495 u16 j;
@@ -1071,7 +1552,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1071 status = hw->eeprom.ops.read(hw, 0, &checksum); 1552 status = hw->eeprom.ops.read(hw, 0, &checksum);
1072 1553
1073 if (status == 0) { 1554 if (status == 0) {
1074 checksum = ixgbe_calc_eeprom_checksum(hw); 1555 checksum = hw->eeprom.ops.calc_checksum(hw);
1075 1556
1076 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); 1557 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1077 1558
@@ -1109,9 +1590,9 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1109 status = hw->eeprom.ops.read(hw, 0, &checksum); 1590 status = hw->eeprom.ops.read(hw, 0, &checksum);
1110 1591
1111 if (status == 0) { 1592 if (status == 0) {
1112 checksum = ixgbe_calc_eeprom_checksum(hw); 1593 checksum = hw->eeprom.ops.calc_checksum(hw);
1113 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, 1594 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1114 checksum); 1595 checksum);
1115 } else { 1596 } else {
1116 hw_dbg(hw, "EEPROM read failed\n"); 1597 hw_dbg(hw, "EEPROM read failed\n");
1117 } 1598 }
@@ -1159,37 +1640,37 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1159 u32 rar_low, rar_high; 1640 u32 rar_low, rar_high;
1160 u32 rar_entries = hw->mac.num_rar_entries; 1641 u32 rar_entries = hw->mac.num_rar_entries;
1161 1642
1643 /* Make sure we are using a valid rar index range */
1644 if (index >= rar_entries) {
1645 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1646 return IXGBE_ERR_INVALID_ARGUMENT;
1647 }
1648
1162 /* setup VMDq pool selection before this RAR gets enabled */ 1649 /* setup VMDq pool selection before this RAR gets enabled */
1163 hw->mac.ops.set_vmdq(hw, index, vmdq); 1650 hw->mac.ops.set_vmdq(hw, index, vmdq);
1164 1651
1165 /* Make sure we are using a valid rar index range */ 1652 /*
1166 if (index < rar_entries) { 1653 * HW expects these in little endian so we reverse the byte
1167 /* 1654 * order from network order (big endian) to little endian
1168 * HW expects these in little endian so we reverse the byte 1655 */
1169 * order from network order (big endian) to little endian 1656 rar_low = ((u32)addr[0] |
1170 */ 1657 ((u32)addr[1] << 8) |
1171 rar_low = ((u32)addr[0] | 1658 ((u32)addr[2] << 16) |
1172 ((u32)addr[1] << 8) | 1659 ((u32)addr[3] << 24));
1173 ((u32)addr[2] << 16) | 1660 /*
1174 ((u32)addr[3] << 24)); 1661 * Some parts put the VMDq setting in the extra RAH bits,
1175 /* 1662 * so save everything except the lower 16 bits that hold part
1176 * Some parts put the VMDq setting in the extra RAH bits, 1663 * of the address and the address valid bit.
1177 * so save everything except the lower 16 bits that hold part 1664 */
1178 * of the address and the address valid bit. 1665 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1179 */ 1666 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1180 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1667 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1181 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1182 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1183 1668
1184 if (enable_addr != 0) 1669 if (enable_addr != 0)
1185 rar_high |= IXGBE_RAH_AV; 1670 rar_high |= IXGBE_RAH_AV;
1186 1671
1187 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 1672 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1188 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1673 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1189 } else {
1190 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1191 return IXGBE_ERR_RAR_INDEX;
1192 }
1193 1674
1194 return 0; 1675 return 0;
1195} 1676}
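A worked example of the little-endian packing above, runnable on its own. The first four address bytes fill RAL low to high; RAH keeps its upper VMDq bits, takes bytes 4 and 5, and sets the address-valid flag (bit 31 here, an assumption carried over from the usual IXGBE_RAH_AV value):

#include <stdint.h>
#include <stdio.h>

#define RAH_AV (1u << 31)	/* address valid (assumed bit position) */

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x1B, 0x21, 0xAA, 0xBB, 0xCC };
	uint32_t rar_high = 0x12340000;	/* pretend VMDq bits to preserve */
	uint32_t rar_low;

	rar_low = (uint32_t)addr[0] |
		  ((uint32_t)addr[1] << 8) |
		  ((uint32_t)addr[2] << 16) |
		  ((uint32_t)addr[3] << 24);

	rar_high &= ~(0x0000FFFF | RAH_AV);	/* keep only the extra bits */
	rar_high |= (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
	rar_high |= RAH_AV;

	/* prints RAL=0xAA211B00 RAH=0x9234CCBB */
	printf("RAL=0x%08X RAH=0x%08X\n", rar_low, rar_high);
	return 0;
}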
@@ -1207,58 +1688,26 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1207 u32 rar_entries = hw->mac.num_rar_entries; 1688 u32 rar_entries = hw->mac.num_rar_entries;
1208 1689
1209 /* Make sure we are using a valid rar index range */ 1690 /* Make sure we are using a valid rar index range */
1210 if (index < rar_entries) { 1691 if (index >= rar_entries) {
1211 /*
1212 * Some parts put the VMDq setting in the extra RAH bits,
1213 * so save everything except the lower 16 bits that hold part
1214 * of the address and the address valid bit.
1215 */
1216 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1217 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1218
1219 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1220 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1221 } else {
1222 hw_dbg(hw, "RAR index %d is out of range.\n", index); 1692 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1223 return IXGBE_ERR_RAR_INDEX; 1693 return IXGBE_ERR_INVALID_ARGUMENT;
1224 } 1694 }
1225 1695
1226 /* clear VMDq pool/queue selection for this RAR */ 1696 /*
1227 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); 1697 * Some parts put the VMDq setting in the extra RAH bits,
1228 1698 * so save everything except the lower 16 bits that hold part
1229 return 0; 1699 * of the address and the address valid bit.
1230} 1700 */
1231
1232/**
1233 * ixgbe_enable_rar - Enable Rx address register
1234 * @hw: pointer to hardware structure
1235 * @index: index into the RAR table
1236 *
1237 * Enables the select receive address register.
1238 **/
1239static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
1240{
1241 u32 rar_high;
1242
1243 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1701 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1244 rar_high |= IXGBE_RAH_AV; 1702 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1703
1704 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1245 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1705 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1246}
1247 1706
1248/** 1707 /* clear VMDq pool/queue selection for this RAR */
1249 * ixgbe_disable_rar - Disable Rx address register 1708 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1250 * @hw: pointer to hardware structure
1251 * @index: index into the RAR table
1252 *
1253 * Disables the select receive address register.
1254 **/
1255static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
1256{
1257 u32 rar_high;
1258 1709
1259 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1710 return 0;
1260 rar_high &= (~IXGBE_RAH_AV);
1261 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1262} 1711}
1263 1712
1264/** 1713/**
@@ -1291,6 +1740,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1291 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); 1740 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
1292 1741
1293 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1742 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1743
1744 /* clear VMDq pool/queue selection for RAR 0 */
1745 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
1294 } 1746 }
1295 hw->addr_ctrl.overflow_promisc = 0; 1747 hw->addr_ctrl.overflow_promisc = 0;
1296 1748
@@ -1304,7 +1756,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1304 } 1756 }
1305 1757
1306 /* Clear the MTA */ 1758 /* Clear the MTA */
1307 hw->addr_ctrl.mc_addr_in_rar_count = 0;
1308 hw->addr_ctrl.mta_in_use = 0; 1759 hw->addr_ctrl.mta_in_use = 0;
1309 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1760 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1310 1761
@@ -1319,105 +1770,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1319} 1770}
1320 1771
1321/** 1772/**
1322 * ixgbe_add_uc_addr - Adds a secondary unicast address.
1323 * @hw: pointer to hardware structure
1324 * @addr: new address
1325 *
1326 * Adds it to unused receive address register or goes into promiscuous mode.
1327 **/
1328static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1329{
1330 u32 rar_entries = hw->mac.num_rar_entries;
1331 u32 rar;
1332
1333 hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1334 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1335
1336 /*
1337 * Place this address in the RAR if there is room,
1338 * else put the controller into promiscuous mode
1339 */
1340 if (hw->addr_ctrl.rar_used_count < rar_entries) {
1341 rar = hw->addr_ctrl.rar_used_count -
1342 hw->addr_ctrl.mc_addr_in_rar_count;
1343 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1344 hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
1345 hw->addr_ctrl.rar_used_count++;
1346 } else {
1347 hw->addr_ctrl.overflow_promisc++;
1348 }
1349
1350 hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
1351}
1352
1353/**
1354 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1355 * @hw: pointer to hardware structure
1356 * @netdev: pointer to net device structure
1357 *
1358 * The given list replaces any existing list. Clears the secondary addrs from
1359 * receive address registers. Uses unused receive address registers for the
1360 * first secondary addresses, and falls back to promiscuous mode as needed.
1361 *
1362 * Drivers using secondary unicast addresses must set user_set_promisc when
1363 * manually putting the device into promiscuous mode.
1364 **/
1365s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
1366 struct net_device *netdev)
1367{
1368 u32 i;
1369 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
1370 u32 uc_addr_in_use;
1371 u32 fctrl;
1372 struct netdev_hw_addr *ha;
1373
1374 /*
1375 * Clear accounting of old secondary address list,
1376 * don't count RAR[0]
1377 */
1378 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
1379 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
1380 hw->addr_ctrl.overflow_promisc = 0;
1381
1382 /* Zero out the other receive addresses */
1383 hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
1384 for (i = 0; i < uc_addr_in_use; i++) {
1385 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
1386 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
1387 }
1388
1389 /* Add the new addresses */
1390 netdev_for_each_uc_addr(ha, netdev) {
1391 hw_dbg(hw, " Adding the secondary addresses:\n");
1392 ixgbe_add_uc_addr(hw, ha->addr, 0);
1393 }
1394
1395 if (hw->addr_ctrl.overflow_promisc) {
1396 /* enable promisc if not already in overflow or set by user */
1397 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1398 hw_dbg(hw, " Entering address overflow promisc mode\n");
1399 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1400 fctrl |= IXGBE_FCTRL_UPE;
1401 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1402 hw->addr_ctrl.uc_set_promisc = true;
1403 }
1404 } else {
1405 /* only disable if set by overflow, not by user */
1406 if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
1407 !(hw->addr_ctrl.user_set_promisc)) {
1408 hw_dbg(hw, " Leaving address overflow promisc mode\n");
1409 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1410 fctrl &= ~IXGBE_FCTRL_UPE;
1411 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1412 hw->addr_ctrl.uc_set_promisc = false;
1413 }
1414 }
1415
1416 hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
1417 return 0;
1418}
1419
1420/**
1421 * ixgbe_mta_vector - Determines bit-vector in multicast table to set 1773 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
1422 * @hw: pointer to hardware structure 1774 * @hw: pointer to hardware structure
1423 * @mc_addr: the multicast address 1775 * @mc_addr: the multicast address
@@ -1468,7 +1820,6 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1468 u32 vector; 1820 u32 vector;
1469 u32 vector_bit; 1821 u32 vector_bit;
1470 u32 vector_reg; 1822 u32 vector_reg;
1471 u32 mta_reg;
1472 1823
1473 hw->addr_ctrl.mta_in_use++; 1824 hw->addr_ctrl.mta_in_use++;
1474 1825
@@ -1486,9 +1837,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1486 */ 1837 */
1487 vector_reg = (vector >> 5) & 0x7F; 1838 vector_reg = (vector >> 5) & 0x7F;
1488 vector_bit = vector & 0x1F; 1839 vector_bit = vector & 0x1F;
1489 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); 1840 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
1490 mta_reg |= (1 << vector_bit);
1491 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
1492} 1841}
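The indexing above is worth a worked example: the 12-bit hash vector addresses a 4096-bit table stored as 128 32-bit words, with bits 11:5 selecting the word and bits 4:0 the bit within it. A standalone check (the vector value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mta_shadow[128] = { 0 };
	uint16_t vector = 0x9A7;		/* example 12-bit hash */
	uint32_t reg = (vector >> 5) & 0x7F;	/* word index: 77 */
	uint32_t bit = vector & 0x1F;		/* bit index:   7 */

	mta_shadow[reg] |= 1u << bit;
	printf("MTA[%u] bit %u -> 0x%08X\n", reg, bit, mta_shadow[reg]);
	return 0;
}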
1493 1842
1494/** 1843/**
@@ -1514,18 +1863,21 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
1514 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); 1863 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
1515 hw->addr_ctrl.mta_in_use = 0; 1864 hw->addr_ctrl.mta_in_use = 0;
1516 1865
1517 /* Clear the MTA */ 1866 /* Clear mta_shadow */
1518 hw_dbg(hw, " Clearing MTA\n"); 1867 hw_dbg(hw, " Clearing MTA\n");
1519 for (i = 0; i < hw->mac.mcft_size; i++) 1868 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
1520 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1521 1869
1522 /* Add the new addresses */ 1870 /* Update mta shadow */
1523 netdev_for_each_mc_addr(ha, netdev) { 1871 netdev_for_each_mc_addr(ha, netdev) {
1524 hw_dbg(hw, " Adding the multicast addresses:\n"); 1872 hw_dbg(hw, " Adding the multicast addresses:\n");
1525 ixgbe_set_mta(hw, ha->addr); 1873 ixgbe_set_mta(hw, ha->addr);
1526 } 1874 }
1527 1875
1528 /* Enable mta */ 1876 /* Enable mta */
1877 for (i = 0; i < hw->mac.mcft_size; i++)
1878 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
1879 hw->mac.mta_shadow[i]);
1880
1529 if (hw->addr_ctrl.mta_in_use > 0) 1881 if (hw->addr_ctrl.mta_in_use > 0)
1530 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 1882 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
1531 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 1883 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
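The shadow table turns each multicast update into an in-memory OR plus one bulk write-back, rather than a read-modify-write of every MTA word over MMIO. A sketch of the resulting shape, with hypothetical write_mta/mta_vector helpers standing in for the register write and the hash above:

#include <stdint.h>
#include <string.h>

extern void write_mta(int i, uint32_t v);		/* hypothetical MMIO write */
extern uint16_t mta_vector(const uint8_t *mc_addr);	/* 12-bit hash, as above */

static void rebuild_mta(const uint8_t (*list)[6], int n)
{
	uint32_t shadow[128];
	int i;

	memset(shadow, 0, sizeof(shadow));	/* clear without reading back */
	for (i = 0; i < n; i++) {
		uint16_t v = mta_vector(list[i]);
		shadow[(v >> 5) & 0x7F] |= 1u << (v & 0x1F);
	}
	for (i = 0; i < 128; i++)
		write_mta(i, shadow[i]);	/* single write-back pass */
}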
@@ -1542,15 +1894,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
1542 **/ 1894 **/
1543s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) 1895s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
1544{ 1896{
1545 u32 i;
1546 u32 rar_entries = hw->mac.num_rar_entries;
1547 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1897 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1548 1898
1549 if (a->mc_addr_in_rar_count > 0)
1550 for (i = (rar_entries - a->mc_addr_in_rar_count);
1551 i < rar_entries; i++)
1552 ixgbe_enable_rar(hw, i);
1553
1554 if (a->mta_in_use > 0) 1899 if (a->mta_in_use > 0)
1555 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | 1900 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
1556 hw->mac.mc_filter_type); 1901 hw->mac.mc_filter_type);
@@ -1566,15 +1911,8 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
1566 **/ 1911 **/
1567s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) 1912s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
1568{ 1913{
1569 u32 i;
1570 u32 rar_entries = hw->mac.num_rar_entries;
1571 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1914 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1572 1915
1573 if (a->mc_addr_in_rar_count > 0)
1574 for (i = (rar_entries - a->mc_addr_in_rar_count);
1575 i < rar_entries; i++)
1576 ixgbe_disable_rar(hw, i);
1577
1578 if (a->mta_in_use > 0) 1916 if (a->mta_in_use > 0)
1579 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1917 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1580 1918
@@ -1594,6 +1932,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1594 u32 mflcn_reg, fccfg_reg; 1932 u32 mflcn_reg, fccfg_reg;
1595 u32 reg; 1933 u32 reg;
1596 u32 rx_pba_size; 1934 u32 rx_pba_size;
1935 u32 fcrtl, fcrth;
1597 1936
1598#ifdef CONFIG_DCB 1937#ifdef CONFIG_DCB
1599 if (hw->fc.requested_mode == ixgbe_fc_pfc) 1938 if (hw->fc.requested_mode == ixgbe_fc_pfc)
@@ -1602,7 +1941,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1602#endif /* CONFIG_DCB */ 1941#endif /* CONFIG_DCB */
1603 /* Negotiate the fc mode to use */ 1942 /* Negotiate the fc mode to use */
1604 ret_val = ixgbe_fc_autoneg(hw); 1943 ret_val = ixgbe_fc_autoneg(hw);
1605 if (ret_val) 1944 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
1606 goto out; 1945 goto out;
1607 1946
1608 /* Disable any previous flow control settings */ 1947 /* Disable any previous flow control settings */
@@ -1620,7 +1959,9 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1620 * 2: Tx flow control is enabled (we can send pause frames but 1959 * 2: Tx flow control is enabled (we can send pause frames but
1621 * we do not support receiving pause frames). 1960 * we do not support receiving pause frames).
1622 * 3: Both Rx and Tx flow control (symmetric) are enabled. 1961 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1962#ifdef CONFIG_DCB
1623 * 4: Priority Flow Control is enabled. 1963 * 4: Priority Flow Control is enabled.
1964#endif
1624 * other: Invalid. 1965 * other: Invalid.
1625 */ 1966 */
1626 switch (hw->fc.current_mode) { 1967 switch (hw->fc.current_mode) {
@@ -1670,41 +2011,21 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1670 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 2011 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
1671 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 2012 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
1672 2013
1673 reg = IXGBE_READ_REG(hw, IXGBE_MTQC); 2014 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
1674 /* Thresholds are different for link flow control when in DCB mode */ 2015 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
1675 if (reg & IXGBE_MTQC_RT_ENA) {
1676 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
1677
1678 /* Always disable XON for LFC when in DCB mode */
1679 reg = (rx_pba_size >> 5) & 0xFFE0;
1680 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
1681 2016
1682 reg = (rx_pba_size >> 2) & 0xFFE0; 2017 fcrth = (rx_pba_size - hw->fc.high_water) << 10;
1683 if (hw->fc.current_mode & ixgbe_fc_tx_pause) 2018 fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
1684 reg |= IXGBE_FCRTH_FCEN;
1685 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg);
1686 } else {
1687 /*
1688 * Set up and enable Rx high/low water mark thresholds,
1689 * enable XON.
1690 */
1691 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
1692 if (hw->fc.send_xon) {
1693 IXGBE_WRITE_REG(hw,
1694 IXGBE_FCRTL_82599(packetbuf_num),
1695 (hw->fc.low_water |
1696 IXGBE_FCRTL_XONE));
1697 } else {
1698 IXGBE_WRITE_REG(hw,
1699 IXGBE_FCRTL_82599(packetbuf_num),
1700 hw->fc.low_water);
1701 }
1702 2019
1703 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), 2020 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
1704 (hw->fc.high_water | IXGBE_FCRTH_FCEN)); 2021 fcrth |= IXGBE_FCRTH_FCEN;
1705 } 2022 if (hw->fc.send_xon)
2023 fcrtl |= IXGBE_FCRTL_XONE;
1706 } 2024 }
1707 2025
2026 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
2027 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
2028
1708 /* Configure pause time (2 TCs per register) */ 2029 /* Configure pause time (2 TCs per register) */
1709 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); 2030 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
1710 if ((packetbuf_num & 1) == 0) 2031 if ((packetbuf_num & 1) == 0)
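The watermark math above can be checked in isolation. RXPBSIZE carries the packet-buffer size with 1 KB granularity at bit 10, so the shifts convert between kilobytes and byte addresses; the subtraction treats high_water/low_water as headroom measured down from the top of the buffer, which is what the code implies. Numbers below are purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define FCRTH_FCEN (1u << 31)	/* enable XOFF generation (assumed bit) */
#define FCRTL_XONE (1u << 31)	/* enable XON generation (assumed bit) */

int main(void)
{
	uint32_t rxpbsize = 512u << 10;	/* 512 KB packet buffer */
	uint32_t high_water = 32;	/* KB of headroom before XOFF */
	uint32_t low_water = 64;	/* KB of headroom before XON */
	uint32_t rx_pba_kb = rxpbsize >> 10;
	uint32_t fcrth = ((rx_pba_kb - high_water) << 10) | FCRTH_FCEN;
	uint32_t fcrtl = ((rx_pba_kb - low_water) << 10) | FCRTL_XONE;

	/* prints FCRTH=0x80078000 FCRTL=0x80070000; FCRTL sits below FCRTH */
	printf("FCRTH=0x%08X FCRTL=0x%08X\n", fcrth, fcrtl);
	return 0;
}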
@@ -1728,12 +2049,13 @@ out:
1728 **/ 2049 **/
1729s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) 2050s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1730{ 2051{
1731 s32 ret_val = 0; 2052 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1732 ixgbe_link_speed speed; 2053 ixgbe_link_speed speed;
1733 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
1734 u32 links2, anlp1_reg, autoc_reg, links;
1735 bool link_up; 2054 bool link_up;
1736 2055
2056 if (hw->fc.disable_fc_autoneg)
2057 goto out;
2058
1737 /* 2059 /*
1738 * AN should have completed when the cable was plugged in. 2060 * AN should have completed when the cable was plugged in.
1739 * Look for reasons to bail out. Bail out if: 2061 * Look for reasons to bail out. Bail out if:
@@ -1744,153 +2066,199 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1744 * So use link_up_wait_to_complete=false. 2066 * So use link_up_wait_to_complete=false.
1745 */ 2067 */
1746 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2068 hw->mac.ops.check_link(hw, &speed, &link_up, false);
1747 2069 if (!link_up) {
1748 if (hw->fc.disable_fc_autoneg || (!link_up)) { 2070 ret_val = IXGBE_ERR_FLOW_CONTROL;
1749 hw->fc.fc_was_autonegged = false;
1750 hw->fc.current_mode = hw->fc.requested_mode;
1751 goto out; 2071 goto out;
1752 } 2072 }
1753 2073
1754 /* 2074 switch (hw->phy.media_type) {
1755 * On backplane, bail out if 2075 /* Autoneg flow control on fiber adapters */
1756 * - backplane autoneg was not completed, or if 2076 case ixgbe_media_type_fiber:
1757 * - we are 82599 and link partner is not AN enabled 2077 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
1758 */ 2078 ret_val = ixgbe_fc_autoneg_fiber(hw);
1759 if (hw->phy.media_type == ixgbe_media_type_backplane) { 2079 break;
1760 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
1761 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
1762 hw->fc.fc_was_autonegged = false;
1763 hw->fc.current_mode = hw->fc.requested_mode;
1764 goto out;
1765 }
1766 2080
1767 if (hw->mac.type == ixgbe_mac_82599EB) { 2081 /* Autoneg flow control on backplane adapters */
1768 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); 2082 case ixgbe_media_type_backplane:
1769 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { 2083 ret_val = ixgbe_fc_autoneg_backplane(hw);
1770 hw->fc.fc_was_autonegged = false; 2084 break;
1771 hw->fc.current_mode = hw->fc.requested_mode; 2085
1772 goto out; 2086 /* Autoneg flow control on copper adapters */
1773 } 2087 case ixgbe_media_type_copper:
1774 } 2088 if (ixgbe_device_supports_autoneg_fc(hw) == 0)
2089 ret_val = ixgbe_fc_autoneg_copper(hw);
2090 break;
2091
2092 default:
2093 break;
1775 } 2094 }
1776 2095
2096out:
2097 if (ret_val == 0) {
2098 hw->fc.fc_was_autonegged = true;
2099 } else {
2100 hw->fc.fc_was_autonegged = false;
2101 hw->fc.current_mode = hw->fc.requested_mode;
2102 }
2103 return ret_val;
2104}
2105
2106/**
2107 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2108 * @hw: pointer to hardware structure
2109 *
2110 * Enable flow control according on 1 gig fiber.
2111 **/
2112static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2113{
2114 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2115 s32 ret_val;
2116
1777 /* 2117 /*
1778 * On multispeed fiber at 1g, bail out if 2118 * On multispeed fiber at 1g, bail out if
1779 * - link is up but AN did not complete, or if 2119 * - link is up but AN did not complete, or if
1780 * - link is up and AN completed but timed out 2120 * - link is up and AN completed but timed out
1781 */ 2121 */
1782 if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) { 2122
1783 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 2123 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
1784 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || 2124 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
1785 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { 2125 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
1786 hw->fc.fc_was_autonegged = false; 2126 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1787 hw->fc.current_mode = hw->fc.requested_mode; 2127 goto out;
1788 goto out;
1789 }
1790 } 2128 }
1791 2129
2130 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2131 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2132
2133 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2134 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2135 IXGBE_PCS1GANA_ASM_PAUSE,
2136 IXGBE_PCS1GANA_SYM_PAUSE,
2137 IXGBE_PCS1GANA_ASM_PAUSE);
2138
2139out:
2140 return ret_val;
2141}
2142
2143/**
2144 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2145 * @hw: pointer to hardware structure
2146 *
2147 * Enable flow control according to IEEE clause 37.
2148 **/
2149static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2150{
2151 u32 links2, anlp1_reg, autoc_reg, links;
2152 s32 ret_val;
2153
1792 /* 2154 /*
1793 * Bail out on 2155 * On backplane, bail out if
1794 * - copper or CX4 adapters 2156 * - backplane autoneg was not completed, or if
1795 * - fiber adapters running at 10gig 2157 * - we are 82599 and link partner is not AN enabled
1796 */ 2158 */
1797 if ((hw->phy.media_type == ixgbe_media_type_copper) || 2159 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
1798 (hw->phy.media_type == ixgbe_media_type_cx4) || 2160 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
1799 ((hw->phy.media_type == ixgbe_media_type_fiber) &&
1800 (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
1801 hw->fc.fc_was_autonegged = false; 2161 hw->fc.fc_was_autonegged = false;
1802 hw->fc.current_mode = hw->fc.requested_mode; 2162 hw->fc.current_mode = hw->fc.requested_mode;
2163 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1803 goto out; 2164 goto out;
1804 } 2165 }
1805 2166
2167 if (hw->mac.type == ixgbe_mac_82599EB) {
2168 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2169 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2170 hw->fc.fc_was_autonegged = false;
2171 hw->fc.current_mode = hw->fc.requested_mode;
2172 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2173 goto out;
2174 }
2175 }
1806 /* 2176 /*
1807 * Read the AN advertisement and LP ability registers and resolve 2177 * Read the 10g AN autoc and LP ability registers and resolve
1808 * local flow control settings accordingly 2178 * local flow control settings accordingly
1809 */ 2179 */
1810 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && 2180 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1811 (hw->phy.media_type != ixgbe_media_type_backplane)) { 2181 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
1812 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1813 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
1814 if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1815 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
1816 /*
1817 * Now we need to check if the user selected Rx ONLY
1818 * of pause frames. In this case, we had to advertise
1819 * FULL flow control because we could not advertise RX
1820 * ONLY. Hence, we must now check to see if we need to
1821 * turn OFF the TRANSMISSION of PAUSE frames.
1822 */
1823 if (hw->fc.requested_mode == ixgbe_fc_full) {
1824 hw->fc.current_mode = ixgbe_fc_full;
1825 hw_dbg(hw, "Flow Control = FULL.\n");
1826 } else {
1827 hw->fc.current_mode = ixgbe_fc_rx_pause;
1828 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1829 }
1830 } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1831 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1832 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1833 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1834 hw->fc.current_mode = ixgbe_fc_tx_pause;
1835 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1836 } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1837 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1838 !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1839 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1840 hw->fc.current_mode = ixgbe_fc_rx_pause;
1841 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1842 } else {
1843 hw->fc.current_mode = ixgbe_fc_none;
1844 hw_dbg(hw, "Flow Control = NONE.\n");
1845 }
1846 }
1847 2182
1848 if (hw->phy.media_type == ixgbe_media_type_backplane) { 2183 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2184 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2185 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2186
2187out:
2188 return ret_val;
2189}
2190
2191/**
2192 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2193 * @hw: pointer to hardware structure
2194 *
2195 * Enable flow control according to IEEE clause 37.
2196 **/
2197static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2198{
2199 u16 technology_ability_reg = 0;
2200 u16 lp_technology_ability_reg = 0;
2201
2202 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
2203 MDIO_MMD_AN,
2204 &technology_ability_reg);
2205 hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
2206 MDIO_MMD_AN,
2207 &lp_technology_ability_reg);
2208
2209 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2210 (u32)lp_technology_ability_reg,
2211 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2212 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2213}
2214
2215/**
2216 * ixgbe_negotiate_fc - Negotiate flow control
2217 * @hw: pointer to hardware structure
2218 * @adv_reg: flow control advertised settings
2219 * @lp_reg: link partner's flow control settings
2220 * @adv_sym: symmetric pause bit in advertisement
2221 * @adv_asm: asymmetric pause bit in advertisement
2222 * @lp_sym: symmetric pause bit in link partner advertisement
2223 * @lp_asm: asymmetric pause bit in link partner advertisement
2224 *
2225 * Find the intersection between advertised settings and link partner's
2226 * advertised settings
2227 **/
2228static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2229 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2230{
2231 if ((!(adv_reg)) || (!(lp_reg)))
2232 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2233
2234 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
1849 /* 2235 /*
1850 * Read the 10g AN autoc and LP ability registers and resolve 2236 * Now we need to check if the user selected Rx ONLY
1851 * local flow control settings accordingly 2237 * of pause frames. In this case, we had to advertise
2238 * FULL flow control because we could not advertise RX
2239 * ONLY. Hence, we must now check to see if we need to
2240 * turn OFF the TRANSMISSION of PAUSE frames.
1852 */ 2241 */
1853 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2242 if (hw->fc.requested_mode == ixgbe_fc_full) {
1854 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); 2243 hw->fc.current_mode = ixgbe_fc_full;
1855 2244 hw_dbg(hw, "Flow Control = FULL.\n");
1856 if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1857 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
1858 /*
1859 * Now we need to check if the user selected Rx ONLY
1860 * of pause frames. In this case, we had to advertise
1861 * FULL flow control because we could not advertise RX
1862 * ONLY. Hence, we must now check to see if we need to
1863 * turn OFF the TRANSMISSION of PAUSE frames.
1864 */
1865 if (hw->fc.requested_mode == ixgbe_fc_full) {
1866 hw->fc.current_mode = ixgbe_fc_full;
1867 hw_dbg(hw, "Flow Control = FULL.\n");
1868 } else {
1869 hw->fc.current_mode = ixgbe_fc_rx_pause;
1870 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1871 }
1872 } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1873 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1874 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1875 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1876 hw->fc.current_mode = ixgbe_fc_tx_pause;
1877 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1878 } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1879 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1880 !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1881 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1882 hw->fc.current_mode = ixgbe_fc_rx_pause;
1883 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1884 } else { 2245 } else {
1885 hw->fc.current_mode = ixgbe_fc_none; 2246 hw->fc.current_mode = ixgbe_fc_rx_pause;
1886 hw_dbg(hw, "Flow Control = NONE.\n"); 2247 hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
1887 } 2248 }
2249 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2250 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2251 hw->fc.current_mode = ixgbe_fc_tx_pause;
2252 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
2253 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2254 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2255 hw->fc.current_mode = ixgbe_fc_rx_pause;
2256 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
2257 } else {
2258 hw->fc.current_mode = ixgbe_fc_none;
2259 hw_dbg(hw, "Flow Control = NONE.\n");
1888 } 2260 }
1889 /* Record that current_mode is the result of a successful autoneg */ 2261 return 0;
1890 hw->fc.fc_was_autonegged = true;
1891
1892out:
1893 return ret_val;
1894} 2262}
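ixgbe_negotiate_fc is the IEEE 802.3 Annex 28B pause resolution, now written once instead of repeated per media type. A self-contained model of the same truth table (enum names are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode resolve_fc(bool l_sym, bool l_asm,
			       bool p_sym, bool p_asm, bool want_full)
{
	if (l_sym && p_sym)			/* both advertise symmetric */
		return want_full ? FC_FULL : FC_RX_PAUSE;
	if (!l_sym && l_asm && p_sym && p_asm)
		return FC_TX_PAUSE;		/* we pause them only */
	if (l_sym && l_asm && !p_sym && p_asm)
		return FC_RX_PAUSE;		/* they pause us only */
	return FC_NONE;
}

int main(void)
{
	/* local advertises Sym+Asym, partner Asym only -> RX PAUSE (1) */
	printf("%d\n", resolve_fc(true, true, false, true, true));
	return 0;
}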
1895 2263
1896/** 2264/**
@@ -1902,7 +2270,8 @@ out:
1902static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) 2270static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1903{ 2271{
1904 s32 ret_val = 0; 2272 s32 ret_val = 0;
1905 u32 reg; 2273 u32 reg = 0, reg_bp = 0;
2274 u16 reg_cu = 0;
1906 2275
1907#ifdef CONFIG_DCB 2276#ifdef CONFIG_DCB
1908 if (hw->fc.requested_mode == ixgbe_fc_pfc) { 2277 if (hw->fc.requested_mode == ixgbe_fc_pfc) {
@@ -1910,7 +2279,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1910 goto out; 2279 goto out;
1911 } 2280 }
1912 2281
1913#endif 2282#endif /* CONFIG_DCB */
1914 /* Validate the packetbuf configuration */ 2283 /* Validate the packetbuf configuration */
1915 if (packetbuf_num < 0 || packetbuf_num > 7) { 2284 if (packetbuf_num < 0 || packetbuf_num > 7) {
1916 hw_dbg(hw, "Invalid packet buffer number [%d], expected range " 2285 hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
@@ -1948,11 +2317,26 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1948 hw->fc.requested_mode = ixgbe_fc_full; 2317 hw->fc.requested_mode = ixgbe_fc_full;
1949 2318
1950 /* 2319 /*
1951 * Set up the 1G flow control advertisement registers so the HW will be 2320 * Set up the 1G and 10G flow control advertisement registers so the
1952 * able to do fc autoneg once the cable is plugged in. If we end up 2321 * HW will be able to do fc autoneg once the cable is plugged in. If
1953 * using 10g instead, this is harmless. 2322 * we link at 10G, the 1G advertisement is harmless and vice versa.
1954 */ 2323 */
1955 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2324
2325 switch (hw->phy.media_type) {
2326 case ixgbe_media_type_fiber:
2327 case ixgbe_media_type_backplane:
2328 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2329 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2330 break;
2331
2332 case ixgbe_media_type_copper:
2333 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
2334 MDIO_MMD_AN, &reg_cu);
2335 break;
2336
2337 default:
2338 ;
2339 }
1956 2340
1957 /* 2341 /*
1958 * The possible values of fc.requested_mode are: 2342 * The possible values of fc.requested_mode are:
@@ -1971,6 +2355,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1971 case ixgbe_fc_none: 2355 case ixgbe_fc_none:
1972 /* Flow control completely disabled by software override. */ 2356 /* Flow control completely disabled by software override. */
1973 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2357 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2358 if (hw->phy.media_type == ixgbe_media_type_backplane)
2359 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
2360 IXGBE_AUTOC_ASM_PAUSE);
2361 else if (hw->phy.media_type == ixgbe_media_type_copper)
2362 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
1974 break; 2363 break;
1975 case ixgbe_fc_rx_pause: 2364 case ixgbe_fc_rx_pause:
1976 /* 2365 /*
@@ -1982,6 +2371,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1982 * disable the adapter's ability to send PAUSE frames. 2371 * disable the adapter's ability to send PAUSE frames.
1983 */ 2372 */
1984 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2373 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2374 if (hw->phy.media_type == ixgbe_media_type_backplane)
2375 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2376 IXGBE_AUTOC_ASM_PAUSE);
2377 else if (hw->phy.media_type == ixgbe_media_type_copper)
2378 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
1985 break; 2379 break;
1986 case ixgbe_fc_tx_pause: 2380 case ixgbe_fc_tx_pause:
1987 /* 2381 /*
@@ -1990,10 +2384,22 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1990 */ 2384 */
1991 reg |= (IXGBE_PCS1GANA_ASM_PAUSE); 2385 reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
1992 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); 2386 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
2387 if (hw->phy.media_type == ixgbe_media_type_backplane) {
2388 reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
2389 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
2390 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
2391 reg_cu |= (IXGBE_TAF_ASM_PAUSE);
2392 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
2393 }
1993 break; 2394 break;
1994 case ixgbe_fc_full: 2395 case ixgbe_fc_full:
1995 /* Flow control (both Rx and Tx) is enabled by SW override. */ 2396 /* Flow control (both Rx and Tx) is enabled by SW override. */
1996 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2397 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2398 if (hw->phy.media_type == ixgbe_media_type_backplane)
2399 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2400 IXGBE_AUTOC_ASM_PAUSE);
2401 else if (hw->phy.media_type == ixgbe_media_type_copper)
2402 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
1997 break; 2403 break;
1998#ifdef CONFIG_DCB 2404#ifdef CONFIG_DCB
1999 case ixgbe_fc_pfc: 2405 case ixgbe_fc_pfc:
@@ -2007,80 +2413,37 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2007 break; 2413 break;
2008 } 2414 }
2009 2415
2010 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); 2416 if (hw->mac.type != ixgbe_mac_X540) {
2011 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 2417 /*
2012 2418 * Enable auto-negotiation between the MAC & PHY;
2013 /* Disable AN timeout */ 2419 * the MAC will advertise clause 37 flow control.
2014 if (hw->fc.strict_ieee) 2420 */
2015 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; 2421 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
2422 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
2016 2423
2017 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); 2424 /* Disable AN timeout */
2018 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); 2425 if (hw->fc.strict_ieee)
2426 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
2019 2427
2020 /* 2428 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
2021 * Set up the 10G flow control advertisement registers so the HW 2429 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
2022 * can do fc autoneg once the cable is plugged in. If we end up 2430 }
2023 * using 1g instead, this is harmless.
2024 */
2025 reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2026 2431
2027 /* 2432 /*
2028 * The possible values of fc.requested_mode are: 2433 * AUTOC restart handles negotiation of 1G and 10G on backplane
2029 * 0: Flow control is completely disabled 2434 * and copper. There is no need to set the PCS1GCTL register.
2030 * 1: Rx flow control is enabled (we can receive pause frames, 2435 *
2031 * but not send pause frames).
2032 * 2: Tx flow control is enabled (we can send pause frames but
2033 * we do not support receiving pause frames).
2034 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2035 * other: Invalid.
2036 */ 2436 */
2037 switch (hw->fc.requested_mode) { 2437 if (hw->phy.media_type == ixgbe_media_type_backplane) {
2038 case ixgbe_fc_none: 2438 reg_bp |= IXGBE_AUTOC_AN_RESTART;
2039 /* Flow control completely disabled by software override. */ 2439 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
2040 reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE); 2440 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
2041 break; 2441 (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
2042 case ixgbe_fc_rx_pause: 2442 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
2043 /* 2443 MDIO_MMD_AN, reg_cu);
2044 * Rx Flow control is enabled and Tx Flow control is
2045 * disabled by software override. Since there really
2046 * isn't a way to advertise that we are capable of RX
2047 * Pause ONLY, we will advertise that we support both
2048 * symmetric and asymmetric Rx PAUSE. Later, we will
2049 * disable the adapter's ability to send PAUSE frames.
2050 */
2051 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2052 break;
2053 case ixgbe_fc_tx_pause:
2054 /*
2055 * Tx Flow control is enabled, and Rx Flow control is
2056 * disabled by software override.
2057 */
2058 reg |= (IXGBE_AUTOC_ASM_PAUSE);
2059 reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
2060 break;
2061 case ixgbe_fc_full:
2062 /* Flow control (both Rx and Tx) is enabled by SW override. */
2063 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2064 break;
2065#ifdef CONFIG_DCB
2066 case ixgbe_fc_pfc:
2067 goto out;
2068 break;
2069#endif /* CONFIG_DCB */
2070 default:
2071 hw_dbg(hw, "Flow control param set incorrectly\n");
2072 ret_val = IXGBE_ERR_CONFIG;
2073 goto out;
2074 break;
2075 } 2444 }
2076 /*
2077 * AUTOC restart handles negotiation of 1G and 10G. There is
2078 * no need to set the PCS1GCTL register.
2079 */
2080 reg |= IXGBE_AUTOC_AN_RESTART;
2081 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
2082 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2083 2445
2446 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2084out: 2447out:
2085 return ret_val; 2448 return ret_val;
2086} 2449}
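Note that reg (PCS1GANA for 1G), reg_bp (AUTOC for backplane) and reg_cu (the copper AN advertisement) all encode the same two flags; the switch above just applies one mapping to whichever register the media type uses. The mapping, modeled on its own (enum values illustrative):

#include <stdbool.h>

enum { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static void encode_pause_bits(int mode, bool *sym, bool *asym)
{
	switch (mode) {
	case FC_NONE:     *sym = false; *asym = false; break;
	case FC_RX_PAUSE: *sym = true;  *asym = true;  break; /* no RX-only encoding */
	case FC_TX_PAUSE: *sym = false; *asym = true;  break;
	case FC_FULL:     *sym = true;  *asym = true;  break;
	}
}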
@@ -2096,10 +2459,16 @@ out:
2096 **/ 2459 **/
2097s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 2460s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2098{ 2461{
2462 struct ixgbe_adapter *adapter = hw->back;
2099 u32 i; 2463 u32 i;
2100 u32 reg_val; 2464 u32 reg_val;
2101 u32 number_of_queues; 2465 u32 number_of_queues;
2102 s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 2466 s32 status = 0;
2467 u16 dev_status = 0;
2468
2469 /* Just jump out if bus mastering is already disabled */
2470 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2471 goto out;
2103 2472
2104 /* Disable the receive unit by stopping each queue */ 2473 /* Disable the receive unit by stopping each queue */
2105 number_of_queues = hw->mac.max_rx_queues; 2474 number_of_queues = hw->mac.max_rx_queues;
@@ -2116,13 +2485,43 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2116 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); 2485 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
2117 2486
2118 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2487 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2119 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) { 2488 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2120 status = 0; 2489 goto check_device_status;
2490 udelay(100);
2491 }
2492
2493 hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
2494 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2495
2496 /*
2497 * Before proceeding, make sure that the PCIe block does not have
2498 * transactions pending.
2499 */
2500check_device_status:
2501 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2502 pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
2503 &dev_status);
2504 if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2121 break; 2505 break;
2122 }
2123 udelay(100); 2506 udelay(100);
2124 } 2507 }
2125 2508
2509 if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
2510 hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
2511 else
2512 goto out;
2513
2514 /*
2515 * Two consecutive resets are required via CTRL.RST per datasheet
2516 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
2517 * of this need. The first reset prevents new master requests from
2518 * being issued by our device. We then must wait 1usec for any
2519 * remaining completions from the PCIe bus to trickle in, and then reset
2520 * again to clear out any effects they may have had on our device.
2521 */
2522 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2523
2524out:
2126 return status; 2525 return status;
2127} 2526}
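The flow above quiesces in two phases: first wait for the MAC-level GIO master disable to take effect, then confirm through PCIe config space that no transactions are pending; only if the second wait also times out is the double-reset flag raised. A compressed model with hypothetical status probes:

#include <stdbool.h>

extern bool gio_master_enabled(void);		/* hypothetical status reads */
extern bool pcie_transactions_pending(void);

/* Returns true when back-to-back CTRL.RST resets are required. */
static bool quiesce_needs_double_reset(int spins)
{
	int i;

	for (i = 0; i < spins; i++)
		if (!gio_master_enabled())
			break;			/* MAC stopped issuing requests */

	for (i = 0; i < spins; i++)
		if (!pcie_transactions_pending())
			return false;		/* bus drained, normal path */

	return true;				/* pending bit stuck: double reset */
}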
2128 2527
@@ -2132,7 +2531,7 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2132 * @hw: pointer to hardware structure 2531 * @hw: pointer to hardware structure
2133 * @mask: Mask to specify which semaphore to acquire 2532 * @mask: Mask to specify which semaphore to acquire
2134 * 2533 *
2135 * Acquires the SWFW semaphore thought the GSSR register for the specified 2534 * Acquires the SWFW semaphore through the GSSR register for the specified
2136 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2535 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2137 **/ 2536 **/
2138s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) 2537s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2143,6 +2542,10 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2143 s32 timeout = 200; 2542 s32 timeout = 200;
2144 2543
2145 while (timeout) { 2544 while (timeout) {
2545 /*
2546 * SW EEPROM semaphore bit is used for access to all
2547 * SW_FW_SYNC/GSSR bits (not just EEPROM)
2548 */
2146 if (ixgbe_get_eeprom_semaphore(hw)) 2549 if (ixgbe_get_eeprom_semaphore(hw))
2147 return IXGBE_ERR_SWFW_SYNC; 2550 return IXGBE_ERR_SWFW_SYNC;
2148 2551
@@ -2155,12 +2558,12 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2155 * thread currently using resource (swmask) 2558 * thread currently using resource (swmask)
2156 */ 2559 */
2157 ixgbe_release_eeprom_semaphore(hw); 2560 ixgbe_release_eeprom_semaphore(hw);
2158 msleep(5); 2561 usleep_range(5000, 10000);
2159 timeout--; 2562 timeout--;
2160 } 2563 }
2161 2564
2162 if (!timeout) { 2565 if (!timeout) {
2163 hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n"); 2566 hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
2164 return IXGBE_ERR_SWFW_SYNC; 2567 return IXGBE_ERR_SWFW_SYNC;
2165 } 2568 }
2166 2569
@@ -2176,7 +2579,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2176 * @hw: pointer to hardware structure 2579 * @hw: pointer to hardware structure
2177 * @mask: Mask to specify which semaphore to release 2580 * @mask: Mask to specify which semaphore to release
2178 * 2581 *
2179 * Releases the SWFW semaphore thought the GSSR register for the specified 2582 * Releases the SWFW semaphore through the GSSR register for the specified
2180 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2583 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2181 **/ 2584 **/
2182void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) 2585void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2229,7 +2632,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2229 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2632 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2230 autoc_reg |= IXGBE_AUTOC_FLU; 2633 autoc_reg |= IXGBE_AUTOC_FLU;
2231 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2634 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2232 msleep(10); 2635 usleep_range(10000, 20000);
2233 } 2636 }
2234 2637
2235 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2638 led_reg &= ~IXGBE_LED_MODE_MASK(index);
@@ -2364,37 +2767,38 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2364 u32 mpsar_lo, mpsar_hi; 2767 u32 mpsar_lo, mpsar_hi;
2365 u32 rar_entries = hw->mac.num_rar_entries; 2768 u32 rar_entries = hw->mac.num_rar_entries;
2366 2769
2367 if (rar < rar_entries) { 2770 /* Make sure we are using a valid rar index range */
2368 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2771 if (rar >= rar_entries) {
2369 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2772 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2773 return IXGBE_ERR_INVALID_ARGUMENT;
2774 }
2370 2775
2371 if (!mpsar_lo && !mpsar_hi) 2776 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2372 goto done; 2777 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2373 2778
2374 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 2779 if (!mpsar_lo && !mpsar_hi)
2375 if (mpsar_lo) { 2780 goto done;
2376 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2377 mpsar_lo = 0;
2378 }
2379 if (mpsar_hi) {
2380 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2381 mpsar_hi = 0;
2382 }
2383 } else if (vmdq < 32) {
2384 mpsar_lo &= ~(1 << vmdq);
2385 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2386 } else {
2387 mpsar_hi &= ~(1 << (vmdq - 32));
2388 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2389 }
2390 2781
2391 /* was that the last pool using this rar? */ 2782 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
2392 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) 2783 if (mpsar_lo) {
2393 hw->mac.ops.clear_rar(hw, rar); 2784 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2785 mpsar_lo = 0;
2786 }
2787 if (mpsar_hi) {
2788 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2789 mpsar_hi = 0;
2790 }
2791 } else if (vmdq < 32) {
2792 mpsar_lo &= ~(1 << vmdq);
2793 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2394 } else { 2794 } else {
2395 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 2795 mpsar_hi &= ~(1 << (vmdq - 32));
2796 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2396 } 2797 }
2397 2798
2799 /* was that the last pool using this rar? */
2800 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
2801 hw->mac.ops.clear_rar(hw, rar);
2398done: 2802done:
2399 return 0; 2803 return 0;
2400} 2804}
@@ -2410,18 +2814,20 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2410 u32 mpsar; 2814 u32 mpsar;
2411 u32 rar_entries = hw->mac.num_rar_entries; 2815 u32 rar_entries = hw->mac.num_rar_entries;
2412 2816
2413 if (rar < rar_entries) { 2817 /* Make sure we are using a valid rar index range */
2414 if (vmdq < 32) { 2818 if (rar >= rar_entries) {
2415 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2416 mpsar |= 1 << vmdq;
2417 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2418 } else {
2419 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2420 mpsar |= 1 << (vmdq - 32);
2421 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2422 }
2423 } else {
2424 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 2819 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2820 return IXGBE_ERR_INVALID_ARGUMENT;
2821 }
2822
2823 if (vmdq < 32) {
2824 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2825 mpsar |= 1 << vmdq;
2826 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2827 } else {
2828 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2829 mpsar |= 1 << (vmdq - 32);
2830 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2425 } 2831 }
2426 return 0; 2832 return 0;
2427} 2833}
@@ -2434,7 +2840,6 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
2434{ 2840{
2435 int i; 2841 int i;
2436 2842
2437
2438 for (i = 0; i < 128; i++) 2843 for (i = 0; i < 128; i++)
2439 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 2844 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
2440 2845
@@ -2449,7 +2854,7 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
2449 * return the VLVF index where this VLAN id should be placed 2854 * return the VLVF index where this VLAN id should be placed
2450 * 2855 *
2451 **/ 2856 **/
2452s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) 2857static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
2453{ 2858{
2454 u32 bits = 0; 2859 u32 bits = 0;
2455 u32 first_empty_slot = 0; 2860 u32 first_empty_slot = 0;
@@ -2663,12 +3068,21 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
2663 * Reads the links register to determine if link is up and the current speed 3068 * Reads the links register to determine if link is up and the current speed
2664 **/ 3069 **/
2665s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 3070s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2666 bool *link_up, bool link_up_wait_to_complete) 3071 bool *link_up, bool link_up_wait_to_complete)
2667{ 3072{
2668 u32 links_reg; 3073 u32 links_reg, links_orig;
2669 u32 i; 3074 u32 i;
2670 3075
3076 /* clear the old state */
3077 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3078
2671 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3079 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3080
3081 if (links_orig != links_reg) {
3082 hw_dbg(hw, "LINKS changed from %08X to %08X\n",
3083 links_orig, links_reg);
3084 }
3085
2672 if (link_up_wait_to_complete) { 3086 if (link_up_wait_to_complete) {
2673 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 3087 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
2674 if (links_reg & IXGBE_LINKS_UP) { 3088 if (links_reg & IXGBE_LINKS_UP) {
@@ -2691,10 +3105,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2691 IXGBE_LINKS_SPEED_10G_82599) 3105 IXGBE_LINKS_SPEED_10G_82599)
2692 *speed = IXGBE_LINK_SPEED_10GB_FULL; 3106 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2693 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3107 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
2694 IXGBE_LINKS_SPEED_1G_82599) 3108 IXGBE_LINKS_SPEED_1G_82599)
2695 *speed = IXGBE_LINK_SPEED_1GB_FULL; 3109 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2696 else 3110 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3111 IXGBE_LINKS_SPEED_100_82599)
2697 *speed = IXGBE_LINK_SPEED_100_FULL; 3112 *speed = IXGBE_LINK_SPEED_100_FULL;
3113 else
3114 *speed = IXGBE_LINK_SPEED_UNKNOWN;
2698 3115
2699 /* if link is down, zero out the current_mode */ 3116 /* if link is down, zero out the current_mode */
2700 if (*link_up == false) { 3117 if (*link_up == false) {
@@ -2706,7 +3123,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2706} 3123}
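A worked decode of the LINKS speed field as the updated code reads it; the point of the change is that an unrecognized encoding now reports unknown instead of being misread as 100 Mb/s. The bit values are the usual 82599 encodings, but treat them as assumptions here:

#include <stdint.h>
#include <stdio.h>

#define LINKS_UP	 0x40000000u
#define LINKS_SPEED_MASK 0x30000000u
#define SPEED_10G	 0x30000000u
#define SPEED_1G	 0x20000000u
#define SPEED_100	 0x10000000u

static const char *decode_links(uint32_t links)
{
	if (!(links & LINKS_UP))
		return "down";
	switch (links & LINKS_SPEED_MASK) {
	case SPEED_10G: return "10 Gb/s";
	case SPEED_1G:  return "1 Gb/s";
	case SPEED_100: return "100 Mb/s";
	default:        return "unknown";
	}
}

int main(void)
{
	printf("%s\n", decode_links(0x70000000));	/* up, 10 Gb/s */
	return 0;
}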
2707 3124
2708/** 3125/**
 2709 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from 3126
2710 * the EEPROM 3127 * the EEPROM
2711 * @hw: pointer to hardware structure 3128 * @hw: pointer to hardware structure
2712 * @wwnn_prefix: the alternative WWNN prefix 3129 * @wwnn_prefix: the alternative WWNN prefix
@@ -2716,7 +3133,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2716 * block to check the support for the alternative WWNN/WWPN prefix support. 3133 * block to check the support for the alternative WWNN/WWPN prefix support.
2717 **/ 3134 **/
2718s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, 3135s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
2719 u16 *wwpn_prefix) 3136 u16 *wwpn_prefix)
2720{ 3137{
2721 u16 offset, caps; 3138 u16 offset, caps;
2722 u16 alt_san_mac_blk_offset; 3139 u16 alt_san_mac_blk_offset;
@@ -2749,3 +3166,104 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
2749wwn_prefix_out: 3166wwn_prefix_out:
2750 return 0; 3167 return 0;
2751} 3168}
3169
3170/**
3171 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
3172 * control
3173 * @hw: pointer to hardware structure
3174 *
3175 * There are several phys that do not support autoneg flow control. This
3176 * function check the device id to see if the associated phy supports
3177 * autoneg flow control.
3178 **/
3179static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
3180{
3181
3182 switch (hw->device_id) {
3183 case IXGBE_DEV_ID_X540T:
3184 return 0;
3185 case IXGBE_DEV_ID_82599_T3_LOM:
3186 return 0;
3187 default:
3188 return IXGBE_ERR_FC_NOT_SUPPORTED;
3189 }
3190}
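
Usage sketch (the caller below is assumed, not part of this patch): flow
control setup can use this return code to fall back to forced flow control
on PHYs that cannot negotiate it.

	/* Sketch: disable FC autoneg when the PHY cannot do it */
	if (ixgbe_device_supports_autoneg_fc(hw) != 0)
		hw->fc.disable_fc_autoneg = true;
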
3191
3192/**
3193 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
3194 * @hw: pointer to hardware structure
3195 * @enable: enable or disable switch for anti-spoofing
3196 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
3197 *
3198 **/
3199void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
3200{
3201 int j;
3202 int pf_target_reg = pf >> 3;
3203 int pf_target_shift = pf % 8;
3204 u32 pfvfspoof = 0;
3205
3206 if (hw->mac.type == ixgbe_mac_82598EB)
3207 return;
3208
3209 if (enable)
3210 pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
3211
3212 /*
3213 * PFVFSPOOF register array is size 8 with 8 bits assigned to
3214 * MAC anti-spoof enables in each register array element.
3215 */
3216 for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
3217 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
3218
3219 /* If not enabling anti-spoofing then done */
3220 if (!enable)
3221 return;
3222
3223 /*
3224 * The PF should be allowed to spoof so that it can support
3225 * emulation mode NICs. Reset the bit assigned to the PF
3226 */
3227 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
3228 pfvfspoof ^= (1 << pf_target_shift);
3229 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
3230}
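
Worked example of the PFVFSPOOF indexing above (a sketch using only the
shift/mask arithmetic from this function; the pool number is hypothetical):
pool 18 lands in PFVFSPOOF[2], bit 2, because 18 >> 3 == 2 and 18 % 8 == 2.
The VLAN enables in the next function use the same layout shifted up by
IXGBE_SPOOF_VLANAS_SHIFT.

	/* Sketch: set the MAC anti-spoof enable for a single pool */
	int pool = 18;			/* hypothetical VF pool */
	int reg = pool >> 3;		/* -> PFVFSPOOF register 2 */
	int bit = pool % 8;		/* -> bit 2 in that register */
	u32 val = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(reg));

	val |= (1 << bit);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(reg), val);
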
3231
3232/**
3233 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
3234 * @hw: pointer to hardware structure
3235 * @enable: enable or disable switch for VLAN anti-spoofing
3236 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
3237 *
3238 **/
3239void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
3240{
3241 int vf_target_reg = vf >> 3;
3242 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
3243 u32 pfvfspoof;
3244
3245 if (hw->mac.type == ixgbe_mac_82598EB)
3246 return;
3247
3248 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3249 if (enable)
3250 pfvfspoof |= (1 << vf_target_shift);
3251 else
3252 pfvfspoof &= ~(1 << vf_target_shift);
3253 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3254}
3255
3256/**
3257 * ixgbe_get_device_caps_generic - Get additional device capabilities
3258 * @hw: pointer to hardware structure
3259 * @device_caps: the EEPROM word with the extra device capabilities
3260 *
3261 * This function will read the EEPROM location for the device capabilities,
3262 * and return the word through device_caps.
3263 **/
3264s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
3265{
3266 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
3267
3268 return 0;
3269}
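
Usage sketch (the caller and the capability-bit test are illustrative;
IXGBE_DEVICE_CAPS_FCOE_OFFLOADS is defined in ixgbe_type.h): FCoE setup can
read this word to see what the EEPROM advertises.

	/* Sketch: check the EEPROM capability word for FCoE offloads */
	u16 device_caps;

	hw->mac.ops.get_device_caps(hw, &device_caps);
	if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
		hw_dbg(hw, "EEPROM advertises FCoE offloads\n");
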
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 5cf15aa11cac..46be83cfb500 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -29,13 +29,16 @@
29#define _IXGBE_COMMON_H_ 29#define _IXGBE_COMMON_H_
30 30
31#include "ixgbe_type.h" 31#include "ixgbe_type.h"
32#include "ixgbe.h"
32 33
33u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); 34u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
34s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); 35s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
35s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); 36s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
36s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); 37s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
38s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
37s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); 39s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
38s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num); 40s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
41 u32 pba_num_size);
39s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); 42s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
40s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); 43s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
41void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); 44void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
@@ -46,13 +49,22 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
46 49
47s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); 50s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
48s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); 51s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
52s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
53 u16 words, u16 *data);
49s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); 54s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
55s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
56 u16 words, u16 *data);
57s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
58s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
59 u16 words, u16 *data);
50s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 60s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
51 u16 *data); 61 u16 *data);
62s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
63 u16 words, u16 *data);
64u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
52s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, 65s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
53 u16 *checksum_val); 66 u16 *checksum_val);
54s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); 67s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
55s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
56 68
57s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 69s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
58 u32 enable_addr); 70 u32 enable_addr);
@@ -60,8 +72,6 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
60s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); 72s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
61s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, 73s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
62 struct net_device *netdev); 74 struct net_device *netdev);
63s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
64 struct net_device *netdev);
65s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); 75s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
66s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); 76s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
67s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 77s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
@@ -82,9 +92,13 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
82s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, 92s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
83 ixgbe_link_speed *speed, 93 ixgbe_link_speed *speed,
84 bool *link_up, bool link_up_wait_to_complete); 94 bool *link_up, bool link_up_wait_to_complete);
85 95s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
96 u16 *wwpn_prefix);
86s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); 97s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
87s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); 98s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
99void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
100void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
101s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
88 102
89#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) 103#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
90 104
@@ -105,9 +119,8 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
105 119
106#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) 120#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
107 121
108extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
109#define hw_dbg(hw, format, arg...) \ 122#define hw_dbg(hw, format, arg...) \
110 netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg) 123 netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
111#define e_dev_info(format, arg...) \ 124#define e_dev_info(format, arg...) \
112 dev_info(&adapter->pdev->dev, format, ## arg) 125 dev_info(&adapter->pdev->dev, format, ## arg)
113#define e_dev_warn(format, arg...) \ 126#define e_dev_warn(format, arg...) \
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 9aea4f04bbd2..686a17aadef3 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -34,95 +34,39 @@
34#include "ixgbe_dcb_82599.h" 34#include "ixgbe_dcb_82599.h"
35 35
36/** 36/**
37 * ixgbe_dcb_config - Struct containing DCB settings. 37 * ixgbe_ieee_credits - This calculates the IEEE traffic class
38 * @dcb_config: Pointer to DCB config structure 38 * credits from the configured bandwidth percentages. Credits
39 * 39 * are the smallest unit programmable into the underlying
40 * This function checks DCB rules for DCB settings. 40 * hardware. The IEEE 802.1Qaz specification does not use bandwidth
41 * The following rules are checked: 41 * groups so this is much simplified from the CEE case.
42 * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
43 * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
44 * Group must total 100.
45 * 3. A Traffic Class should not be set to both Link Strict Priority
46 * and Group Strict Priority.
47 * 4. Link strict Bandwidth Groups can only have link strict traffic classes
48 * with zero bandwidth.
49 */ 42 */
50s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *dcb_config) 43s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame)
51{ 44{
52 struct tc_bw_alloc *p; 45 int min_percent = 100;
53 s32 ret_val = 0; 46 int min_credit, multiplier;
54 u8 i, j, bw = 0, bw_id; 47 int i;
55 u8 bw_sum[2][MAX_BW_GROUP];
56 bool link_strict[2][MAX_BW_GROUP];
57
58 memset(bw_sum, 0, sizeof(bw_sum));
59 memset(link_strict, 0, sizeof(link_strict));
60
61 /* First Tx, then Rx */
62 for (i = 0; i < 2; i++) {
63 /* Check each traffic class for rule violation */
64 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
65 p = &dcb_config->tc_config[j].path[i];
66
67 bw = p->bwg_percent;
68 bw_id = p->bwg_id;
69
70 if (bw_id >= MAX_BW_GROUP) {
71 ret_val = DCB_ERR_CONFIG;
72 goto err_config;
73 }
74 if (p->prio_type == prio_link) {
75 link_strict[i][bw_id] = true;
76 /* Link strict should have zero bandwidth */
77 if (bw) {
78 ret_val = DCB_ERR_LS_BW_NONZERO;
79 goto err_config;
80 }
81 } else if (!bw) {
82 /*
83 * Traffic classes without link strict
84 * should have non-zero bandwidth.
85 */
86 ret_val = DCB_ERR_TC_BW_ZERO;
87 goto err_config;
88 }
89 bw_sum[i][bw_id] += bw;
90 }
91 48
92 bw = 0; 49 min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
50 DCB_CREDIT_QUANTUM;
93 51
94 /* Check each bandwidth group for rule violation */ 52 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
95 for (j = 0; j < MAX_BW_GROUP; j++) { 53 if (bw[i] < min_percent && bw[i])
96 bw += dcb_config->bw_percentage[i][j]; 54 min_percent = bw[i];
97 /*
98 * Sum of bandwidth percentages of all traffic classes
99 * within a Bandwidth Group must total 100 except for
100 * link strict group (zero bandwidth).
101 */
102 if (link_strict[i][j]) {
103 if (bw_sum[i][j]) {
104 /*
105 * Link strict group should have zero
106 * bandwidth.
107 */
108 ret_val = DCB_ERR_LS_BWG_NONZERO;
109 goto err_config;
110 }
111 } else if (bw_sum[i][j] != BW_PERCENT &&
112 bw_sum[i][j] != 0) {
113 ret_val = DCB_ERR_TC_BW;
114 goto err_config;
115 }
116 }
117
118 if (bw != BW_PERCENT) {
119 ret_val = DCB_ERR_BW_GROUP;
120 goto err_config;
121 }
122 } 55 }
123 56
124err_config: 57 multiplier = (min_credit / min_percent) + 1;
125 return ret_val; 58
59 /* Find out the hw credits for each TC */
60 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
61 int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL);
62
63 if (val < min_credit)
64 val = min_credit;
65 refill[i] = val;
66
67 max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit;
68 }
69 return 0;
126} 70}
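
Worked example for ixgbe_ieee_credits() (numbers only, as a sketch of the
math above): with bw = {25, 25, 25, 25, 0, 0, 0, 0} and a 1522-byte max
frame, min_credit = ((1522 / 2) + 63) / 64 = 12 quanta, min_percent = 25,
multiplier = (12 / 25) + 1 = 1, so each active TC gets refill = 25 and
max = 25 * 4095 / 100 = 1023, while the idle TCs are clamped up to the
12-quantum floor.

	/* Sketch: feed an IEEE ETS bandwidth split into the helper */
	__u8 bw[MAX_TRAFFIC_CLASS] = { 25, 25, 25, 25, 0, 0, 0, 0 };
	__u16 refill[MAX_TRAFFIC_CLASS];
	__u16 max[MAX_TRAFFIC_CLASS];

	ixgbe_ieee_credits(bw, refill, max, 1522);
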
127 71
128/** 72/**
@@ -134,10 +78,14 @@ err_config:
134 * It should be called only after the rules are checked by 78 * It should be called only after the rules are checked by
135 * ixgbe_dcb_check_config(). 79 * ixgbe_dcb_check_config().
136 */ 80 */
137s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config, 81s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
138 u8 direction) 82 struct ixgbe_dcb_config *dcb_config,
83 int max_frame, u8 direction)
139{ 84{
140 struct tc_bw_alloc *p; 85 struct tc_bw_alloc *p;
86 int min_credit;
87 int min_multiplier;
88 int min_percent = 100;
141 s32 ret_val = 0; 89 s32 ret_val = 0;
142 /* Initialization values default for Tx settings */ 90 /* Initialization values default for Tx settings */
143 u32 credit_refill = 0; 91 u32 credit_refill = 0;
@@ -151,6 +99,31 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
151 goto out; 99 goto out;
152 } 100 }
153 101
102 min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
103 DCB_CREDIT_QUANTUM;
104
105 /* Find smallest link percentage */
106 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
107 p = &dcb_config->tc_config[i].path[direction];
108 bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
109 link_percentage = p->bwg_percent;
110
111 link_percentage = (link_percentage * bw_percent) / 100;
112
113 if (link_percentage && link_percentage < min_percent)
114 min_percent = link_percentage;
115 }
116
117 /*
118 * The ratio between traffic classes will control the bandwidth
119 * percentages seen on the wire. To calculate this ratio we use
120 * a multiplier. The refill credits must be larger than the
121 * max frame size, so here we find the smallest multiplier that
122 * keeps every traffic class's refill credits above the max
123 * frame size.
124 */
125 min_multiplier = (min_credit / min_percent) + 1;
126
154 /* Find out the link percentage for each TC first */ 127 /* Find out the link percentage for each TC first */
155 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 128 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
156 p = &dcb_config->tc_config[i].path[direction]; 129 p = &dcb_config->tc_config[i].path[direction];
@@ -165,8 +138,9 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
165 /* Save link_percentage for reference */ 138 /* Save link_percentage for reference */
166 p->link_percent = (u8)link_percentage; 139 p->link_percent = (u8)link_percentage;
167 140
168 /* Calculate credit refill and save it */ 141 /* Calculate credit refill ratio using multiplier */
169 credit_refill = link_percentage * MINIMUM_CREDIT_REFILL; 142 credit_refill = min(link_percentage * min_multiplier,
143 MAX_CREDIT_REFILL);
170 p->data_credits_refill = (u16)credit_refill; 144 p->data_credits_refill = (u16)credit_refill;
171 145
172 /* Calculate maximum credit for the TC */ 146 /* Calculate maximum credit for the TC */
@@ -177,8 +151,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
177 * of a TC is too small, the maximum credit may not be 151 * of a TC is too small, the maximum credit may not be
178 * enough to send out a jumbo frame in data plane arbitration. 152 * enough to send out a jumbo frame in data plane arbitration.
179 */ 153 */
180 if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_JUMBO)) 154 if (credit_max && (credit_max < min_credit))
181 credit_max = MINIMUM_CREDIT_FOR_JUMBO; 155 credit_max = min_credit;
182 156
183 if (direction == DCB_TX_CONFIG) { 157 if (direction == DCB_TX_CONFIG) {
184 /* 158 /*
@@ -187,7 +161,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
187 * credit may not be enough to send out a TSO 161 * credit may not be enough to send out a TSO
188 * packet in descriptor plane arbitration. 162 * packet in descriptor plane arbitration.
189 */ 163 */
190 if (credit_max && 164 if ((hw->mac.type == ixgbe_mac_82598EB) &&
165 credit_max &&
191 (credit_max < MINIMUM_CREDIT_FOR_TSO)) 166 (credit_max < MINIMUM_CREDIT_FOR_TSO))
192 credit_max = MINIMUM_CREDIT_FOR_TSO; 167 credit_max = MINIMUM_CREDIT_FOR_TSO;
193 168
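
To make the multiplier comment above concrete (illustrative numbers, not
from this patch): for a 9216-byte jumbo frame, min_credit =
((9216 / 2) + 63) / 64 = 72 quanta; if the smallest non-zero link
percentage is 5, then min_multiplier = (72 / 5) + 1 = 15 and that class
refills 5 * 15 = 75 credits per cycle, just clearing the 72-quantum frame
floor.
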
@@ -202,131 +177,57 @@ out:
202 return ret_val; 177 return ret_val;
203} 178}
204 179
205/** 180void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
206 * ixgbe_dcb_get_tc_stats - Returns status of each traffic class
207 * @hw: pointer to hardware structure
208 * @stats: pointer to statistics structure
209 * @tc_count: Number of elements in bwg_array.
210 *
211 * This function returns the status data for each of the Traffic Classes in use.
212 */
213s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
214 u8 tc_count)
215{ 181{
216 s32 ret = 0; 182 int i;
217 if (hw->mac.type == ixgbe_mac_82598EB)
218 ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
219 else if (hw->mac.type == ixgbe_mac_82599EB)
220 ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count);
221 return ret;
222}
223 183
224/** 184 *pfc_en = 0;
225 * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class 185 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
226 * hw - pointer to hardware structure 186 *pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i;
227 * stats - pointer to statistics structure
228 * tc_count - Number of elements in bwg_array.
229 *
230 * This function returns the CBFC status data for each of the Traffic Classes.
231 */
232s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
233 u8 tc_count)
234{
235 s32 ret = 0;
236 if (hw->mac.type == ixgbe_mac_82598EB)
237 ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
238 else if (hw->mac.type == ixgbe_mac_82599EB)
239 ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count);
240 return ret;
241} 187}
242 188
243/** 189void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
244 * ixgbe_dcb_config_rx_arbiter - Config Rx arbiter 190 u16 *refill)
245 * @hw: pointer to hardware structure
246 * @dcb_config: pointer to ixgbe_dcb_config structure
247 *
248 * Configure Rx Data Arbiter and credits for each traffic class.
249 */
250s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw,
251 struct ixgbe_dcb_config *dcb_config)
252{ 191{
253 s32 ret = 0; 192 struct tc_bw_alloc *p;
254 if (hw->mac.type == ixgbe_mac_82598EB) 193 int i;
255 ret = ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
256 else if (hw->mac.type == ixgbe_mac_82599EB)
257 ret = ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
258 return ret;
259}
260 194
261/** 195 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
262 * ixgbe_dcb_config_tx_desc_arbiter - Config Tx Desc arbiter 196 p = &cfg->tc_config[i].path[direction];
263 * @hw: pointer to hardware structure 197 refill[i] = p->data_credits_refill;
264 * @dcb_config: pointer to ixgbe_dcb_config structure 198 }
265 *
266 * Configure Tx Descriptor Arbiter and credits for each traffic class.
267 */
268s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw,
269 struct ixgbe_dcb_config *dcb_config)
270{
271 s32 ret = 0;
272 if (hw->mac.type == ixgbe_mac_82598EB)
273 ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
274 else if (hw->mac.type == ixgbe_mac_82599EB)
275 ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
276 return ret;
277} 199}
278 200
279/** 201void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
280 * ixgbe_dcb_config_tx_data_arbiter - Config Tx data arbiter
281 * @hw: pointer to hardware structure
282 * @dcb_config: pointer to ixgbe_dcb_config structure
283 *
284 * Configure Tx Data Arbiter and credits for each traffic class.
285 */
286s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw,
287 struct ixgbe_dcb_config *dcb_config)
288{ 202{
289 s32 ret = 0; 203 int i;
290 if (hw->mac.type == ixgbe_mac_82598EB) 204
291 ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config); 205 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
292 else if (hw->mac.type == ixgbe_mac_82599EB) 206 max[i] = cfg->tc_config[i].desc_credits_max;
293 ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
294 return ret;
295} 207}
296 208
297/** 209void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
298 * ixgbe_dcb_config_pfc - Config priority flow control 210 u8 *bwgid)
299 * @hw: pointer to hardware structure
300 * @dcb_config: pointer to ixgbe_dcb_config structure
301 *
302 * Configure Priority Flow Control for each traffic class.
303 */
304s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw,
305 struct ixgbe_dcb_config *dcb_config)
306{ 211{
307 s32 ret = 0; 212 struct tc_bw_alloc *p;
308 if (hw->mac.type == ixgbe_mac_82598EB) 213 int i;
309 ret = ixgbe_dcb_config_pfc_82598(hw, dcb_config); 214
310 else if (hw->mac.type == ixgbe_mac_82599EB) 215 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
311 ret = ixgbe_dcb_config_pfc_82599(hw, dcb_config); 216 p = &cfg->tc_config[i].path[direction];
312 return ret; 217 bwgid[i] = p->bwg_id;
218 }
313} 219}
314 220
315/** 221void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
316 * ixgbe_dcb_config_tc_stats - Config traffic class statistics 222 u8 *ptype)
317 * @hw: pointer to hardware structure
318 *
319 * Configure queue statistics registers, all queues belonging to same traffic
320 * class uses a single set of queue statistics counters.
321 */
322s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
323{ 223{
324 s32 ret = 0; 224 struct tc_bw_alloc *p;
325 if (hw->mac.type == ixgbe_mac_82598EB) 225 int i;
326 ret = ixgbe_dcb_config_tc_stats_82598(hw); 226
327 else if (hw->mac.type == ixgbe_mac_82599EB) 227 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
328 ret = ixgbe_dcb_config_tc_stats_82599(hw); 228 p = &cfg->tc_config[i].path[direction];
329 return ret; 229 ptype[i] = p->prio_type;
230 }
330} 231}
331 232
332/** 233/**
@@ -340,10 +241,82 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
340 struct ixgbe_dcb_config *dcb_config) 241 struct ixgbe_dcb_config *dcb_config)
341{ 242{
342 s32 ret = 0; 243 s32 ret = 0;
343 if (hw->mac.type == ixgbe_mac_82598EB) 244 u8 pfc_en;
344 ret = ixgbe_dcb_hw_config_82598(hw, dcb_config); 245 u8 ptype[MAX_TRAFFIC_CLASS];
345 else if (hw->mac.type == ixgbe_mac_82599EB) 246 u8 bwgid[MAX_TRAFFIC_CLASS];
346 ret = ixgbe_dcb_hw_config_82599(hw, dcb_config); 247 u16 refill[MAX_TRAFFIC_CLASS];
248 u16 max[MAX_TRAFFIC_CLASS];
249 /* CEE does not define a priority to tc mapping so map 1:1 */
250 u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};
251
252 /* Unpack CEE standard containers */
253 ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
254 ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill);
255 ixgbe_dcb_unpack_max(dcb_config, max);
256 ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
257 ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
258
259 switch (hw->mac.type) {
260 case ixgbe_mac_82598EB:
261 ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->rx_pba_cfg,
262 pfc_en, refill, max, bwgid,
263 ptype);
264 break;
265 case ixgbe_mac_82599EB:
266 case ixgbe_mac_X540:
267 ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg,
268 pfc_en, refill, max, bwgid,
269 ptype, prio_tc);
270 break;
271 default:
272 break;
273 }
274 return ret;
275}
276
277/* Helper routines to abstract HW specifics from DCB netlink ops */
278s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
279{
280 int ret = -EINVAL;
281
282 switch (hw->mac.type) {
283 case ixgbe_mac_82598EB:
284 ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
285 break;
286 case ixgbe_mac_82599EB:
287 case ixgbe_mac_X540:
288 ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en);
289 break;
290 default:
291 break;
292 }
347 return ret; 293 return ret;
348} 294}
349 295
296s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
297 u16 *refill, u16 *max, u8 *bwg_id,
298 u8 *prio_type, u8 *prio_tc)
299{
300 switch (hw->mac.type) {
301 case ixgbe_mac_82598EB:
302 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max,
303 prio_type);
304 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
305 bwg_id, prio_type);
306 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
307 bwg_id, prio_type);
308 break;
309 case ixgbe_mac_82599EB:
310 case ixgbe_mac_X540:
311 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
312 bwg_id, prio_type, prio_tc);
313 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
314 bwg_id, prio_type);
315 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
316 prio_type, prio_tc);
317 break;
318 default:
319 break;
320 }
321 return 0;
322}
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 5caafd4afbc3..944838fc7b59 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -139,7 +139,6 @@ struct ixgbe_dcb_config {
139 struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; 139 struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
140 u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ 140 u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
141 bool pfc_mode_enable; 141 bool pfc_mode_enable;
142 bool round_robin_enable;
143 142
144 enum dcb_rx_pba_cfg rx_pba_cfg; 143 enum dcb_rx_pba_cfg rx_pba_cfg;
145 144
@@ -148,35 +147,26 @@ struct ixgbe_dcb_config {
148}; 147};
149 148
150/* DCB driver APIs */ 149/* DCB driver APIs */
151 150void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en);
152/* DCB rule checking function.*/ 151void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *);
153s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *config); 152void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
153void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
154void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
154 155
155/* DCB credits calculation */ 156/* DCB credits calculation */
156s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8); 157s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame);
157 158s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
158/* DCB PFC functions */ 159 struct ixgbe_dcb_config *, int, u8);
159s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, struct ixgbe_dcb_config *g);
160s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
161
162/* DCB traffic class stats */
163s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
164s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
165
166/* DCB config arbiters */
167s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *,
168 struct ixgbe_dcb_config *);
169s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *,
170 struct ixgbe_dcb_config *);
171s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *, struct ixgbe_dcb_config *);
172 160
173/* DCB hw initialization */ 161/* DCB hw initialization */
162s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
163 u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
164s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en);
174s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); 165s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
175 166
176/* DCB definitions for credit calculation */ 167/* DCB definitions for credit calculation */
168#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */
177#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */ 169#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
178#define MINIMUM_CREDIT_REFILL 5 /* 5*64B = 320B */
179#define MINIMUM_CREDIT_FOR_JUMBO 145 /* 145= UpperBound((9*1024+54)/64B) for 9KB jumbo frame */
180#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */ 170#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */
181#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */ 171#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
182#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1024 / 64B */ 172#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1024 / 64B */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index f0e9279d4669..771d01a60d06 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -32,80 +32,20 @@
32#include "ixgbe_dcb_82598.h" 32#include "ixgbe_dcb_82598.h"
33 33
34/** 34/**
35 * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
36 * @hw: pointer to hardware structure
37 * @stats: pointer to statistics structure
38 * @tc_count: Number of elements in bwg_array.
39 *
40 * This function returns the status data for each of the Traffic Classes in use.
41 */
42s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
43 struct ixgbe_hw_stats *stats,
44 u8 tc_count)
45{
46 int tc;
47
48 if (tc_count > MAX_TRAFFIC_CLASS)
49 return DCB_ERR_PARAM;
50
51 /* Statistics pertaining to each traffic class */
52 for (tc = 0; tc < tc_count; tc++) {
53 /* Transmitted Packets */
54 stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
55 /* Transmitted Bytes */
56 stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
57 /* Received Packets */
58 stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
59 /* Received Bytes */
60 stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
61 }
62
63 return 0;
64}
65
66/**
67 * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
68 * @hw: pointer to hardware structure
69 * @stats: pointer to statistics structure
70 * @tc_count: Number of elements in bwg_array.
71 *
72 * This function returns the CBFC status data for each of the Traffic Classes.
73 */
74s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
75 struct ixgbe_hw_stats *stats,
76 u8 tc_count)
77{
78 int tc;
79
80 if (tc_count > MAX_TRAFFIC_CLASS)
81 return DCB_ERR_PARAM;
82
83 for (tc = 0; tc < tc_count; tc++) {
84 /* Priority XOFF Transmitted */
85 stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
86 /* Priority XOFF Received */
87 stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
88 }
89
90 return 0;
91}
92
93/**
94 * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers 35 * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers
95 * @hw: pointer to hardware structure 36 * @hw: pointer to hardware structure
96 * @dcb_config: pointer to ixgbe_dcb_config structure 37 * @dcb_config: pointer to ixgbe_dcb_config structure
97 * 38 *
98 * Configure packet buffers for DCB mode. 39 * Configure packet buffers for DCB mode.
99 */ 40 */
100static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, 41static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, u8 rx_pba)
101 struct ixgbe_dcb_config *dcb_config)
102{ 42{
103 s32 ret_val = 0; 43 s32 ret_val = 0;
104 u32 value = IXGBE_RXPBSIZE_64KB; 44 u32 value = IXGBE_RXPBSIZE_64KB;
105 u8 i = 0; 45 u8 i = 0;
106 46
107 /* Setup Rx packet buffer sizes */ 47 /* Setup Rx packet buffer sizes */
108 switch (dcb_config->rx_pba_cfg) { 48 switch (rx_pba) {
109 case pba_80_48: 49 case pba_80_48:
110 /* Setup the first four at 80KB */ 50 /* Setup the first four at 80KB */
111 value = IXGBE_RXPBSIZE_80KB; 51 value = IXGBE_RXPBSIZE_80KB;
@@ -138,9 +78,10 @@ static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
138 * Configure Rx Data Arbiter and credits for each traffic class. 78 * Configure Rx Data Arbiter and credits for each traffic class.
139 */ 79 */
140s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, 80s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
141 struct ixgbe_dcb_config *dcb_config) 81 u16 *refill,
82 u16 *max,
83 u8 *prio_type)
142{ 84{
143 struct tc_bw_alloc *p;
144 u32 reg = 0; 85 u32 reg = 0;
145 u32 credit_refill = 0; 86 u32 credit_refill = 0;
146 u32 credit_max = 0; 87 u32 credit_max = 0;
@@ -161,13 +102,12 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
161 102
162 /* Configure traffic class credits and priority */ 103 /* Configure traffic class credits and priority */
163 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 104 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
164 p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG]; 105 credit_refill = refill[i];
165 credit_refill = p->data_credits_refill; 106 credit_max = max[i];
166 credit_max = p->data_credits_max;
167 107
168 reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); 108 reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
169 109
170 if (p->prio_type == prio_link) 110 if (prio_type[i] == prio_link)
171 reg |= IXGBE_RT2CR_LSP; 111 reg |= IXGBE_RT2CR_LSP;
172 112
173 IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); 113 IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
@@ -195,9 +135,11 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
195 * Configure Tx Descriptor Arbiter and credits for each traffic class. 135 * Configure Tx Descriptor Arbiter and credits for each traffic class.
196 */ 136 */
197s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, 137s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
198 struct ixgbe_dcb_config *dcb_config) 138 u16 *refill,
139 u16 *max,
140 u8 *bwg_id,
141 u8 *prio_type)
199{ 142{
200 struct tc_bw_alloc *p;
201 u32 reg, max_credits; 143 u32 reg, max_credits;
202 u8 i; 144 u8 i;
203 145
@@ -205,10 +147,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
205 147
206 /* Enable arbiter */ 148 /* Enable arbiter */
207 reg &= ~IXGBE_DPMCS_ARBDIS; 149 reg &= ~IXGBE_DPMCS_ARBDIS;
208 if (!(dcb_config->round_robin_enable)) { 150 /* Enable DFP and Recycle mode */
209 /* Enable DFP and Recycle mode */ 151 reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
210 reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
211 }
212 reg |= IXGBE_DPMCS_TSOEF; 152 reg |= IXGBE_DPMCS_TSOEF;
213 /* Configure Max TSO packet size 34KB including payload and headers */ 153 /* Configure Max TSO packet size 34KB including payload and headers */
214 reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); 154 reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
@@ -217,16 +157,15 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
217 157
218 /* Configure traffic class credits and priority */ 158 /* Configure traffic class credits and priority */
219 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 159 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
220 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 160 max_credits = max[i];
221 max_credits = dcb_config->tc_config[i].desc_credits_max;
222 reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; 161 reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
223 reg |= p->data_credits_refill; 162 reg |= refill[i];
224 reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT; 163 reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
225 164
226 if (p->prio_type == prio_group) 165 if (prio_type[i] == prio_group)
227 reg |= IXGBE_TDTQ2TCCR_GSP; 166 reg |= IXGBE_TDTQ2TCCR_GSP;
228 167
229 if (p->prio_type == prio_link) 168 if (prio_type[i] == prio_link)
230 reg |= IXGBE_TDTQ2TCCR_LSP; 169 reg |= IXGBE_TDTQ2TCCR_LSP;
231 170
232 IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); 171 IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
@@ -243,9 +182,11 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
243 * Configure Tx Data Arbiter and credits for each traffic class. 182 * Configure Tx Data Arbiter and credits for each traffic class.
244 */ 183 */
245s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, 184s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
246 struct ixgbe_dcb_config *dcb_config) 185 u16 *refill,
186 u16 *max,
187 u8 *bwg_id,
188 u8 *prio_type)
247{ 189{
248 struct tc_bw_alloc *p;
249 u32 reg; 190 u32 reg;
250 u8 i; 191 u8 i;
251 192
@@ -259,15 +200,14 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
259 200
260 /* Configure traffic class credits and priority */ 201 /* Configure traffic class credits and priority */
261 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 202 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
262 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 203 reg = refill[i];
263 reg = p->data_credits_refill; 204 reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
264 reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT; 205 reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
265 reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
266 206
267 if (p->prio_type == prio_group) 207 if (prio_type[i] == prio_group)
268 reg |= IXGBE_TDPT2TCCR_GSP; 208 reg |= IXGBE_TDPT2TCCR_GSP;
269 209
270 if (p->prio_type == prio_link) 210 if (prio_type[i] == prio_link)
271 reg |= IXGBE_TDPT2TCCR_LSP; 211 reg |= IXGBE_TDPT2TCCR_LSP;
272 212
273 IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); 213 IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
@@ -288,63 +228,57 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
288 * 228 *
289 * Configure Priority Flow Control for each traffic class. 229 * Configure Priority Flow Control for each traffic class.
290 */ 230 */
291s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, 231s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
292 struct ixgbe_dcb_config *dcb_config)
293{ 232{
294 u32 reg, rx_pba_size; 233 u32 reg, rx_pba_size;
295 u8 i; 234 u8 i;
296 235
297 if (!dcb_config->pfc_mode_enable) 236 if (pfc_en) {
298 goto out; 237 /* Enable Transmit Priority Flow Control */
299 238 reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
300 /* Enable Transmit Priority Flow Control */ 239 reg &= ~IXGBE_RMCS_TFCE_802_3X;
301 reg = IXGBE_READ_REG(hw, IXGBE_RMCS); 240 /* correct the reporting of our flow control status */
302 reg &= ~IXGBE_RMCS_TFCE_802_3X; 241 reg |= IXGBE_RMCS_TFCE_PRIORITY;
303 /* correct the reporting of our flow control status */ 242 IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
304 reg |= IXGBE_RMCS_TFCE_PRIORITY; 243
305 IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); 244 /* Enable Receive Priority Flow Control */
306 245 reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
307 /* Enable Receive Priority Flow Control */ 246 reg &= ~IXGBE_FCTRL_RFCE;
308 reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); 247 reg |= IXGBE_FCTRL_RPFCE;
309 reg &= ~IXGBE_FCTRL_RFCE; 248 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
310 reg |= IXGBE_FCTRL_RPFCE; 249
311 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); 250 /* Configure pause time */
251 for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
252 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
253
254 /* Configure flow control refresh threshold value */
255 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
256 }
312 257
313 /* 258 /*
314 * Configure flow control thresholds and enable priority flow control 259 * Configure flow control thresholds and enable priority flow control
315 * for each traffic class. 260 * for each traffic class.
316 */ 261 */
317 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 262 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
318 if (dcb_config->rx_pba_cfg == pba_equal) { 263 int enabled = pfc_en & (1 << i);
319 rx_pba_size = IXGBE_RXPBSIZE_64KB; 264 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
320 } else { 265 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
321 rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB 266 reg = (rx_pba_size - hw->fc.low_water) << 10;
322 : IXGBE_RXPBSIZE_48KB;
323 }
324 267
325 reg = ((rx_pba_size >> 5) & 0xFFF0); 268 if (enabled == pfc_enabled_tx ||
326 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || 269 enabled == pfc_enabled_full)
327 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
328 reg |= IXGBE_FCRTL_XONE; 270 reg |= IXGBE_FCRTL_XONE;
329 271
330 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); 272 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
331 273
332 reg = ((rx_pba_size >> 2) & 0xFFF0); 274 reg = (rx_pba_size - hw->fc.high_water) << 10;
333 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || 275 if (enabled == pfc_enabled_tx ||
334 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) 276 enabled == pfc_enabled_full)
335 reg |= IXGBE_FCRTH_FCEN; 277 reg |= IXGBE_FCRTH_FCEN;
336 278
337 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); 279 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
338 } 280 }
339 281
340 /* Configure pause time */
341 for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
342 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
343
344 /* Configure flow control refresh threshold value */
345 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
346
347out:
348 return 0; 282 return 0;
349} 283}
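
A worked illustration of the threshold math above (illustrative numbers,
and assuming hw->fc.low_water/high_water are kept in the same 1 KB units
as rx_pba_size, which is RXPBSIZE shifted down to KB): for a 64 KB packet
buffer with low_water = 8 and high_water = 32, FCRTL is programmed with
(64 - 8) << 10 = 57344 and FCRTH with (64 - 32) << 10 = 32768, the << 10
converting KB back to bytes.
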
350 284
@@ -355,7 +289,7 @@ out:
355 * Configure queue statistics registers, all queues belonging to same traffic 289 * Configure queue statistics registers, all queues belonging to same traffic
356 * class uses a single set of queue statistics counters. 290 * class uses a single set of queue statistics counters.
357 */ 291 */
358s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) 292static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
359{ 293{
360 u32 reg = 0; 294 u32 reg = 0;
361 u8 i = 0; 295 u8 i = 0;
@@ -388,13 +322,16 @@ s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
388 * Configure dcb settings and enable dcb mode. 322 * Configure dcb settings and enable dcb mode.
389 */ 323 */
390s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, 324s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
391 struct ixgbe_dcb_config *dcb_config) 325 u8 rx_pba, u8 pfc_en, u16 *refill,
326 u16 *max, u8 *bwg_id, u8 *prio_type)
392{ 327{
393 ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config); 328 ixgbe_dcb_config_packet_buffers_82598(hw, rx_pba);
394 ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config); 329 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
395 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config); 330 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
396 ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config); 331 bwg_id, prio_type);
397 ixgbe_dcb_config_pfc_82598(hw, dcb_config); 332 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
333 bwg_id, prio_type);
334 ixgbe_dcb_config_pfc_82598(hw, pfc_en);
398 ixgbe_dcb_config_tc_stats_82598(hw); 335 ixgbe_dcb_config_tc_stats_82598(hw);
399 336
400 return 0; 337 return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index cc728fa092e2..1e9750c2b46b 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -71,24 +71,28 @@
71/* DCB hardware-specific driver APIs */ 71/* DCB hardware-specific driver APIs */
72 72
73/* DCB PFC functions */ 73/* DCB PFC functions */
74s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); 74s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
75s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *,
76 u8);
77
78/* DCB traffic class stats */
79s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *);
80s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *,
81 u8);
82
83/* DCB config arbiters */
84s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *,
85 struct ixgbe_dcb_config *);
86s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *,
87 struct ixgbe_dcb_config *);
88s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *,
89 struct ixgbe_dcb_config *);
90 75
91/* DCB hw initialization */ 76/* DCB hw initialization */
92s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); 77s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
78 u16 *refill,
79 u16 *max,
80 u8 *prio_type);
81
82s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
83 u16 *refill,
84 u16 *max,
85 u8 *bwg_id,
86 u8 *prio_type);
87
88s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
89 u16 *refill,
90 u16 *max,
91 u8 *bwg_id,
92 u8 *prio_type);
93
94s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
95 u8 rx_pba, u8 pfc_en, u16 *refill,
96 u16 *max, u8 *bwg_id, u8 *prio_type);
93 97
94#endif /* _DCB_82598_CONFIG_H */ 98#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 25b02fb425ac..d50cf78c234d 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -31,115 +31,79 @@
31#include "ixgbe_dcb_82599.h" 31#include "ixgbe_dcb_82599.h"
32 32
33/** 33/**
34 * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class 34 * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
35 * @hw: pointer to hardware structure 35 * @hw: pointer to hardware structure
36 * @stats: pointer to statistics structure 36 * @rx_pba: method to distribute packet buffer
37 * @tc_count: Number of elements in bwg_array.
38 * 37 *
39 * This function returns the status data for each of the Traffic Classes in use. 38 * Configure packet buffers for DCB mode.
40 */ 39 */
41s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw, 40static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
42 struct ixgbe_hw_stats *stats,
43 u8 tc_count)
44{ 41{
45 int tc; 42 int num_tcs = IXGBE_MAX_PACKET_BUFFERS;
46 43 u32 rx_pb_size = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT;
47 if (tc_count > MAX_TRAFFIC_CLASS) 44 u32 rxpktsize;
48 return DCB_ERR_PARAM; 45 u32 txpktsize;
49 /* Statistics pertaining to each traffic class */ 46 u32 txpbthresh;
50 for (tc = 0; tc < tc_count; tc++) { 47 u8 i = 0;
51 /* Transmitted Packets */
52 stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
53 /* Transmitted Bytes */
54 stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
55 /* Received Packets */
56 stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
57 /* Received Bytes */
58 stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
59 }
60
61 return 0;
62}
63 48
64/** 49 /*
65 * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data 50 * This really means configure the first half of the TCs
66 * @hw: pointer to hardware structure 51 * (Traffic Classes) to use 5/8 of the Rx packet buffer
67 * @stats: pointer to statistics structure 52 * space. To determine the size of the buffer for each TC,
68 * @tc_count: Number of elements in bwg_array. 53 * we are multiplying the average size by 5/4 and applying
69 * 54 * it to half of the traffic classes.
70 * This function returns the CBFC status data for each of the Traffic Classes. 55 */
71 */ 56 if (rx_pba == pba_80_48) {
72s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw, 57 rxpktsize = (rx_pb_size * 5) / (num_tcs * 4);
73 struct ixgbe_hw_stats *stats, 58 rx_pb_size -= rxpktsize * (num_tcs / 2);
74 u8 tc_count) 59 for (; i < (num_tcs / 2); i++)
75{ 60 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
76 int tc;
77
78 if (tc_count > MAX_TRAFFIC_CLASS)
79 return DCB_ERR_PARAM;
80 for (tc = 0; tc < tc_count; tc++) {
81 /* Priority XOFF Transmitted */
82 stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
83 /* Priority XOFF Received */
84 stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
85 } 61 }
86 62
87 return 0; 63 /* Divide the remaining Rx packet buffer evenly among the TCs */
88} 64 rxpktsize = rx_pb_size / (num_tcs - i);
65 for (; i < num_tcs; i++)
66 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
89 67
90/** 68 /*
91 * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers 69 * Setup Tx packet buffer and threshold equally for all TCs.
92 * @hw: pointer to hardware structure 70 * TXPBTHRESH register is set in KB so divide by 1024 and subtract
93 * @dcb_config: pointer to ixgbe_dcb_config structure 71 * 10 since the largest packet we support is just over 9K.
94 * 72 */
95 * Configure packet buffers for DCB mode. 73 txpktsize = IXGBE_TXPBSIZE_MAX / num_tcs;
96 */ 74 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
97s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, 75 for (i = 0; i < num_tcs; i++) {
98 struct ixgbe_dcb_config *dcb_config) 76 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
99{ 77 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
100 s32 ret_val = 0; 78 }
101 u32 value = IXGBE_RXPBSIZE_64KB;
102 u8 i = 0;
103 79
104 /* Setup Rx packet buffer sizes */ 80 /* Clear unused TCs, if any, to zero buffer size */
105 switch (dcb_config->rx_pba_cfg) { 81 for (; i < MAX_TRAFFIC_CLASS; i++) {
106 case pba_80_48: 82 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
107 /* Setup the first four at 80KB */ 83 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
108 value = IXGBE_RXPBSIZE_80KB; 84 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
109 for (; i < 4; i++)
110 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
111 /* Setup the last four at 48KB...don't re-init i */
112 value = IXGBE_RXPBSIZE_48KB;
113 /* Fall Through */
114 case pba_equal:
115 default:
116 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
117 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
118
119 /* Setup Tx packet buffer sizes */
120 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
121 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
122 IXGBE_TXPBSIZE_20KB);
123 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i),
124 IXGBE_TXPBTHRESH_DCB);
125 }
126 break;
127 } 85 }
128 86
129 return ret_val; 87 return 0;
130} 88}
131 89
132/** 90/**
133 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter 91 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
134 * @hw: pointer to hardware structure 92 * @hw: pointer to hardware structure
135 * @dcb_config: pointer to ixgbe_dcb_config structure 93 * @refill: refill credits indexed by traffic class
94 * @max: max credits indexed by traffic class
95 * @bwg_id: bandwidth grouping indexed by traffic class
96 * @prio_type: priority type indexed by traffic class
136 * 97 *
137 * Configure Rx Packet Arbiter and credits for each traffic class. 98 * Configure Rx Packet Arbiter and credits for each traffic class.
138 */ 99 */
139s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, 100s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
140 struct ixgbe_dcb_config *dcb_config) 101 u16 *refill,
102 u16 *max,
103 u8 *bwg_id,
104 u8 *prio_type,
105 u8 *prio_tc)
141{ 106{
142 struct tc_bw_alloc *p;
143 u32 reg = 0; 107 u32 reg = 0;
144 u32 credit_refill = 0; 108 u32 credit_refill = 0;
145 u32 credit_max = 0; 109 u32 credit_max = 0;
@@ -155,20 +119,18 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
155 /* Map all traffic classes to their UP, 1 to 1 */ 119 /* Map all traffic classes to their UP, 1 to 1 */
156 reg = 0; 120 reg = 0;
157 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) 121 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
158 reg |= (i << (i * IXGBE_RTRUP2TC_UP_SHIFT)); 122 reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
159 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); 123 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
160 124
161 /* Configure traffic class credits and priority */ 125 /* Configure traffic class credits and priority */
162 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 126 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
163 p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG]; 127 credit_refill = refill[i];
164 128 credit_max = max[i];
165 credit_refill = p->data_credits_refill;
166 credit_max = p->data_credits_max;
167 reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); 129 reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
168 130
169 reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT; 131 reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
170 132
171 if (p->prio_type == prio_link) 133 if (prio_type[i] == prio_link)
172 reg |= IXGBE_RTRPT4C_LSP; 134 reg |= IXGBE_RTRPT4C_LSP;
173 135
174 IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); 136 IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
@@ -187,14 +149,19 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
187/** 149/**
188 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter 150 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
189 * @hw: pointer to hardware structure 151 * @hw: pointer to hardware structure
190 * @dcb_config: pointer to ixgbe_dcb_config structure 152 * @refill: refill credits indexed by traffic class
153 * @max: max credits indexed by traffic class
154 * @bwg_id: bandwidth grouping indexed by traffic class
155 * @prio_type: priority type indexed by traffic class
191 * 156 *
192 * Configure Tx Descriptor Arbiter and credits for each traffic class. 157 * Configure Tx Descriptor Arbiter and credits for each traffic class.
193 */ 158 */
194s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, 159s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
195 struct ixgbe_dcb_config *dcb_config) 160 u16 *refill,
161 u16 *max,
162 u8 *bwg_id,
163 u8 *prio_type)
196{ 164{
197 struct tc_bw_alloc *p;
198 u32 reg, max_credits; 165 u32 reg, max_credits;
199 u8 i; 166 u8 i;
200 167
@@ -206,16 +173,15 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
206 173
207 /* Configure traffic class credits and priority */ 174 /* Configure traffic class credits and priority */
208 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 175 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
209 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 176 max_credits = max[i];
210 max_credits = dcb_config->tc_config[i].desc_credits_max;
211 reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; 177 reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
212 reg |= p->data_credits_refill; 178 reg |= refill[i];
213 reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT; 179 reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
214 180
215 if (p->prio_type == prio_group) 181 if (prio_type[i] == prio_group)
216 reg |= IXGBE_RTTDT2C_GSP; 182 reg |= IXGBE_RTTDT2C_GSP;
217 183
218 if (p->prio_type == prio_link) 184 if (prio_type[i] == prio_link)
219 reg |= IXGBE_RTTDT2C_LSP; 185 reg |= IXGBE_RTTDT2C_LSP;
220 186
221 IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); 187 IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
@@ -234,14 +200,20 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
234/** 200/**
235 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter 201 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
236 * @hw: pointer to hardware structure 202 * @hw: pointer to hardware structure
237 * @dcb_config: pointer to ixgbe_dcb_config structure 203 * @refill: refill credits indexed by traffic class
 204 * @max: max credits indexed by traffic class
205 * @bwg_id: bandwidth grouping indexed by traffic class
206 * @prio_type: priority type indexed by traffic class
238 * 207 *
239 * Configure Tx Packet Arbiter and credits for each traffic class. 208 * Configure Tx Packet Arbiter and credits for each traffic class.
240 */ 209 */
241s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, 210s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
242 struct ixgbe_dcb_config *dcb_config) 211 u16 *refill,
212 u16 *max,
213 u8 *bwg_id,
214 u8 *prio_type,
215 u8 *prio_tc)
243{ 216{
244 struct tc_bw_alloc *p;
245 u32 reg; 217 u32 reg;
246 u8 i; 218 u8 i;
247 219
@@ -257,20 +229,19 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
257 /* Map all traffic classes to their UP, 1 to 1 */ 229 /* Map all traffic classes to their UP, 1 to 1 */
258 reg = 0; 230 reg = 0;
259 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) 231 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
260 reg |= (i << (i * IXGBE_RTTUP2TC_UP_SHIFT)); 232 reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
261 IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); 233 IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
262 234
263 /* Configure traffic class credits and priority */ 235 /* Configure traffic class credits and priority */
264 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 236 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
265 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 237 reg = refill[i];
266 reg = p->data_credits_refill; 238 reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
267 reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT; 239 reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
268 reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;
269 240
270 if (p->prio_type == prio_group) 241 if (prio_type[i] == prio_group)
271 reg |= IXGBE_RTTPT2C_GSP; 242 reg |= IXGBE_RTTPT2C_GSP;
272 243
273 if (p->prio_type == prio_link) 244 if (prio_type[i] == prio_link)
274 reg |= IXGBE_RTTPT2C_LSP; 245 reg |= IXGBE_RTTPT2C_LSP;
275 246
276 IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); 247 IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
@@ -290,65 +261,64 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
290/** 261/**
291 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control 262 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
292 * @hw: pointer to hardware structure 263 * @hw: pointer to hardware structure
293 * @dcb_config: pointer to ixgbe_dcb_config structure 264 * @pfc_en: enabled pfc bitmask
294 * 265 *
295 * Configure Priority Flow Control (PFC) for each traffic class. 266 * Configure Priority Flow Control (PFC) for each traffic class.
296 */ 267 */
297s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, 268s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
298 struct ixgbe_dcb_config *dcb_config)
299{ 269{
300 u32 i, reg, rx_pba_size; 270 u32 i, reg, rx_pba_size;
301 271
302 /* If PFC is disabled globally then fall back to LFC. */
303 if (!dcb_config->pfc_mode_enable) {
304 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
305 hw->mac.ops.fc_enable(hw, i);
306 goto out;
307 }
308
309 /* Configure PFC Tx thresholds per TC */ 272 /* Configure PFC Tx thresholds per TC */
310 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 273 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
311 if (dcb_config->rx_pba_cfg == pba_equal) 274 int enabled = pfc_en & (1 << i);
312 rx_pba_size = IXGBE_RXPBSIZE_64KB; 275 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
313 else 276 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
314 rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
315 : IXGBE_RXPBSIZE_48KB;
316 277
317 reg = ((rx_pba_size >> 5) & 0xFFE0); 278 reg = (rx_pba_size - hw->fc.low_water) << 10;
318 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || 279
319 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) 280 if (enabled)
320 reg |= IXGBE_FCRTL_XONE; 281 reg |= IXGBE_FCRTL_XONE;
321 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); 282 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
322 283
323 reg = ((rx_pba_size >> 2) & 0xFFE0); 284 reg = (rx_pba_size - hw->fc.high_water) << 10;
324 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || 285 if (enabled)
325 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
326 reg |= IXGBE_FCRTH_FCEN; 286 reg |= IXGBE_FCRTH_FCEN;
327 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); 287 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
328 } 288 }
329 289
330 /* Configure pause time (2 TCs per register) */ 290 if (pfc_en) {
331 reg = hw->fc.pause_time | (hw->fc.pause_time << 16); 291 /* Configure pause time (2 TCs per register) */
332 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) 292 reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
333 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 293 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
294 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
334 295
335 /* Configure flow control refresh threshold value */ 296 /* Configure flow control refresh threshold value */
336 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 297 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
337 298
338 /* Enable Transmit PFC */
339 reg = IXGBE_FCCFG_TFCE_PRIORITY;
340 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
341 299
342 /* 300 reg = IXGBE_FCCFG_TFCE_PRIORITY;
343 * Enable Receive PFC 301 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
344 * We will always honor XOFF frames we receive when 302 /*
345 * we are in PFC mode. 303 * Enable Receive PFC
346 */ 304 * 82599 will always honor XOFF frames we receive when
347 reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 305 * we are in PFC mode; however, X540 only honors enabled
348 reg &= ~IXGBE_MFLCN_RFCE; 306 * traffic classes.
349 reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF; 307 */
350 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); 308 reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
351out: 309 reg &= ~IXGBE_MFLCN_RFCE;
310 reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
311
312 if (hw->mac.type == ixgbe_mac_X540)
313 reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
314
315 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
316
317 } else {
318 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
319 hw->mac.ops.fc_enable(hw, i);
320 }
321
352 return 0; 322 return 0;
353} 323}
354 324
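Instead of deriving thresholds from a hard-coded buffer split, the rewritten loop reads each TC's actual packet-buffer size back from RXPBSIZE (in KB after the shift), subtracts the generic fc.low_water/fc.high_water amounts, and shifts by 10 to convert KB to bytes for the FCRTL (XON) and FCRTH (XOFF) registers. The arithmetic in isolation, with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Sketch of the new watermark math. All values are illustrative;
     * rx_pba_kb stands in for the RXPBSIZE readback and low/high water
     * for the hw->fc fields.
     */
    int main(void)
    {
        uint32_t rx_pba_kb = 64;   /* assumed per-TC packet buffer, KB */
        uint32_t low_water = 22;   /* assumed hw->fc.low_water, KB */
        uint32_t high_water = 30;  /* assumed hw->fc.high_water, KB */

        uint32_t fcrtl = (rx_pba_kb - low_water) << 10;  /* XON, bytes */
        uint32_t fcrth = (rx_pba_kb - high_water) << 10; /* XOFF, bytes */

        printf("FCRTL (XON):  %u bytes\n", fcrtl);
        printf("FCRTH (XOFF): %u bytes\n", fcrth);
        return 0;
    }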
@@ -359,7 +329,7 @@ out:
359 * Configure queue statistics registers, all queues belonging to the same 329 * Configure queue statistics registers, all queues belonging to the same
 360 * traffic class use a single set of queue statistics counters. 330 * traffic class use a single set of queue statistics counters.
361 */ 331 */
362s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) 332static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
363{ 333{
364 u32 reg = 0; 334 u32 reg = 0;
365 u8 i = 0; 335 u8 i = 0;
@@ -408,11 +378,10 @@ s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
408/** 378/**
409 * ixgbe_dcb_config_82599 - Configure general DCB parameters 379 * ixgbe_dcb_config_82599 - Configure general DCB parameters
410 * @hw: pointer to hardware structure 380 * @hw: pointer to hardware structure
411 * @dcb_config: pointer to ixgbe_dcb_config structure
412 * 381 *
413 * Configure general DCB parameters. 382 * Configure general DCB parameters.
414 */ 383 */
415s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw) 384static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
416{ 385{
417 u32 reg; 386 u32 reg;
418 u32 q; 387 u32 q;
@@ -454,25 +423,39 @@ s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
454 reg &= ~IXGBE_RTTDCS_ARBDIS; 423 reg &= ~IXGBE_RTTDCS_ARBDIS;
455 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); 424 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
456 425
426 /* Enable Security TX Buffer IFG for DCB */
427 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
428 reg |= IXGBE_SECTX_DCB;
429 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
430
457 return 0; 431 return 0;
458} 432}
459 433
460/** 434/**
461 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB 435 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
462 * @hw: pointer to hardware structure 436 * @hw: pointer to hardware structure
463 * @dcb_config: pointer to ixgbe_dcb_config structure 437 * @rx_pba: method used to distribute the packet buffer
438 * @refill: refill credits indexed by traffic class
 439 * @max: max credits indexed by traffic class
440 * @bwg_id: bandwidth grouping indexed by traffic class
441 * @prio_type: priority type indexed by traffic class
442 * @pfc_en: enabled pfc bitmask
464 * 443 *
465 * Configure dcb settings and enable dcb mode. 444 * Configure dcb settings and enable dcb mode.
466 */ 445 */
467s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, 446s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
468 struct ixgbe_dcb_config *dcb_config) 447 u8 rx_pba, u8 pfc_en, u16 *refill,
448 u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
469{ 449{
470 ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config); 450 ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
471 ixgbe_dcb_config_82599(hw); 451 ixgbe_dcb_config_82599(hw);
472 ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config); 452 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
473 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config); 453 prio_type, prio_tc);
474 ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config); 454 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
475 ixgbe_dcb_config_pfc_82599(hw, dcb_config); 455 bwg_id, prio_type);
456 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
457 bwg_id, prio_type, prio_tc);
458 ixgbe_dcb_config_pfc_82599(hw, pfc_en);
476 ixgbe_dcb_config_tc_stats_82599(hw); 459 ixgbe_dcb_config_tc_stats_82599(hw);
477 460
478 return 0; 461 return 0;
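With the dcb_config structure replaced by flat per-TC arrays, a caller that has already computed its credits drives the whole 82599 setup through one call. A hedged usage sketch follows; every array value is an arbitrary example, not a recommended configuration, and prio_type 0 is assumed to denote the non-strict (ETS) case.

    /* Illustrative caller only; values are examples, not recommendations. */
    u16 refill[MAX_TRAFFIC_CLASS]    = { 64, 64, 64, 64, 64, 64, 64, 64 };
    u16 max[MAX_TRAFFIC_CLASS]       = { 512, 512, 512, 512,
                                         512, 512, 512, 512 };
    u8  bwg_id[MAX_TRAFFIC_CLASS]    = { 0, 1, 2, 3, 4, 5, 6, 7 };
    u8  prio_type[MAX_TRAFFIC_CLASS] = { 0 };      /* all ETS (assumed) */
    u8  prio_tc[MAX_TRAFFIC_CLASS]   = { 0, 1, 2, 3, 4, 5, 6, 7 }; /* 1:1 */
    u8  pfc_en = 0x0f;                             /* PFC on TCs 0-3 */

    ixgbe_dcb_hw_config_82599(hw, rx_pba, pfc_en, refill, max,
                              bwg_id, prio_type, prio_tc);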
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 0f3f791e1e1d..2de71a503153 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -92,36 +92,44 @@
92#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ 92#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
93#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ 93#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
94#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ 94#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
95#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */
95 96
96#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */ 97#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */
98#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
99
100/* SECTXMINIFG DCB */
101#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */
97 102
98 103
99/* DCB hardware-specific driver APIs */ 104/* DCB hardware-specific driver APIs */
100 105
101/* DCB PFC functions */ 106/* DCB PFC functions */
102s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, 107s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en);
103 struct ixgbe_dcb_config *dcb_config); 108
104s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw, 109/* DCB hw initialization */
105 struct ixgbe_hw_stats *stats,
106 u8 tc_count);
107
108/* DCB traffic class stats */
109s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw);
110s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
111 struct ixgbe_hw_stats *stats,
112 u8 tc_count);
113
114/* DCB config arbiters */
115s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
116 struct ixgbe_dcb_config *dcb_config);
117s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
118 struct ixgbe_dcb_config *dcb_config);
119s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, 110s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
120 struct ixgbe_dcb_config *dcb_config); 111 u16 *refill,
112 u16 *max,
113 u8 *bwg_id,
114 u8 *prio_type,
115 u8 *prio_tc);
121 116
117s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
118 u16 *refill,
119 u16 *max,
120 u8 *bwg_id,
121 u8 *prio_type);
122
123s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
124 u16 *refill,
125 u16 *max,
126 u8 *bwg_id,
127 u8 *prio_type,
128 u8 *prio_tc);
122 129
123/* DCB hw initialization */
124s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, 130s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
125 struct ixgbe_dcb_config *config); 131 u8 rx_pba, u8 pfc_en, u16 *refill,
132 u16 *max, u8 *bwg_id, u8 *prio_type,
133 u8 *prio_tc);
126 134
127#endif /* _DCB_82599_CONFIG_H */ 135#endif /* _DCB_82599_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index b53b465e24af..5e7ed225851a 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -37,7 +37,6 @@
37#define BIT_PG_RX 0x04 37#define BIT_PG_RX 0x04
38#define BIT_PG_TX 0x08 38#define BIT_PG_TX 0x08
39#define BIT_APP_UPCHG 0x10 39#define BIT_APP_UPCHG 0x10
40#define BIT_RESETLINK 0x40
41#define BIT_LINKSPEED 0x80 40#define BIT_LINKSPEED 0x80
42 41
43/* Responses for the DCB_C_SET_ALL command */ 42/* Responses for the DCB_C_SET_ALL command */
@@ -130,16 +129,24 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
130 netdev->netdev_ops->ndo_stop(netdev); 129 netdev->netdev_ops->ndo_stop(netdev);
131 ixgbe_clear_interrupt_scheme(adapter); 130 ixgbe_clear_interrupt_scheme(adapter);
132 131
133 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 132 switch (adapter->hw.mac.type) {
133 case ixgbe_mac_82598EB:
134 adapter->last_lfc_mode = adapter->hw.fc.current_mode; 134 adapter->last_lfc_mode = adapter->hw.fc.current_mode;
135 adapter->hw.fc.requested_mode = ixgbe_fc_none; 135 adapter->hw.fc.requested_mode = ixgbe_fc_none;
136 } 136 break;
137 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 137 case ixgbe_mac_82599EB:
138 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 138 case ixgbe_mac_X540:
139 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 139 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
140 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 140 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
141 break;
142 default:
143 break;
141 } 144 }
145
142 adapter->flags |= IXGBE_FLAG_DCB_ENABLED; 146 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
147 if (!netdev_get_num_tc(netdev))
148 ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);
149
143 ixgbe_init_interrupt_scheme(adapter); 150 ixgbe_init_interrupt_scheme(adapter);
144 if (netif_running(netdev)) 151 if (netif_running(netdev))
145 netdev->netdev_ops->ndo_open(netdev); 152 netdev->netdev_ops->ndo_open(netdev);
@@ -154,9 +161,16 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
154 adapter->temp_dcb_cfg.pfc_mode_enable = false; 161 adapter->temp_dcb_cfg.pfc_mode_enable = false;
155 adapter->dcb_cfg.pfc_mode_enable = false; 162 adapter->dcb_cfg.pfc_mode_enable = false;
156 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 163 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
157 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 164 switch (adapter->hw.mac.type) {
158 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 165 case ixgbe_mac_82599EB:
166 case ixgbe_mac_X540:
159 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 167 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
168 break;
169 default:
170 break;
171 }
172
173 ixgbe_setup_tc(netdev, 0);
160 174
161 ixgbe_init_interrupt_scheme(adapter); 175 ixgbe_init_interrupt_scheme(adapter);
162 if (netif_running(netdev)) 176 if (netif_running(netdev))
@@ -178,9 +192,14 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
178 for (i = 0; i < netdev->addr_len; i++) 192 for (i = 0; i < netdev->addr_len; i++)
179 perm_addr[i] = adapter->hw.mac.perm_addr[i]; 193 perm_addr[i] = adapter->hw.mac.perm_addr[i];
180 194
181 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 195 switch (adapter->hw.mac.type) {
196 case ixgbe_mac_82599EB:
197 case ixgbe_mac_X540:
182 for (j = 0; j < netdev->addr_len; j++, i++) 198 for (j = 0; j < netdev->addr_len; j++, i++)
183 perm_addr[i] = adapter->hw.mac.san_addr[j]; 199 perm_addr[i] = adapter->hw.mac.san_addr[j];
200 break;
201 default:
202 break;
184 } 203 }
185} 204}
186 205
@@ -208,10 +227,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
208 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent != 227 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
209 adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) || 228 adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
210 (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap != 229 (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
211 adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) { 230 adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
212 adapter->dcb_set_bitmap |= BIT_PG_TX; 231 adapter->dcb_set_bitmap |= BIT_PG_TX;
213 adapter->dcb_set_bitmap |= BIT_RESETLINK;
214 }
215} 232}
216 233
217static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, 234static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -222,10 +239,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
222 adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; 239 adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
223 240
224 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != 241 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
225 adapter->dcb_cfg.bw_percentage[0][bwg_id]) { 242 adapter->dcb_cfg.bw_percentage[0][bwg_id])
226 adapter->dcb_set_bitmap |= BIT_PG_TX; 243 adapter->dcb_set_bitmap |= BIT_PG_TX;
227 adapter->dcb_set_bitmap |= BIT_RESETLINK;
228 }
229} 244}
230 245
231static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, 246static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -252,10 +267,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
252 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent != 267 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
253 adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) || 268 adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
254 (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap != 269 (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
255 adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) { 270 adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
256 adapter->dcb_set_bitmap |= BIT_PG_RX; 271 adapter->dcb_set_bitmap |= BIT_PG_RX;
257 adapter->dcb_set_bitmap |= BIT_RESETLINK;
258 }
259} 272}
260 273
261static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, 274static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -266,10 +279,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
266 adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; 279 adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
267 280
268 if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] != 281 if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
269 adapter->dcb_cfg.bw_percentage[1][bwg_id]) { 282 adapter->dcb_cfg.bw_percentage[1][bwg_id])
270 adapter->dcb_set_bitmap |= BIT_PG_RX; 283 adapter->dcb_set_bitmap |= BIT_PG_RX;
271 adapter->dcb_set_bitmap |= BIT_RESETLINK;
272 }
273} 284}
274 285
275static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, 286static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -336,70 +347,120 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
336static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) 347static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
337{ 348{
338 struct ixgbe_adapter *adapter = netdev_priv(netdev); 349 struct ixgbe_adapter *adapter = netdev_priv(netdev);
350 struct dcb_app app = {
351 .selector = DCB_APP_IDTYPE_ETHTYPE,
352 .protocol = ETH_P_FCOE,
353 };
354 u8 up = dcb_getapp(netdev, &app);
339 int ret; 355 int ret;
340 356
341 if (!adapter->dcb_set_bitmap)
342 return DCB_NO_HW_CHG;
343
344 ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, 357 ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
345 adapter->ring_feature[RING_F_DCB].indices); 358 MAX_TRAFFIC_CLASS);
346
347 if (ret) 359 if (ret)
348 return DCB_NO_HW_CHG; 360 return DCB_NO_HW_CHG;
349 361
362 /* In IEEE mode, app data must be parsed into DCBX format for
363 * hardware routines.
364 */
365 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
366 up = (1 << up);
367
368#ifdef IXGBE_FCOE
369 if (up && (up != (1 << adapter->fcoe.up)))
370 adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
371
350 /* 372 /*
351 * Only take down the adapter if the configuration change 373 * Only take down the adapter if an app change occurred. FCoE
352 * requires a reset. 374 * may shuffle tx rings in this case and this cannot be done
375 * without a reset currently.
353 */ 376 */
354 if (adapter->dcb_set_bitmap & BIT_RESETLINK) { 377 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
355 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 378 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
356 msleep(1); 379 usleep_range(1000, 2000);
357 380
358 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { 381 ixgbe_fcoe_setapp(adapter, up);
359 if (netif_running(netdev)) 382
360 netdev->netdev_ops->ndo_stop(netdev); 383 if (netif_running(netdev))
361 ixgbe_clear_interrupt_scheme(adapter); 384 netdev->netdev_ops->ndo_stop(netdev);
362 } else { 385 ixgbe_clear_interrupt_scheme(adapter);
363 if (netif_running(netdev))
364 ixgbe_down(adapter);
365 }
366 } 386 }
387#endif
367 388
368 if (adapter->dcb_cfg.pfc_mode_enable) { 389 if (adapter->dcb_cfg.pfc_mode_enable) {
369 if ((adapter->hw.mac.type != ixgbe_mac_82598EB) && 390 switch (adapter->hw.mac.type) {
370 (adapter->hw.fc.current_mode != ixgbe_fc_pfc)) 391 case ixgbe_mac_82599EB:
371 adapter->last_lfc_mode = adapter->hw.fc.current_mode; 392 case ixgbe_mac_X540:
393 if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
394 adapter->last_lfc_mode =
395 adapter->hw.fc.current_mode;
396 break;
397 default:
398 break;
399 }
372 adapter->hw.fc.requested_mode = ixgbe_fc_pfc; 400 adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
373 } else { 401 } else {
374 if (adapter->hw.mac.type != ixgbe_mac_82598EB) 402 switch (adapter->hw.mac.type) {
375 adapter->hw.fc.requested_mode = adapter->last_lfc_mode; 403 case ixgbe_mac_82598EB:
376 else
377 adapter->hw.fc.requested_mode = ixgbe_fc_none; 404 adapter->hw.fc.requested_mode = ixgbe_fc_none;
405 break;
406 case ixgbe_mac_82599EB:
407 case ixgbe_mac_X540:
408 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
409 break;
410 default:
411 break;
412 }
378 } 413 }
379 414
380 if (adapter->dcb_set_bitmap & BIT_RESETLINK) { 415#ifdef IXGBE_FCOE
381 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { 416 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
382 ixgbe_init_interrupt_scheme(adapter); 417 ixgbe_init_interrupt_scheme(adapter);
383 if (netif_running(netdev)) 418 if (netif_running(netdev))
384 netdev->netdev_ops->ndo_open(netdev); 419 netdev->netdev_ops->ndo_open(netdev);
385 } else {
386 if (netif_running(netdev))
387 ixgbe_up(adapter);
388 }
389 ret = DCB_HW_CHG_RST; 420 ret = DCB_HW_CHG_RST;
390 } else if (adapter->dcb_set_bitmap & BIT_PFC) { 421 }
391 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 422#endif
392 ixgbe_dcb_config_pfc_82598(&adapter->hw, 423
393 &adapter->dcb_cfg); 424 if (adapter->dcb_set_bitmap & BIT_PFC) {
394 else if (adapter->hw.mac.type == ixgbe_mac_82599EB) 425 u8 pfc_en;
395 ixgbe_dcb_config_pfc_82599(&adapter->hw, 426 ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
396 &adapter->dcb_cfg); 427 ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en);
397 ret = DCB_HW_CHG; 428 ret = DCB_HW_CHG;
398 } 429 }
430
431 if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
432 u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
433 u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
434 /* Priority to TC mapping in CEE case default to 1:1 */
435 u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};
436 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
437
438#ifdef CONFIG_FCOE
439 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
440 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
441#endif
442
443 ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
444 max_frame, DCB_TX_CONFIG);
445 ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
446 max_frame, DCB_RX_CONFIG);
447
448 ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
449 DCB_TX_CONFIG, refill);
450 ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
451 ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
452 DCB_TX_CONFIG, bwg_id);
453 ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
454 DCB_TX_CONFIG, prio_type);
455
456 ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
457 bwg_id, prio_type, prio_tc);
458 }
459
399 if (adapter->dcb_cfg.pfc_mode_enable) 460 if (adapter->dcb_cfg.pfc_mode_enable)
400 adapter->hw.fc.current_mode = ixgbe_fc_pfc; 461 adapter->hw.fc.current_mode = ixgbe_fc_pfc;
401 462
402 if (adapter->dcb_set_bitmap & BIT_RESETLINK) 463 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
403 clear_bit(__IXGBE_RESETTING, &adapter->state); 464 clear_bit(__IXGBE_RESETTING, &adapter->state);
404 adapter->dcb_set_bitmap = 0x00; 465 adapter->dcb_set_bitmap = 0x00;
405 return ret; 466 return ret;
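The body of ixgbe_dcb_unpack_pfc is not part of this hunk (it lives in ixgbe_dcb.c); conceptually it folds the per-TC PFC settings into the single pfc_en bitmask consumed above, one bit per traffic class. A hypothetical sketch of that fold, with a plain int array standing in for the driver's tc_config[] and dcb_pfc enum:

    /*
     * Hypothetical sketch only: the real helper walks the driver's
     * tc_config[] entries; a simplified array stands in here.
     */
    static void sketch_unpack_pfc(const int pfc_enabled[8],
                                  unsigned char *pfc_en)
    {
        int tc;

        *pfc_en = 0;
        for (tc = 0; tc < 8; tc++) {
            if (pfc_enabled[tc])
                *pfc_en |= (unsigned char)(1 << tc); /* one bit per TC */
        }
    }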
@@ -408,40 +469,38 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
408static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) 469static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
409{ 470{
410 struct ixgbe_adapter *adapter = netdev_priv(netdev); 471 struct ixgbe_adapter *adapter = netdev_priv(netdev);
411 u8 rval = 0;
412 472
413 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 473 switch (capid) {
414 switch (capid) { 474 case DCB_CAP_ATTR_PG:
415 case DCB_CAP_ATTR_PG: 475 *cap = true;
416 *cap = true; 476 break;
417 break; 477 case DCB_CAP_ATTR_PFC:
418 case DCB_CAP_ATTR_PFC: 478 *cap = true;
419 *cap = true; 479 break;
420 break; 480 case DCB_CAP_ATTR_UP2TC:
421 case DCB_CAP_ATTR_UP2TC: 481 *cap = false;
422 *cap = false; 482 break;
423 break; 483 case DCB_CAP_ATTR_PG_TCS:
424 case DCB_CAP_ATTR_PG_TCS: 484 *cap = 0x80;
425 *cap = 0x80; 485 break;
426 break; 486 case DCB_CAP_ATTR_PFC_TCS:
427 case DCB_CAP_ATTR_PFC_TCS: 487 *cap = 0x80;
428 *cap = 0x80; 488 break;
429 break; 489 case DCB_CAP_ATTR_GSP:
430 case DCB_CAP_ATTR_GSP: 490 *cap = true;
431 *cap = true; 491 break;
432 break; 492 case DCB_CAP_ATTR_BCN:
433 case DCB_CAP_ATTR_BCN: 493 *cap = false;
434 *cap = false; 494 break;
435 break; 495 case DCB_CAP_ATTR_DCBX:
436 default: 496 *cap = adapter->dcbx_cap;
437 rval = -EINVAL; 497 break;
438 break; 498 default:
439 } 499 *cap = false;
440 } else { 500 break;
441 rval = -EINVAL;
442 } 501 }
443 502
444 return rval; 503 return 0;
445} 504}
446 505
447static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) 506static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
@@ -502,65 +561,205 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
502 */ 561 */
503static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) 562static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
504{ 563{
505 u8 rval = 0; 564 struct ixgbe_adapter *adapter = netdev_priv(netdev);
565 struct dcb_app app = {
566 .selector = idtype,
567 .protocol = id,
568 };
506 569
507 switch (idtype) { 570 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
508 case DCB_APP_IDTYPE_ETHTYPE: 571 return 0;
509#ifdef IXGBE_FCOE 572
510 if (id == ETH_P_FCOE) 573 return dcb_getapp(netdev, &app);
511 rval = ixgbe_fcoe_getapp(netdev_priv(netdev)); 574}
512#endif 575
513 break; 576static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
514 case DCB_APP_IDTYPE_PORTNUM: 577 struct ieee_ets *ets)
515 break; 578{
516 default: 579 struct ixgbe_adapter *adapter = netdev_priv(dev);
517 break; 580 struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
581
582 /* No IEEE ETS settings available */
583 if (!my_ets)
584 return -EINVAL;
585
586 ets->ets_cap = MAX_TRAFFIC_CLASS;
587 ets->cbs = my_ets->cbs;
588 memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
589 memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
590 memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
591 memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
592 return 0;
593}
594
595static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
596 struct ieee_ets *ets)
597{
598 struct ixgbe_adapter *adapter = netdev_priv(dev);
599 __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
600 __u8 prio_type[IEEE_8021QAZ_MAX_TCS];
601 int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
602 int i, err;
603 __u64 *p = (__u64 *) ets->prio_tc;
604 /* naively give each TC a bwg to map onto CEE hardware */
605 __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
606
607 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
608 return -EINVAL;
609
610 if (!adapter->ixgbe_ieee_ets) {
611 adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
612 GFP_KERNEL);
613 if (!adapter->ixgbe_ieee_ets)
614 return -ENOMEM;
518 } 615 }
519 return rval; 616
617 memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
618
619 /* Map TSA onto CEE prio type */
620 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
621 switch (ets->tc_tsa[i]) {
622 case IEEE_8021QAZ_TSA_STRICT:
623 prio_type[i] = 2;
624 break;
625 case IEEE_8021QAZ_TSA_ETS:
626 prio_type[i] = 0;
627 break;
628 default:
629 /* Hardware only supports strict priority or
 630 * ETS transmission selection algorithms; if
 631 * we receive some other value from dcbnl,
 632 * throw an error.
633 */
634 return -EINVAL;
635 }
636 }
637
638 if (*p)
639 ixgbe_dcbnl_set_state(dev, 1);
640 else
641 ixgbe_dcbnl_set_state(dev, 0);
642
643 ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
644 err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
645 bwg_id, prio_type, ets->prio_tc);
646 return err;
520} 647}
521 648
522/** 649static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
523 * ixgbe_dcbnl_setapp - set the DCBX application user priority 650 struct ieee_pfc *pfc)
524 * @netdev : the corresponding netdev 651{
525 * @idtype : identifies the id as ether type or TCP/UDP port number 652 struct ixgbe_adapter *adapter = netdev_priv(dev);
526 * @id: id is either ether type or TCP/UDP port number 653 struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
527 * @up: the 802.1p user priority bitmap 654 int i;
528 * 655
529 * Returns : 0 on success or 1 on error 656 /* No IEEE PFC settings available */
530 */ 657 if (!my_pfc)
531static u8 ixgbe_dcbnl_setapp(struct net_device *netdev, 658 return -EINVAL;
532 u8 idtype, u16 id, u8 up) 659
660 pfc->pfc_cap = MAX_TRAFFIC_CLASS;
661 pfc->pfc_en = my_pfc->pfc_en;
662 pfc->mbc = my_pfc->mbc;
663 pfc->delay = my_pfc->delay;
664
665 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
666 pfc->requests[i] = adapter->stats.pxoffrxc[i];
667 pfc->indications[i] = adapter->stats.pxofftxc[i];
668 }
669
670 return 0;
671}
672
673static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
674 struct ieee_pfc *pfc)
675{
676 struct ixgbe_adapter *adapter = netdev_priv(dev);
677 int err;
678
679 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
680 return -EINVAL;
681
682 if (!adapter->ixgbe_ieee_pfc) {
683 adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc),
684 GFP_KERNEL);
685 if (!adapter->ixgbe_ieee_pfc)
686 return -ENOMEM;
687 }
688
689 memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
690 err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en);
691 return err;
692}
693
694static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
695 struct dcb_app *app)
533{ 696{
534 u8 rval = 1; 697 struct ixgbe_adapter *adapter = netdev_priv(dev);
698
699 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
700 return -EINVAL;
701
702 dcb_setapp(dev, app);
535 703
536 switch (idtype) {
537 case DCB_APP_IDTYPE_ETHTYPE:
538#ifdef IXGBE_FCOE 704#ifdef IXGBE_FCOE
539 if (id == ETH_P_FCOE) { 705 if (app->selector == 1 && app->protocol == ETH_P_FCOE &&
540 u8 tc; 706 adapter->fcoe.tc == app->priority)
541 struct ixgbe_adapter *adapter; 707 ixgbe_dcbnl_set_all(dev);
542
543 adapter = netdev_priv(netdev);
544 tc = adapter->fcoe.tc;
545 rval = ixgbe_fcoe_setapp(adapter, up);
546 if ((!rval) && (tc != adapter->fcoe.tc) &&
547 (adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
548 (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
549 adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
550 adapter->dcb_set_bitmap |= BIT_RESETLINK;
551 }
552 }
553#endif 708#endif
554 break; 709 return 0;
555 case DCB_APP_IDTYPE_PORTNUM: 710}
556 break; 711
557 default: 712static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
558 break; 713{
714 struct ixgbe_adapter *adapter = netdev_priv(dev);
715 return adapter->dcbx_cap;
716}
717
718static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
719{
720 struct ixgbe_adapter *adapter = netdev_priv(dev);
721 struct ieee_ets ets = {0};
722 struct ieee_pfc pfc = {0};
723
724 /* no support for LLD_MANAGED modes or CEE+IEEE */
725 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
726 ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
727 !(mode & DCB_CAP_DCBX_HOST))
728 return 1;
729
730 if (mode == adapter->dcbx_cap)
731 return 0;
732
733 adapter->dcbx_cap = mode;
734
735 /* ETS and PFC defaults */
736 ets.ets_cap = 8;
737 pfc.pfc_cap = 8;
738
739 if (mode & DCB_CAP_DCBX_VER_IEEE) {
740 ixgbe_dcbnl_ieee_setets(dev, &ets);
741 ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
742 } else if (mode & DCB_CAP_DCBX_VER_CEE) {
743 adapter->dcb_set_bitmap |= (BIT_PFC | BIT_PG_TX | BIT_PG_RX);
744 ixgbe_dcbnl_set_all(dev);
745 } else {
746 /* Drop into single TC mode strict priority as this
747 * indicates CEE and IEEE versions are disabled
748 */
749 ixgbe_dcbnl_ieee_setets(dev, &ets);
750 ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
751 ixgbe_dcbnl_set_state(dev, 0);
559 } 752 }
560 return rval; 753
754 return 0;
561} 755}
562 756
563const struct dcbnl_rtnl_ops dcbnl_ops = { 757const struct dcbnl_rtnl_ops dcbnl_ops = {
758 .ieee_getets = ixgbe_dcbnl_ieee_getets,
759 .ieee_setets = ixgbe_dcbnl_ieee_setets,
760 .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc,
761 .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc,
762 .ieee_setapp = ixgbe_dcbnl_ieee_setapp,
564 .getstate = ixgbe_dcbnl_get_state, 763 .getstate = ixgbe_dcbnl_get_state,
565 .setstate = ixgbe_dcbnl_set_state, 764 .setstate = ixgbe_dcbnl_set_state,
566 .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, 765 .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
@@ -581,6 +780,6 @@ const struct dcbnl_rtnl_ops dcbnl_ops = {
581 .getpfcstate = ixgbe_dcbnl_getpfcstate, 780 .getpfcstate = ixgbe_dcbnl_getpfcstate,
582 .setpfcstate = ixgbe_dcbnl_setpfcstate, 781 .setpfcstate = ixgbe_dcbnl_setpfcstate,
583 .getapp = ixgbe_dcbnl_getapp, 782 .getapp = ixgbe_dcbnl_getapp,
584 .setapp = ixgbe_dcbnl_setapp, 783 .getdcbx = ixgbe_dcbnl_getdcbx,
784 .setdcbx = ixgbe_dcbnl_setdcbx,
585}; 785};
586
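The new setdcbx handler accepts only host-managed operation running exactly one DCBX version at a time. The validation restated as a standalone predicate over the same flags (flag values mirror include/linux/dcbnl.h; return convention simplified to 1 for supported modes):

    /* Standalone restatement of the setdcbx mode check. */
    #define DCB_CAP_DCBX_HOST        0x01
    #define DCB_CAP_DCBX_LLD_MANAGED 0x02
    #define DCB_CAP_DCBX_VER_CEE     0x04
    #define DCB_CAP_DCBX_VER_IEEE    0x08

    static int dcbx_mode_supported(unsigned char mode)
    {
        if (mode & DCB_CAP_DCBX_LLD_MANAGED)    /* firmware-managed: no */
            return 0;
        if ((mode & DCB_CAP_DCBX_VER_CEE) &&
            (mode & DCB_CAP_DCBX_VER_IEEE))     /* both versions at once: no */
            return 0;
        if (!(mode & DCB_CAP_DCBX_HOST))        /* must be host-managed */
            return 0;
        return 1;
    }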
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index dcebc82c6f4d..cb1555bc8548 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -84,6 +84,7 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
84 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, 84 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
85 {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, 85 {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
86 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, 86 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
87 {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
87 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, 88 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
88 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, 89 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
89 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, 90 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
@@ -102,6 +103,10 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
102 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, 103 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
103 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, 104 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
104 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, 105 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
106 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
107 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
108 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
109 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
105#ifdef IXGBE_FCOE 110#ifdef IXGBE_FCOE
106 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, 111 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
107 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, 112 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
@@ -152,20 +157,35 @@ static int ixgbe_get_settings(struct net_device *netdev,
152 ecmd->supported |= (SUPPORTED_1000baseT_Full | 157 ecmd->supported |= (SUPPORTED_1000baseT_Full |
153 SUPPORTED_Autoneg); 158 SUPPORTED_Autoneg);
154 159
160 switch (hw->mac.type) {
161 case ixgbe_mac_X540:
162 ecmd->supported |= SUPPORTED_100baseT_Full;
163 break;
164 default:
165 break;
166 }
167
155 ecmd->advertising = ADVERTISED_Autoneg; 168 ecmd->advertising = ADVERTISED_Autoneg;
156 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) 169 if (hw->phy.autoneg_advertised) {
157 ecmd->advertising |= ADVERTISED_10000baseT_Full; 170 if (hw->phy.autoneg_advertised &
158 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) 171 IXGBE_LINK_SPEED_100_FULL)
159 ecmd->advertising |= ADVERTISED_1000baseT_Full; 172 ecmd->advertising |= ADVERTISED_100baseT_Full;
160 /* 173 if (hw->phy.autoneg_advertised &
161 * It's possible that phy.autoneg_advertised may not be 174 IXGBE_LINK_SPEED_10GB_FULL)
162 * set yet. If so display what the default would be - 175 ecmd->advertising |= ADVERTISED_10000baseT_Full;
163 * both 1G and 10G supported. 176 if (hw->phy.autoneg_advertised &
164 */ 177 IXGBE_LINK_SPEED_1GB_FULL)
165 if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full | 178 ecmd->advertising |= ADVERTISED_1000baseT_Full;
166 ADVERTISED_10000baseT_Full))) 179 } else {
180 /*
181 * Default advertised modes in case
182 * phy.autoneg_advertised isn't set.
183 */
167 ecmd->advertising |= (ADVERTISED_10000baseT_Full | 184 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
168 ADVERTISED_1000baseT_Full); 185 ADVERTISED_1000baseT_Full);
186 if (hw->mac.type == ixgbe_mac_X540)
187 ecmd->advertising |= ADVERTISED_100baseT_Full;
188 }
169 189
170 if (hw->phy.media_type == ixgbe_media_type_copper) { 190 if (hw->phy.media_type == ixgbe_media_type_copper) {
171 ecmd->supported |= SUPPORTED_TP; 191 ecmd->supported |= SUPPORTED_TP;
@@ -185,6 +205,16 @@ static int ixgbe_get_settings(struct net_device *netdev,
185 ADVERTISED_FIBRE); 205 ADVERTISED_FIBRE);
186 ecmd->port = PORT_FIBRE; 206 ecmd->port = PORT_FIBRE;
187 ecmd->autoneg = AUTONEG_DISABLE; 207 ecmd->autoneg = AUTONEG_DISABLE;
208 } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
209 (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
210 ecmd->supported |= (SUPPORTED_1000baseT_Full |
211 SUPPORTED_Autoneg |
212 SUPPORTED_FIBRE);
213 ecmd->advertising = (ADVERTISED_10000baseT_Full |
214 ADVERTISED_1000baseT_Full |
215 ADVERTISED_Autoneg |
216 ADVERTISED_FIBRE);
217 ecmd->port = PORT_FIBRE;
188 } else { 218 } else {
189 ecmd->supported |= (SUPPORTED_1000baseT_Full | 219 ecmd->supported |= (SUPPORTED_1000baseT_Full |
190 SUPPORTED_FIBRE); 220 SUPPORTED_FIBRE);
@@ -204,6 +234,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
204 /* Get PHY type */ 234 /* Get PHY type */
205 switch (adapter->hw.phy.type) { 235 switch (adapter->hw.phy.type) {
206 case ixgbe_phy_tn: 236 case ixgbe_phy_tn:
237 case ixgbe_phy_aq:
207 case ixgbe_phy_cu_unknown: 238 case ixgbe_phy_cu_unknown:
208 /* Copper 10G-BASET */ 239 /* Copper 10G-BASET */
209 ecmd->port = PORT_TP; 240 ecmd->port = PORT_TP;
@@ -260,11 +291,22 @@ static int ixgbe_get_settings(struct net_device *netdev,
260 291
261 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 292 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
262 if (link_up) { 293 if (link_up) {
263 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 294 switch (link_speed) {
264 SPEED_10000 : SPEED_1000; 295 case IXGBE_LINK_SPEED_10GB_FULL:
296 ethtool_cmd_speed_set(ecmd, SPEED_10000);
297 break;
298 case IXGBE_LINK_SPEED_1GB_FULL:
299 ethtool_cmd_speed_set(ecmd, SPEED_1000);
300 break;
301 case IXGBE_LINK_SPEED_100_FULL:
302 ethtool_cmd_speed_set(ecmd, SPEED_100);
303 break;
304 default:
305 break;
306 }
265 ecmd->duplex = DUPLEX_FULL; 307 ecmd->duplex = DUPLEX_FULL;
266 } else { 308 } else {
267 ecmd->speed = -1; 309 ethtool_cmd_speed_set(ecmd, -1);
268 ecmd->duplex = -1; 310 ecmd->duplex = -1;
269 } 311 }
270 312
@@ -295,6 +337,9 @@ static int ixgbe_set_settings(struct net_device *netdev,
295 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 337 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
296 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 338 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
297 339
340 if (ecmd->advertising & ADVERTISED_100baseT_Full)
341 advertised |= IXGBE_LINK_SPEED_100_FULL;
342
298 if (old == advertised) 343 if (old == advertised)
299 return err; 344 return err;
300 /* this sets the link speed and restarts auto-neg */ 345 /* this sets the link speed and restarts auto-neg */
@@ -306,9 +351,10 @@ static int ixgbe_set_settings(struct net_device *netdev,
306 } 351 }
307 } else { 352 } else {
308 /* in this case we currently only support 10Gb/FULL */ 353 /* in this case we currently only support 10Gb/FULL */
354 u32 speed = ethtool_cmd_speed(ecmd);
309 if ((ecmd->autoneg == AUTONEG_ENABLE) || 355 if ((ecmd->autoneg == AUTONEG_ENABLE) ||
310 (ecmd->advertising != ADVERTISED_10000baseT_Full) || 356 (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
311 (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) 357 (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
312 return -EINVAL; 358 return -EINVAL;
313 } 359 }
314 360
@@ -332,13 +378,6 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
332 else 378 else
333 pause->autoneg = 1; 379 pause->autoneg = 1;
334 380
335#ifdef CONFIG_DCB
336 if (hw->fc.current_mode == ixgbe_fc_pfc) {
337 pause->rx_pause = 0;
338 pause->tx_pause = 0;
339 }
340
341#endif
342 if (hw->fc.current_mode == ixgbe_fc_rx_pause) { 381 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
343 pause->rx_pause = 1; 382 pause->rx_pause = 1;
344 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { 383 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
@@ -346,6 +385,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
346 } else if (hw->fc.current_mode == ixgbe_fc_full) { 385 } else if (hw->fc.current_mode == ixgbe_fc_full) {
347 pause->rx_pause = 1; 386 pause->rx_pause = 1;
348 pause->tx_pause = 1; 387 pause->tx_pause = 1;
388#ifdef CONFIG_DCB
389 } else if (hw->fc.current_mode == ixgbe_fc_pfc) {
390 pause->rx_pause = 0;
391 pause->tx_pause = 0;
392#endif
349 } 393 }
350} 394}
351 395
@@ -363,7 +407,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
363 return -EINVAL; 407 return -EINVAL;
364 408
365#endif 409#endif
366
367 fc = hw->fc; 410 fc = hw->fc;
368 411
369 if (pause->autoneg != AUTONEG_ENABLE) 412 if (pause->autoneg != AUTONEG_ENABLE)
@@ -401,7 +444,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
401static u32 ixgbe_get_rx_csum(struct net_device *netdev) 444static u32 ixgbe_get_rx_csum(struct net_device *netdev)
402{ 445{
403 struct ixgbe_adapter *adapter = netdev_priv(netdev); 446 struct ixgbe_adapter *adapter = netdev_priv(netdev);
404 return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED); 447 return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
405} 448}
406 449
407static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data) 450static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
@@ -412,11 +455,6 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
412 else 455 else
413 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; 456 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
414 457
415 if (netif_running(netdev))
416 ixgbe_reinit_locked(adapter);
417 else
418 ixgbe_reset(adapter);
419
420 return 0; 458 return 0;
421} 459}
422 460
@@ -428,16 +466,21 @@ static u32 ixgbe_get_tx_csum(struct net_device *netdev)
428static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) 466static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
429{ 467{
430 struct ixgbe_adapter *adapter = netdev_priv(netdev); 468 struct ixgbe_adapter *adapter = netdev_priv(netdev);
469 u32 feature_list;
431 470
432 if (data) { 471 feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
433 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 472 switch (adapter->hw.mac.type) {
434 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 473 case ixgbe_mac_82599EB:
435 netdev->features |= NETIF_F_SCTP_CSUM; 474 case ixgbe_mac_X540:
436 } else { 475 feature_list |= NETIF_F_SCTP_CSUM;
437 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 476 break;
438 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 477 default:
439 netdev->features &= ~NETIF_F_SCTP_CSUM; 478 break;
440 } 479 }
480 if (data)
481 netdev->features |= feature_list;
482 else
483 netdev->features &= ~feature_list;
441 484
442 return 0; 485 return 0;
443} 486}
@@ -530,10 +573,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
530 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); 573 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
531 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); 574 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
532 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); 575 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
533 for (i = 0; i < 8; i++) 576 for (i = 0; i < 8; i++) {
534 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); 577 switch (hw->mac.type) {
535 for (i = 0; i < 8; i++) 578 case ixgbe_mac_82598EB:
536 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); 579 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
580 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
581 break;
582 case ixgbe_mac_82599EB:
583 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
584 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
585 break;
586 default:
587 break;
588 }
589 }
537 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); 590 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
538 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); 591 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
539 592
@@ -615,6 +668,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
615 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); 668 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
616 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); 669 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
617 670
671 /* DCB */
618 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); 672 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
619 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); 673 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
620 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); 674 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
@@ -798,11 +852,8 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
798 if (!eeprom_buff) 852 if (!eeprom_buff)
799 return -ENOMEM; 853 return -ENOMEM;
800 854
801 for (i = 0; i < eeprom_len; i++) { 855 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
802 if ((ret_val = hw->eeprom.ops.read(hw, first_word + i, 856 eeprom_buff);
803 &eeprom_buff[i])))
804 break;
805 }
806 857
807 /* Device's eeprom is always little-endian, word addressable */ 858 /* Device's eeprom is always little-endian, word addressable */
808 for (i = 0; i < eeprom_len; i++) 859 for (i = 0; i < eeprom_len; i++)
@@ -820,16 +871,20 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
820 struct ixgbe_adapter *adapter = netdev_priv(netdev); 871 struct ixgbe_adapter *adapter = netdev_priv(netdev);
821 char firmware_version[32]; 872 char firmware_version[32];
822 873
823 strncpy(drvinfo->driver, ixgbe_driver_name, 32); 874 strncpy(drvinfo->driver, ixgbe_driver_name,
824 strncpy(drvinfo->version, ixgbe_driver_version, 32); 875 sizeof(drvinfo->driver) - 1);
876 strncpy(drvinfo->version, ixgbe_driver_version,
877 sizeof(drvinfo->version) - 1);
825 878
826 sprintf(firmware_version, "%d.%d-%d", 879 snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
827 (adapter->eeprom_version & 0xF000) >> 12, 880 (adapter->eeprom_version & 0xF000) >> 12,
828 (adapter->eeprom_version & 0x0FF0) >> 4, 881 (adapter->eeprom_version & 0x0FF0) >> 4,
829 adapter->eeprom_version & 0x000F); 882 adapter->eeprom_version & 0x000F);
830 883
831 strncpy(drvinfo->fw_version, firmware_version, 32); 884 strncpy(drvinfo->fw_version, firmware_version,
832 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 885 sizeof(drvinfo->fw_version));
886 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
887 sizeof(drvinfo->bus_info));
833 drvinfo->n_stats = IXGBE_STATS_LEN; 888 drvinfo->n_stats = IXGBE_STATS_LEN;
834 drvinfo->testinfo_len = IXGBE_TEST_LEN; 889 drvinfo->testinfo_len = IXGBE_TEST_LEN;
835 drvinfo->regdump_len = ixgbe_get_regs_len(netdev); 890 drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
@@ -879,7 +934,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
879 } 934 }
880 935
881 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 936 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
882 msleep(1); 937 usleep_range(1000, 2000);
883 938
884 if (!netif_running(adapter->netdev)) { 939 if (!netif_running(adapter->netdev)) {
885 for (i = 0; i < adapter->num_tx_queues; i++) 940 for (i = 0; i < adapter->num_tx_queues; i++)
@@ -902,13 +957,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
902 memcpy(&temp_tx_ring[i], adapter->tx_ring[i], 957 memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
903 sizeof(struct ixgbe_ring)); 958 sizeof(struct ixgbe_ring));
904 temp_tx_ring[i].count = new_tx_count; 959 temp_tx_ring[i].count = new_tx_count;
905 err = ixgbe_setup_tx_resources(adapter, 960 err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
906 &temp_tx_ring[i]);
907 if (err) { 961 if (err) {
908 while (i) { 962 while (i) {
909 i--; 963 i--;
910 ixgbe_free_tx_resources(adapter, 964 ixgbe_free_tx_resources(&temp_tx_ring[i]);
911 &temp_tx_ring[i]);
912 } 965 }
913 goto clear_reset; 966 goto clear_reset;
914 } 967 }
@@ -927,13 +980,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
927 memcpy(&temp_rx_ring[i], adapter->rx_ring[i], 980 memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
928 sizeof(struct ixgbe_ring)); 981 sizeof(struct ixgbe_ring));
929 temp_rx_ring[i].count = new_rx_count; 982 temp_rx_ring[i].count = new_rx_count;
930 err = ixgbe_setup_rx_resources(adapter, 983 err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
931 &temp_rx_ring[i]);
932 if (err) { 984 if (err) {
933 while (i) { 985 while (i) {
934 i--; 986 i--;
935 ixgbe_free_rx_resources(adapter, 987 ixgbe_free_rx_resources(&temp_rx_ring[i]);
936 &temp_rx_ring[i]);
937 } 988 }
938 goto err_setup; 989 goto err_setup;
939 } 990 }
@@ -948,8 +999,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
948 /* tx */ 999 /* tx */
949 if (new_tx_count != adapter->tx_ring_count) { 1000 if (new_tx_count != adapter->tx_ring_count) {
950 for (i = 0; i < adapter->num_tx_queues; i++) { 1001 for (i = 0; i < adapter->num_tx_queues; i++) {
951 ixgbe_free_tx_resources(adapter, 1002 ixgbe_free_tx_resources(adapter->tx_ring[i]);
952 adapter->tx_ring[i]);
953 memcpy(adapter->tx_ring[i], &temp_tx_ring[i], 1003 memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
954 sizeof(struct ixgbe_ring)); 1004 sizeof(struct ixgbe_ring));
955 } 1005 }
@@ -959,8 +1009,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
959 /* rx */ 1009 /* rx */
960 if (new_rx_count != adapter->rx_ring_count) { 1010 if (new_rx_count != adapter->rx_ring_count) {
961 for (i = 0; i < adapter->num_rx_queues; i++) { 1011 for (i = 0; i < adapter->num_rx_queues; i++) {
962 ixgbe_free_rx_resources(adapter, 1012 ixgbe_free_rx_resources(adapter->rx_ring[i]);
963 adapter->rx_ring[i]);
964 memcpy(adapter->rx_ring[i], &temp_rx_ring[i], 1013 memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
965 sizeof(struct ixgbe_ring)); 1014 sizeof(struct ixgbe_ring));
966 } 1015 }
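The resize path above follows a prepare-then-commit shape: every replacement ring is allocated into a temporary array first, and the live rings are freed and overwritten only after all allocations succeed, so a mid-loop failure unwinds cleanly and leaves the device on its old rings. The shape in isolation, with hypothetical ring type and alloc_ring/free_ring stand-ins for the driver's setup/free resource helpers:

    /* Generic prepare-then-commit sketch; all names are stand-ins. */
    struct ring { void *desc; int count; };

    static int resize_rings(struct ring *live, struct ring *tmp, int n,
                            int (*alloc_ring)(struct ring *),
                            void (*free_ring)(struct ring *))
    {
        int i, err;

        for (i = 0; i < n; i++) {
            err = alloc_ring(&tmp[i]);
            if (err) {
                while (i--)
                    free_ring(&tmp[i]);  /* unwind partial allocations */
                return err;
            }
        }
        for (i = 0; i < n; i++) {
            free_ring(&live[i]);         /* old resources no longer needed */
            live[i] = tmp[i];            /* commit the replacement */
        }
        return 0;
    }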
@@ -984,9 +1033,6 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
984 return IXGBE_TEST_LEN; 1033 return IXGBE_TEST_LEN;
985 case ETH_SS_STATS: 1034 case ETH_SS_STATS:
986 return IXGBE_STATS_LEN; 1035 return IXGBE_STATS_LEN;
987 case ETH_SS_NTUPLE_FILTERS:
988 return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
989 ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY);
990 default: 1036 default:
991 return -EOPNOTSUPP; 1037 return -EOPNOTSUPP;
992 } 1038 }
@@ -996,12 +1042,11 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
996 struct ethtool_stats *stats, u64 *data) 1042 struct ethtool_stats *stats, u64 *data)
997{ 1043{
998 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1044 struct ixgbe_adapter *adapter = netdev_priv(netdev);
999 u64 *queue_stat;
1000 int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
1001 struct rtnl_link_stats64 temp; 1045 struct rtnl_link_stats64 temp;
1002 const struct rtnl_link_stats64 *net_stats; 1046 const struct rtnl_link_stats64 *net_stats;
1003 int j, k; 1047 unsigned int start;
1004 int i; 1048 struct ixgbe_ring *ring;
1049 int i, j;
1005 char *p = NULL; 1050 char *p = NULL;
1006 1051
1007 ixgbe_update_stats(adapter); 1052 ixgbe_update_stats(adapter);
@@ -1022,16 +1067,22 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1022 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1067 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1023 } 1068 }
1024 for (j = 0; j < adapter->num_tx_queues; j++) { 1069 for (j = 0; j < adapter->num_tx_queues; j++) {
1025 queue_stat = (u64 *)&adapter->tx_ring[j]->stats; 1070 ring = adapter->tx_ring[j];
1026 for (k = 0; k < stat_count; k++) 1071 do {
1027 data[i + k] = queue_stat[k]; 1072 start = u64_stats_fetch_begin_bh(&ring->syncp);
1028 i += k; 1073 data[i] = ring->stats.packets;
1074 data[i+1] = ring->stats.bytes;
1075 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1076 i += 2;
1029 } 1077 }
1030 for (j = 0; j < adapter->num_rx_queues; j++) { 1078 for (j = 0; j < adapter->num_rx_queues; j++) {
1031 queue_stat = (u64 *)&adapter->rx_ring[j]->stats; 1079 ring = adapter->rx_ring[j];
1032 for (k = 0; k < stat_count; k++) 1080 do {
1033 data[i + k] = queue_stat[k]; 1081 start = u64_stats_fetch_begin_bh(&ring->syncp);
1034 i += k; 1082 data[i] = ring->stats.packets;
1083 data[i+1] = ring->stats.bytes;
1084 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1085 i += 2;
1035 } 1086 }
1036 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 1087 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1037 for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) { 1088 for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
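
The switch from copying a raw u64 array to the u64_stats fetch/retry loop above matters on 32-bit hosts, where a 64-bit counter update is not atomic: the reader simply retries whenever the writer's sequence count moved underneath it. A minimal sketch of the writer side this loop pairs with, assuming a ring with the same syncp/stats members as in this driver (helper name hypothetical):

	/* sketch: writer side pairing with the fetch/retry readers */
	static void ring_stats_add(struct ixgbe_ring *ring, u64 pkts, u64 bytes)
	{
		u64_stats_update_begin(&ring->syncp);	/* seq goes odd: update in flight */
		ring->stats.packets += pkts;
		ring->stats.bytes += bytes;
		u64_stats_update_end(&ring->syncp);	/* seq goes even: values stable */
	}
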
@@ -1136,7 +1187,7 @@ struct ixgbe_reg_test {
1136#define TABLE64_TEST_HI 6 1187#define TABLE64_TEST_HI 6
1137 1188
1138/* default 82599 register test */ 1189/* default 82599 register test */
1139static struct ixgbe_reg_test reg_test_82599[] = { 1190static const struct ixgbe_reg_test reg_test_82599[] = {
1140 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1191 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1141 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1192 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1142 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1193 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1160,7 +1211,7 @@ static struct ixgbe_reg_test reg_test_82599[] = {
1160}; 1211};
1161 1212
1162/* default 82598 register test */ 1213/* default 82598 register test */
1163static struct ixgbe_reg_test reg_test_82598[] = { 1214static const struct ixgbe_reg_test reg_test_82598[] = {
1164 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1215 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1165 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1216 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1166 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1217 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1187,54 +1238,82 @@ static struct ixgbe_reg_test reg_test_82598[] = {
1187 { 0, 0, 0, 0 } 1238 { 0, 0, 0, 0 }
1188}; 1239};
1189 1240
1190#define REG_PATTERN_TEST(R, M, W) \ 1241static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1191{ \ 1242 u32 mask, u32 write)
1192 u32 pat, val, before; \ 1243{
1193 const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ 1244 u32 pat, val, before;
1194 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \ 1245 static const u32 test_pattern[] = {
1195 before = readl(adapter->hw.hw_addr + R); \ 1246 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1196 writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \ 1247
1197 val = readl(adapter->hw.hw_addr + R); \ 1248 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1198 if (val != (_test[pat] & W & M)) { \ 1249 before = readl(adapter->hw.hw_addr + reg);
1199 e_err(drv, "pattern test reg %04X failed: got " \ 1250 writel((test_pattern[pat] & write),
1200 "0x%08X expected 0x%08X\n", \ 1251 (adapter->hw.hw_addr + reg));
1201 R, val, (_test[pat] & W & M)); \ 1252 val = readl(adapter->hw.hw_addr + reg);
1202 *data = R; \ 1253 if (val != (test_pattern[pat] & write & mask)) {
1203 writel(before, adapter->hw.hw_addr + R); \ 1254 e_err(drv, "pattern test reg %04X failed: got "
1204 return 1; \ 1255 "0x%08X expected 0x%08X\n",
1205 } \ 1256 reg, val, (test_pattern[pat] & write & mask));
1206 writel(before, adapter->hw.hw_addr + R); \ 1257 *data = reg;
1207 } \ 1258 writel(before, adapter->hw.hw_addr + reg);
1259 return 1;
1260 }
1261 writel(before, adapter->hw.hw_addr + reg);
1262 }
1263 return 0;
1208} 1264}
1209 1265
1210#define REG_SET_AND_CHECK(R, M, W) \ 1266static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1211{ \ 1267 u32 mask, u32 write)
1212 u32 val, before; \ 1268{
1213 before = readl(adapter->hw.hw_addr + R); \ 1269 u32 val, before;
1214 writel((W & M), (adapter->hw.hw_addr + R)); \ 1270 before = readl(adapter->hw.hw_addr + reg);
1215 val = readl(adapter->hw.hw_addr + R); \ 1271 writel((write & mask), (adapter->hw.hw_addr + reg));
1216 if ((W & M) != (val & M)) { \ 1272 val = readl(adapter->hw.hw_addr + reg);
1217 e_err(drv, "set/check reg %04X test failed: got 0x%08X " \ 1273 if ((write & mask) != (val & mask)) {
1218 "expected 0x%08X\n", R, (val & M), (W & M)); \ 1274 e_err(drv, "set/check reg %04X test failed: got 0x%08X "
1219 *data = R; \ 1275 "expected 0x%08X\n", reg, (val & mask), (write & mask));
1220 writel(before, (adapter->hw.hw_addr + R)); \ 1276 *data = reg;
1221 return 1; \ 1277 writel(before, (adapter->hw.hw_addr + reg));
1222 } \ 1278 return 1;
1223 writel(before, (adapter->hw.hw_addr + R)); \ 1279 }
1280 writel(before, (adapter->hw.hw_addr + reg));
1281 return 0;
1224} 1282}
1225 1283
1284#define REG_PATTERN_TEST(reg, mask, write) \
1285 do { \
1286 if (reg_pattern_test(adapter, data, reg, mask, write)) \
1287 return 1; \
1288 } while (0)
1289
1290
1291#define REG_SET_AND_CHECK(reg, mask, write) \
1292 do { \
1293 if (reg_set_and_check(adapter, data, reg, mask, write)) \
1294 return 1; \
1295 } while (0)
1296
1226static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) 1297static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1227{ 1298{
1228 struct ixgbe_reg_test *test; 1299 const struct ixgbe_reg_test *test;
1229 u32 value, before, after; 1300 u32 value, before, after;
1230 u32 i, toggle; 1301 u32 i, toggle;
1231 1302
1232 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1303 switch (adapter->hw.mac.type) {
1233 toggle = 0x7FFFF30F; 1304 case ixgbe_mac_82598EB:
1234 test = reg_test_82599;
1235 } else {
1236 toggle = 0x7FFFF3FF; 1305 toggle = 0x7FFFF3FF;
1237 test = reg_test_82598; 1306 test = reg_test_82598;
1307 break;
1308 case ixgbe_mac_82599EB:
1309 case ixgbe_mac_X540:
1310 toggle = 0x7FFFF30F;
1311 test = reg_test_82599;
1312 break;
1313 default:
1314 *data = 1;
1315 return 1;
1238 } 1317 }
1239 1318
1240 /* 1319 /*
@@ -1265,13 +1344,13 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1265 switch (test->test_type) { 1344 switch (test->test_type) {
1266 case PATTERN_TEST: 1345 case PATTERN_TEST:
1267 REG_PATTERN_TEST(test->reg + (i * 0x40), 1346 REG_PATTERN_TEST(test->reg + (i * 0x40),
1268 test->mask, 1347 test->mask,
1269 test->write); 1348 test->write);
1270 break; 1349 break;
1271 case SET_READ_TEST: 1350 case SET_READ_TEST:
1272 REG_SET_AND_CHECK(test->reg + (i * 0x40), 1351 REG_SET_AND_CHECK(test->reg + (i * 0x40),
1273 test->mask, 1352 test->mask,
1274 test->write); 1353 test->write);
1275 break; 1354 break;
1276 case WRITE_NO_TEST: 1355 case WRITE_NO_TEST:
1277 writel(test->write, 1356 writel(test->write,
@@ -1280,18 +1359,18 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1280 break; 1359 break;
1281 case TABLE32_TEST: 1360 case TABLE32_TEST:
1282 REG_PATTERN_TEST(test->reg + (i * 4), 1361 REG_PATTERN_TEST(test->reg + (i * 4),
1283 test->mask, 1362 test->mask,
1284 test->write); 1363 test->write);
1285 break; 1364 break;
1286 case TABLE64_TEST_LO: 1365 case TABLE64_TEST_LO:
1287 REG_PATTERN_TEST(test->reg + (i * 8), 1366 REG_PATTERN_TEST(test->reg + (i * 8),
1288 test->mask, 1367 test->mask,
1289 test->write); 1368 test->write);
1290 break; 1369 break;
1291 case TABLE64_TEST_HI: 1370 case TABLE64_TEST_HI:
1292 REG_PATTERN_TEST((test->reg + 4) + (i * 8), 1371 REG_PATTERN_TEST((test->reg + 4) + (i * 8),
1293 test->mask, 1372 test->mask,
1294 test->write); 1373 test->write);
1295 break; 1374 break;
1296 } 1375 }
1297 } 1376 }
@@ -1354,7 +1433,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1354 1433
1355 /* Disable all the interrupts */ 1434 /* Disable all the interrupts */
1356 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1435 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1357 msleep(10); 1436 usleep_range(10000, 20000);
1358 1437
1359 /* Test each interrupt */ 1438 /* Test each interrupt */
1360 for (; i < 10; i++) { 1439 for (; i < 10; i++) {
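
Every msleep(10) in the interrupt test becomes usleep_range(10000, 20000) here, per the kernel's timer guidance: msleep() rounds up to jiffies and can overshoot badly for waits this short, while usleep_range() is hrtimer-based and the explicit slack lets the kernel coalesce nearby wakeups. The two calls side by side:

	msleep(10);                  /* jiffies granularity; may sleep well past 10ms */
	usleep_range(10000, 20000);  /* hrtimer-backed; wakes between 10ms and 20ms */
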
@@ -1374,7 +1453,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1374 ~mask & 0x00007FFF); 1453 ~mask & 0x00007FFF);
1375 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1454 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1376 ~mask & 0x00007FFF); 1455 ~mask & 0x00007FFF);
1377 msleep(10); 1456 usleep_range(10000, 20000);
1378 1457
1379 if (adapter->test_icr & mask) { 1458 if (adapter->test_icr & mask) {
1380 *data = 3; 1459 *data = 3;
@@ -1391,7 +1470,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1391 adapter->test_icr = 0; 1470 adapter->test_icr = 0;
1392 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1471 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1393 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 1472 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1394 msleep(10); 1473 usleep_range(10000, 20000);
1395 1474
1396 if (!(adapter->test_icr &mask)) { 1475 if (!(adapter->test_icr &mask)) {
1397 *data = 4; 1476 *data = 4;
@@ -1411,7 +1490,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1411 ~mask & 0x00007FFF); 1490 ~mask & 0x00007FFF);
1412 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1491 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1413 ~mask & 0x00007FFF); 1492 ~mask & 0x00007FFF);
1414 msleep(10); 1493 usleep_range(10000, 20000);
1415 1494
1416 if (adapter->test_icr) { 1495 if (adapter->test_icr) {
1417 *data = 5; 1496 *data = 5;
@@ -1422,7 +1501,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1422 1501
1423 /* Disable all the interrupts */ 1502 /* Disable all the interrupts */
1424 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1503 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1425 msleep(10); 1504 usleep_range(10000, 20000);
1426 1505
1427 /* Unhook test interrupt handler */ 1506 /* Unhook test interrupt handler */
1428 free_irq(irq, netdev); 1507 free_irq(irq, netdev);
@@ -1435,9 +1514,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1435 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1514 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1436 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1515 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1437 struct ixgbe_hw *hw = &adapter->hw; 1516 struct ixgbe_hw *hw = &adapter->hw;
1438 struct pci_dev *pdev = adapter->pdev;
1439 u32 reg_ctl; 1517 u32 reg_ctl;
1440 int i;
1441 1518
1442 /* shut down the DMA engines now so they can be reinitialized later */ 1519 /* shut down the DMA engines now so they can be reinitialized later */
1443 1520
@@ -1445,237 +1522,86 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1445 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1522 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1446 reg_ctl &= ~IXGBE_RXCTRL_RXEN; 1523 reg_ctl &= ~IXGBE_RXCTRL_RXEN;
1447 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); 1524 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
1448 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0)); 1525 ixgbe_disable_rx_queue(adapter, rx_ring);
1449 reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
1450 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl);
1451 1526
1452 /* now Tx */ 1527 /* now Tx */
1453 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0)); 1528 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1454 reg_ctl &= ~IXGBE_TXDCTL_ENABLE; 1529 reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1455 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl); 1530 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1456 if (hw->mac.type == ixgbe_mac_82599EB) { 1531
1532 switch (hw->mac.type) {
1533 case ixgbe_mac_82599EB:
1534 case ixgbe_mac_X540:
1457 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1535 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1458 reg_ctl &= ~IXGBE_DMATXCTL_TE; 1536 reg_ctl &= ~IXGBE_DMATXCTL_TE;
1459 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); 1537 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1538 break;
1539 default:
1540 break;
1460 } 1541 }
1461 1542
1462 ixgbe_reset(adapter); 1543 ixgbe_reset(adapter);
1463 1544
1464 if (tx_ring->desc && tx_ring->tx_buffer_info) { 1545 ixgbe_free_tx_resources(&adapter->test_tx_ring);
1465 for (i = 0; i < tx_ring->count; i++) { 1546 ixgbe_free_rx_resources(&adapter->test_rx_ring);
1466 struct ixgbe_tx_buffer *buf =
1467 &(tx_ring->tx_buffer_info[i]);
1468 if (buf->dma)
1469 dma_unmap_single(&pdev->dev, buf->dma,
1470 buf->length, DMA_TO_DEVICE);
1471 if (buf->skb)
1472 dev_kfree_skb(buf->skb);
1473 }
1474 }
1475
1476 if (rx_ring->desc && rx_ring->rx_buffer_info) {
1477 for (i = 0; i < rx_ring->count; i++) {
1478 struct ixgbe_rx_buffer *buf =
1479 &(rx_ring->rx_buffer_info[i]);
1480 if (buf->dma)
1481 dma_unmap_single(&pdev->dev, buf->dma,
1482 IXGBE_RXBUFFER_2048,
1483 DMA_FROM_DEVICE);
1484 if (buf->skb)
1485 dev_kfree_skb(buf->skb);
1486 }
1487 }
1488
1489 if (tx_ring->desc) {
1490 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1491 tx_ring->dma);
1492 tx_ring->desc = NULL;
1493 }
1494 if (rx_ring->desc) {
1495 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1496 rx_ring->dma);
1497 rx_ring->desc = NULL;
1498 }
1499
1500 kfree(tx_ring->tx_buffer_info);
1501 tx_ring->tx_buffer_info = NULL;
1502 kfree(rx_ring->rx_buffer_info);
1503 rx_ring->rx_buffer_info = NULL;
1504} 1547}
1505 1548
1506static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) 1549static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1507{ 1550{
1508 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1551 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1509 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1552 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1510 struct pci_dev *pdev = adapter->pdev;
1511 u32 rctl, reg_data; 1553 u32 rctl, reg_data;
1512 int i, ret_val; 1554 int ret_val;
1555 int err;
1513 1556
1514 /* Setup Tx descriptor ring and Tx buffers */ 1557 /* Setup Tx descriptor ring and Tx buffers */
1558 tx_ring->count = IXGBE_DEFAULT_TXD;
1559 tx_ring->queue_index = 0;
1560 tx_ring->dev = &adapter->pdev->dev;
1561 tx_ring->netdev = adapter->netdev;
1562 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1563 tx_ring->numa_node = adapter->node;
1564
1565 err = ixgbe_setup_tx_resources(tx_ring);
1566 if (err)
1567 return 1;
1515 1568
1516 if (!tx_ring->count) 1569 switch (adapter->hw.mac.type) {
1517 tx_ring->count = IXGBE_DEFAULT_TXD; 1570 case ixgbe_mac_82599EB:
1518 1571 case ixgbe_mac_X540:
1519 tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
1520 sizeof(struct ixgbe_tx_buffer),
1521 GFP_KERNEL);
1522 if (!(tx_ring->tx_buffer_info)) {
1523 ret_val = 1;
1524 goto err_nomem;
1525 }
1526
1527 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
1528 tx_ring->size = ALIGN(tx_ring->size, 4096);
1529 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1530 &tx_ring->dma, GFP_KERNEL);
1531 if (!(tx_ring->desc)) {
1532 ret_val = 2;
1533 goto err_nomem;
1534 }
1535 tx_ring->next_to_use = tx_ring->next_to_clean = 0;
1536
1537 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
1538 ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1539 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
1540 ((u64) tx_ring->dma >> 32));
1541 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
1542 tx_ring->count * sizeof(union ixgbe_adv_tx_desc));
1543 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
1544 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
1545
1546 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1547 reg_data |= IXGBE_HLREG0_TXPADEN;
1548 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1549
1550 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1551 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); 1572 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1552 reg_data |= IXGBE_DMATXCTL_TE; 1573 reg_data |= IXGBE_DMATXCTL_TE;
1553 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); 1574 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1575 break;
1576 default:
1577 break;
1554 } 1578 }
1555 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
1556 reg_data |= IXGBE_TXDCTL_ENABLE;
1557 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
1558
1559 for (i = 0; i < tx_ring->count; i++) {
1560 union ixgbe_adv_tx_desc *desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
1561 struct sk_buff *skb;
1562 unsigned int size = 1024;
1563
1564 skb = alloc_skb(size, GFP_KERNEL);
1565 if (!skb) {
1566 ret_val = 3;
1567 goto err_nomem;
1568 }
1569 skb_put(skb, size);
1570 tx_ring->tx_buffer_info[i].skb = skb;
1571 tx_ring->tx_buffer_info[i].length = skb->len;
1572 tx_ring->tx_buffer_info[i].dma =
1573 dma_map_single(&pdev->dev, skb->data, skb->len,
1574 DMA_TO_DEVICE);
1575 desc->read.buffer_addr =
1576 cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
1577 desc->read.cmd_type_len = cpu_to_le32(skb->len);
1578 desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
1579 IXGBE_TXD_CMD_IFCS |
1580 IXGBE_TXD_CMD_RS);
1581 desc->read.olinfo_status = 0;
1582 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
1583 desc->read.olinfo_status |=
1584 (skb->len << IXGBE_ADVTXD_PAYLEN_SHIFT);
1585 1579
1586 } 1580 ixgbe_configure_tx_ring(adapter, tx_ring);
1587 1581
1588 /* Setup Rx Descriptor ring and Rx buffers */ 1582 /* Setup Rx Descriptor ring and Rx buffers */
1589 1583 rx_ring->count = IXGBE_DEFAULT_RXD;
1590 if (!rx_ring->count) 1584 rx_ring->queue_index = 0;
1591 rx_ring->count = IXGBE_DEFAULT_RXD; 1585 rx_ring->dev = &adapter->pdev->dev;
1592 1586 rx_ring->netdev = adapter->netdev;
1593 rx_ring->rx_buffer_info = kcalloc(rx_ring->count, 1587 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1594 sizeof(struct ixgbe_rx_buffer), 1588 rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
1595 GFP_KERNEL); 1589 rx_ring->numa_node = adapter->node;
1596 if (!(rx_ring->rx_buffer_info)) { 1590
1591 err = ixgbe_setup_rx_resources(rx_ring);
1592 if (err) {
1597 ret_val = 4; 1593 ret_val = 4;
1598 goto err_nomem; 1594 goto err_nomem;
1599 } 1595 }
1600 1596
1601 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
1602 rx_ring->size = ALIGN(rx_ring->size, 4096);
1603 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1604 &rx_ring->dma, GFP_KERNEL);
1605 if (!(rx_ring->desc)) {
1606 ret_val = 5;
1607 goto err_nomem;
1608 }
1609 rx_ring->next_to_use = rx_ring->next_to_clean = 0;
1610
1611 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); 1597 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1612 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); 1598 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
1613 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0),
1614 ((u64)rx_ring->dma & 0xFFFFFFFF));
1615 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0),
1616 ((u64) rx_ring->dma >> 32));
1617 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
1618 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
1619 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);
1620 1599
1621 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 1600 ixgbe_configure_rx_ring(adapter, rx_ring);
1622 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1623 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
1624
1625 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1626 reg_data &= ~IXGBE_HLREG0_LPBK;
1627 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1628
1629 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL);
1630#define IXGBE_RDRXCTL_RDMTS_MASK 0x00000003 /* Receive Descriptor Minimum
1631 Threshold Size mask */
1632 reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
1633 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);
1634
1635 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
1636#define IXGBE_MCSTCTRL_MO_MASK 0x00000003 /* Multicast Offset mask */
1637 reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
1638 reg_data |= adapter->hw.mac.mc_filter_type;
1639 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);
1640
1641 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
1642 reg_data |= IXGBE_RXDCTL_ENABLE;
1643 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
1644 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1645 int j = adapter->rx_ring[0]->reg_idx;
1646 u32 k;
1647 for (k = 0; k < 10; k++) {
1648 if (IXGBE_READ_REG(&adapter->hw,
1649 IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
1650 break;
1651 else
1652 msleep(1);
1653 }
1654 }
1655 1601
1656 rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; 1602 rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
1657 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); 1603 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1658 1604
1659 for (i = 0; i < rx_ring->count; i++) {
1660 union ixgbe_adv_rx_desc *rx_desc =
1661 IXGBE_RX_DESC_ADV(*rx_ring, i);
1662 struct sk_buff *skb;
1663
1664 skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
1665 if (!skb) {
1666 ret_val = 6;
1667 goto err_nomem;
1668 }
1669 skb_reserve(skb, NET_IP_ALIGN);
1670 rx_ring->rx_buffer_info[i].skb = skb;
1671 rx_ring->rx_buffer_info[i].dma =
1672 dma_map_single(&pdev->dev, skb->data,
1673 IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
1674 rx_desc->read.pkt_addr =
1675 cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
1676 memset(skb->data, 0x00, skb->len);
1677 }
1678
1679 return 0; 1605 return 0;
1680 1606
1681err_nomem: 1607err_nomem:
@@ -1688,17 +1614,29 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1688 struct ixgbe_hw *hw = &adapter->hw; 1614 struct ixgbe_hw *hw = &adapter->hw;
1689 u32 reg_data; 1615 u32 reg_data;
1690 1616
1691 /* right now we only support MAC loopback in the driver */ 1617 /* X540 needs to set the MACC.FLU bit to force link up */
1618 if (adapter->hw.mac.type == ixgbe_mac_X540) {
1619 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MACC);
1620 reg_data |= IXGBE_MACC_FLU;
1621 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MACC, reg_data);
1622 }
1692 1623
1693 /* Setup MAC loopback */ 1624 /* right now we only support MAC loopback in the driver */
1694 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); 1625 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1626 /* Setup MAC loopback */
1695 reg_data |= IXGBE_HLREG0_LPBK; 1627 reg_data |= IXGBE_HLREG0_LPBK;
1696 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); 1628 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1697 1629
1630 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1631 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1632 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
1633
1698 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC); 1634 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
1699 reg_data &= ~IXGBE_AUTOC_LMS_MASK; 1635 reg_data &= ~IXGBE_AUTOC_LMS_MASK;
1700 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; 1636 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
1701 IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data); 1637 IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
1638 IXGBE_WRITE_FLUSH(&adapter->hw);
1639 usleep_range(10000, 20000);
1702 1640
1703 /* Disable Atlas Tx lanes; re-enabled in reset path */ 1641 /* Disable Atlas Tx lanes; re-enabled in reset path */
1704 if (hw->mac.type == ixgbe_mac_82598EB) { 1642 if (hw->mac.type == ixgbe_mac_82598EB) {
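
The loopback bring-up now ends with IXGBE_WRITE_FLUSH() and a settle delay: the flush is a posted-write barrier (a dummy MMIO read), guaranteeing the HLREG0/FCTRL/AUTOC writes above have actually reached the device before the 10-20ms wait for the forced link begins. The generic pattern, not driver-specific:

	writel(val, hw_addr + REG);	/* posted: may still sit in a CPU/bridge
					 * write buffer after writel() returns */
	(void)readl(hw_addr + ANY_REG);	/* a read forces all earlier posted
					 * writes to complete before returning */
	usleep_range(10000, 20000);	/* only now is the settle wait meaningful */
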
@@ -1756,15 +1694,80 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
1756 return 13; 1694 return 13;
1757} 1695}
1758 1696
1697static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1698 struct ixgbe_ring *tx_ring,
1699 unsigned int size)
1700{
1701 union ixgbe_adv_rx_desc *rx_desc;
1702 struct ixgbe_rx_buffer *rx_buffer_info;
1703 struct ixgbe_tx_buffer *tx_buffer_info;
1704 const int bufsz = rx_ring->rx_buf_len;
1705 u32 staterr;
1706 u16 rx_ntc, tx_ntc, count = 0;
1707
1708 /* initialize next to clean and descriptor values */
1709 rx_ntc = rx_ring->next_to_clean;
1710 tx_ntc = tx_ring->next_to_clean;
1711 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
1712 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1713
1714 while (staterr & IXGBE_RXD_STAT_DD) {
1715 /* check Rx buffer */
1716 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
1717
1718 /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
1719 dma_unmap_single(rx_ring->dev,
1720 rx_buffer_info->dma,
1721 bufsz,
1722 DMA_FROM_DEVICE);
1723 rx_buffer_info->dma = 0;
1724
1725 /* verify contents of skb */
1726 if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size))
1727 count++;
1728
1729 /* unmap buffer on Tx side */
1730 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
1731 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1732
1733 /* increment Rx/Tx next to clean counters */
1734 rx_ntc++;
1735 if (rx_ntc == rx_ring->count)
1736 rx_ntc = 0;
1737 tx_ntc++;
1738 if (tx_ntc == tx_ring->count)
1739 tx_ntc = 0;
1740
1741 /* fetch next descriptor */
1742 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
1743 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1744 }
1745
1746 /* re-map buffers to ring, store next to clean values */
1747 ixgbe_alloc_rx_buffers(rx_ring, count);
1748 rx_ring->next_to_clean = rx_ntc;
1749 tx_ring->next_to_clean = tx_ntc;
1750
1751 return count;
1752}
1753
1759static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) 1754static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1760{ 1755{
1761 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1756 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1762 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1757 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1763 struct pci_dev *pdev = adapter->pdev; 1758 int i, j, lc, good_cnt, ret_val = 0;
1764 int i, j, k, l, lc, good_cnt, ret_val = 0; 1759 unsigned int size = 1024;
1765 unsigned long time; 1760 netdev_tx_t tx_ret_val;
1761 struct sk_buff *skb;
1762
1763 /* allocate test skb */
1764 skb = alloc_skb(size, GFP_KERNEL);
1765 if (!skb)
1766 return 11;
1766 1767
1767 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1); 1768 /* place data into test skb */
1769 ixgbe_create_lbtest_frame(skb, size);
1770 skb_put(skb, size);
1768 1771
1769 /* 1772 /*
1770 * Calculate the loop count based on the largest descriptor ring 1773 * Calculate the loop count based on the largest descriptor ring
@@ -1777,54 +1780,38 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1777 else 1780 else
1778 lc = ((rx_ring->count / 64) * 2) + 1; 1781 lc = ((rx_ring->count / 64) * 2) + 1;
1779 1782
1780 k = l = 0;
1781 for (j = 0; j <= lc; j++) { 1783 for (j = 0; j <= lc; j++) {
1782 for (i = 0; i < 64; i++) { 1784 /* reset count of good packets */
1783 ixgbe_create_lbtest_frame(
1784 tx_ring->tx_buffer_info[k].skb,
1785 1024);
1786 dma_sync_single_for_device(&pdev->dev,
1787 tx_ring->tx_buffer_info[k].dma,
1788 tx_ring->tx_buffer_info[k].length,
1789 DMA_TO_DEVICE);
1790 if (unlikely(++k == tx_ring->count))
1791 k = 0;
1792 }
1793 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
1794 msleep(200);
1795 /* set the start time for the receive */
1796 time = jiffies;
1797 good_cnt = 0; 1785 good_cnt = 0;
1798 do { 1786
1799 /* receive the sent packets */ 1787 /* place 64 packets on the transmit queue*/
1800 dma_sync_single_for_cpu(&pdev->dev, 1788 for (i = 0; i < 64; i++) {
1801 rx_ring->rx_buffer_info[l].dma, 1789 skb_get(skb);
1802 IXGBE_RXBUFFER_2048, 1790 tx_ret_val = ixgbe_xmit_frame_ring(skb,
1803 DMA_FROM_DEVICE); 1791 adapter,
1804 ret_val = ixgbe_check_lbtest_frame( 1792 tx_ring);
1805 rx_ring->rx_buffer_info[l].skb, 1024); 1793 if (tx_ret_val == NETDEV_TX_OK)
1806 if (!ret_val)
1807 good_cnt++; 1794 good_cnt++;
1808 if (++l == rx_ring->count) 1795 }
1809 l = 0; 1796
1810 /*
1811 * time + 20 msecs (200 msecs on 2.4) is more than
1812 * enough time to complete the receives, if it's
1813 * exceeded, break and error off
1814 */
1815 } while (good_cnt < 64 && jiffies < (time + 20));
1816 if (good_cnt != 64) { 1797 if (good_cnt != 64) {
1817 /* ret_val is the same as mis-compare */ 1798 ret_val = 12;
1818 ret_val = 13;
1819 break; 1799 break;
1820 } 1800 }
1821 if (jiffies >= (time + 20)) { 1801
1822 /* Error code for time out error */ 1802 /* allow 200 milliseconds for packets to go from Tx to Rx */
1823 ret_val = 14; 1803 msleep(200);
1804
1805 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
1806 if (good_cnt != 64) {
1807 ret_val = 13;
1824 break; 1808 break;
1825 } 1809 }
1826 } 1810 }
1827 1811
1812 /* free the original skb */
1813 kfree_skb(skb);
1814
1828 return ret_val; 1815 return ret_val;
1829} 1816}
1830 1817
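
ixgbe_run_loopback_test() now takes an extra reference on one pre-built 1024-byte frame with skb_get() per transmit instead of pre-stuffing every descriptor, and defers all receive checking to ixgbe_clean_test_rings(). The loop count keeps its old intent, scaling with the larger ring so every descriptor is exercised about twice; assuming the default 512-entry rings (IXGBE_DEFAULT_TXD/RXD, not shown in this hunk):

	lc = ((512 / 64) * 2) + 1;	/* = 17 rounds of 64 frames = 1088 frames,
					 * comfortably > 2 * 512 descriptors */
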
@@ -1947,7 +1934,25 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
1947 struct ixgbe_hw *hw = &adapter->hw; 1934 struct ixgbe_hw *hw = &adapter->hw;
1948 int retval = 1; 1935 int retval = 1;
1949 1936
1937 /* WOL not supported except for the following */
1950 switch (hw->device_id) { 1938 switch (hw->device_id) {
1939 case IXGBE_DEV_ID_82599_SFP:
1940 /* Only this subdevice supports WOL */
1941 if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
1942 wol->supported = 0;
1943 break;
1944 }
1945 retval = 0;
1946 break;
1947 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
1948 /* All except this subdevice support WOL */
1949 if (hw->subsystem_device_id ==
1950 IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
1951 wol->supported = 0;
1952 break;
1953 }
1954 retval = 0;
1955 break;
1951 case IXGBE_DEV_ID_82599_KX4: 1956 case IXGBE_DEV_ID_82599_KX4:
1952 retval = 0; 1957 retval = 0;
1953 break; 1958 break;
@@ -2017,25 +2022,30 @@ static int ixgbe_nway_reset(struct net_device *netdev)
2017 return 0; 2022 return 0;
2018} 2023}
2019 2024
2020static int ixgbe_phys_id(struct net_device *netdev, u32 data) 2025static int ixgbe_set_phys_id(struct net_device *netdev,
2026 enum ethtool_phys_id_state state)
2021{ 2027{
2022 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2028 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2023 struct ixgbe_hw *hw = &adapter->hw; 2029 struct ixgbe_hw *hw = &adapter->hw;
2024 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2025 u32 i;
2026 2030
2027 if (!data || data > 300) 2031 switch (state) {
2028 data = 300; 2032 case ETHTOOL_ID_ACTIVE:
2033 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2034 return 2;
2029 2035
2030 for (i = 0; i < (data * 1000); i += 400) { 2036 case ETHTOOL_ID_ON:
2031 hw->mac.ops.led_on(hw, IXGBE_LED_ON); 2037 hw->mac.ops.led_on(hw, IXGBE_LED_ON);
2032 msleep_interruptible(200); 2038 break;
2039
2040 case ETHTOOL_ID_OFF:
2033 hw->mac.ops.led_off(hw, IXGBE_LED_ON); 2041 hw->mac.ops.led_off(hw, IXGBE_LED_ON);
2034 msleep_interruptible(200); 2042 break;
2035 }
2036 2043
2037 /* Restore LED settings */ 2044 case ETHTOOL_ID_INACTIVE:
2038 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, led_reg); 2045 /* Restore LED settings */
2046 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
2047 break;
2048 }
2039 2049
2040 return 0; 2050 return 0;
2041} 2051}
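
ixgbe_set_phys_id() moves the identify-blink loop out of the driver: instead of sleeping in msleep_interruptible() for up to 300 seconds, the driver saves LEDCTL, reports that it wants 2 blink cycles per second, and services the callbacks the ethtool core generates. Roughly how the core consumes the return value (paraphrased, not verbatim from net/core/ethtool.c):

	rc = ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);
	/* rc > 0: the core alternates ETHTOOL_ID_ON / ETHTOOL_ID_OFF at rc
	 * cycles per second for the requested duration, then finishes with
	 * ETHTOOL_ID_INACTIVE so the driver restores the saved LEDCTL */
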
@@ -2085,6 +2095,41 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
2085 return 0; 2095 return 0;
2086} 2096}
2087 2097
2098/*
2099 * this function must be called before setting the new value of
2100 * rx_itr_setting
2101 */
2102static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
2103 struct ethtool_coalesce *ec)
2104{
2105 struct net_device *netdev = adapter->netdev;
2106
2107 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
2108 return false;
2109
2110 /* if interrupt rate is too high then disable RSC */
2111 if (ec->rx_coalesce_usecs != 1 &&
2112 ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) {
2113 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2114 e_info(probe, "rx-usecs set too low, "
2115 "disabling RSC\n");
2116 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2117 return true;
2118 }
2119 } else {
2120 /* check the feature flag value and enable RSC if necessary */
2121 if ((netdev->features & NETIF_F_LRO) &&
2122 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2123 e_info(probe, "rx-usecs set to %d, "
2124 "re-enabling RSC\n",
2125 ec->rx_coalesce_usecs);
2126 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2127 return true;
2128 }
2129 }
2130 return false;
2131}
2132
2088static int ixgbe_set_coalesce(struct net_device *netdev, 2133static int ixgbe_set_coalesce(struct net_device *netdev,
2089 struct ethtool_coalesce *ec) 2134 struct ethtool_coalesce *ec)
2090{ 2135{
@@ -2102,17 +2147,14 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2102 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq; 2147 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
2103 2148
2104 if (ec->rx_coalesce_usecs > 1) { 2149 if (ec->rx_coalesce_usecs > 1) {
2105 u32 max_int;
2106 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
2107 max_int = IXGBE_MAX_RSC_INT_RATE;
2108 else
2109 max_int = IXGBE_MAX_INT_RATE;
2110
2111 /* check the limits */ 2150 /* check the limits */
2112 if ((1000000/ec->rx_coalesce_usecs > max_int) || 2151 if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
2113 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) 2152 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
2114 return -EINVAL; 2153 return -EINVAL;
2115 2154
2155 /* check the old value and enable RSC if necessary */
2156 need_reset = ixgbe_update_rsc(adapter, ec);
2157
2116 /* store the value in ints/second */ 2158 /* store the value in ints/second */
2117 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; 2159 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
2118 2160
@@ -2121,32 +2163,21 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2121 /* clear the lower bit as it's used for dynamic state */ 2163 /* clear the lower bit as it's used for dynamic state */
2122 adapter->rx_itr_setting &= ~1; 2164 adapter->rx_itr_setting &= ~1;
2123 } else if (ec->rx_coalesce_usecs == 1) { 2165 } else if (ec->rx_coalesce_usecs == 1) {
2166 /* check the old value and enable RSC if necessary */
2167 need_reset = ixgbe_update_rsc(adapter, ec);
2168
2124 /* 1 means dynamic mode */ 2169 /* 1 means dynamic mode */
2125 adapter->rx_eitr_param = 20000; 2170 adapter->rx_eitr_param = 20000;
2126 adapter->rx_itr_setting = 1; 2171 adapter->rx_itr_setting = 1;
2127 } else { 2172 } else {
2173 /* check the old value and enable RSC if necessary */
2174 need_reset = ixgbe_update_rsc(adapter, ec);
2128 /* 2175 /*
2129 * any other value means disable eitr, which is best 2176 * any other value means disable eitr, which is best
2130 * served by setting the interrupt rate very high 2177 * served by setting the interrupt rate very high
2131 */ 2178 */
2132 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE; 2179 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
2133 adapter->rx_itr_setting = 0; 2180 adapter->rx_itr_setting = 0;
2134
2135 /*
2136 * if hardware RSC is enabled, disable it when
2137 * setting low latency mode, to avoid errata, assuming
2138 * that when the user set low latency mode they want
2139 * it at the cost of anything else
2140 */
2141 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2142 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2143 if (netdev->features & NETIF_F_LRO) {
2144 netdev->features &= ~NETIF_F_LRO;
2145 e_info(probe, "rx-usecs set to 0, "
2146 "disabling RSC\n");
2147 }
2148 need_reset = true;
2149 }
2150 } 2181 }
2151 2182
2152 if (ec->tx_coalesce_usecs > 1) { 2183 if (ec->tx_coalesce_usecs > 1) {
@@ -2218,33 +2249,59 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
2218 bool need_reset = false; 2249 bool need_reset = false;
2219 int rc; 2250 int rc;
2220 2251
2221 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE); 2252#ifdef CONFIG_IXGBE_DCB
2253 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
2254 !(data & ETH_FLAG_RXVLAN))
2255 return -EINVAL;
2256#endif
2257
2258 need_reset = (data & ETH_FLAG_RXVLAN) !=
2259 (netdev->features & NETIF_F_HW_VLAN_RX);
2260
2261 if ((data & ETH_FLAG_RXHASH) &&
2262 !(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
2263 return -EOPNOTSUPP;
2264
2265 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
2266 ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
2267 ETH_FLAG_RXHASH);
2222 if (rc) 2268 if (rc)
2223 return rc; 2269 return rc;
2224 2270
2225 /* if state changes we need to update adapter->flags and reset */ 2271 /* if state changes we need to update adapter->flags and reset */
2226 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { 2272 if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
2227 /* 2273 (!!(data & ETH_FLAG_LRO) !=
2228 * cast both to bool and verify if they are set the same 2274 !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
2229 * but only enable RSC if itr is non-zero, as 2275 if ((data & ETH_FLAG_LRO) &&
2230 * itr=0 and RSC are mutually exclusive 2276 (!adapter->rx_itr_setting ||
2231 */ 2277 (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
2232 if (((!!(data & ETH_FLAG_LRO)) != 2278 e_info(probe, "rx-usecs set too low, "
2233 (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) && 2279 "not enabling RSC.\n");
2234 adapter->rx_itr_setting) { 2280 } else {
2235 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; 2281 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
2236 switch (adapter->hw.mac.type) { 2282 switch (adapter->hw.mac.type) {
2237 case ixgbe_mac_82599EB: 2283 case ixgbe_mac_82599EB:
2238 need_reset = true; 2284 need_reset = true;
2239 break; 2285 break;
2286 case ixgbe_mac_X540: {
2287 int i;
2288 for (i = 0; i < adapter->num_rx_queues; i++) {
2289 struct ixgbe_ring *ring =
2290 adapter->rx_ring[i];
2291 if (adapter->flags2 &
2292 IXGBE_FLAG2_RSC_ENABLED) {
2293 ixgbe_configure_rscctl(adapter,
2294 ring);
2295 } else {
2296 ixgbe_clear_rscctl(adapter,
2297 ring);
2298 }
2299 }
2300 }
2301 break;
2240 default: 2302 default:
2241 break; 2303 break;
2242 } 2304 }
2243 } else if (!adapter->rx_itr_setting) {
2244 netdev->features &= ~NETIF_F_LRO;
2245 if (data & ETH_FLAG_LRO)
2246 e_info(probe, "rx-usecs set to 0, "
2247 "LRO/RSC cannot be enabled.\n");
2248 } 2305 }
2249 } 2306 }
2250 2307
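
The LRO branch above leans on the double-negation idiom to compare a requested ethtool flag against the driver's internal flag, which live at different bit positions: !! collapses any non-zero mask to exactly 1, so the inequality tests "on-ness" rather than bit patterns. In isolation (bit values illustrative):

	data & ETH_FLAG_LRO                 /* e.g. 0x8000 when requested */
	flags2 & IXGBE_FLAG2_RSC_ENABLED    /* e.g. 0x02 when active */
	/* 0x8000 != 0x02 even though both mean "enabled";
	 * !!0x8000 == 1 == !!0x02, so the != only fires on real changes */
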
@@ -2282,10 +2339,11 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
2282 struct ethtool_rx_ntuple *cmd) 2339 struct ethtool_rx_ntuple *cmd)
2283{ 2340{
2284 struct ixgbe_adapter *adapter = netdev_priv(dev); 2341 struct ixgbe_adapter *adapter = netdev_priv(dev);
2285 struct ethtool_rx_ntuple_flow_spec fs = cmd->fs; 2342 struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
2286 struct ixgbe_atr_input input_struct; 2343 union ixgbe_atr_input input_struct;
2287 struct ixgbe_atr_input_masks input_masks; 2344 struct ixgbe_atr_input_masks input_masks;
2288 int target_queue; 2345 int target_queue;
2346 int err;
2289 2347
2290 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 2348 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2291 return -EOPNOTSUPP; 2349 return -EOPNOTSUPP;
@@ -2294,67 +2352,122 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
2294 * Don't allow programming if the action is a queue greater than 2352 * Don't allow programming if the action is a queue greater than
2295 * the number of online Tx queues. 2353 * the number of online Tx queues.
2296 */ 2354 */
2297 if ((fs.action >= adapter->num_tx_queues) || 2355 if ((fs->action >= adapter->num_tx_queues) ||
2298 (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP)) 2356 (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
2299 return -EINVAL; 2357 return -EINVAL;
2300 2358
2301 memset(&input_struct, 0, sizeof(struct ixgbe_atr_input)); 2359 memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
2302 memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks)); 2360 memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
2303 2361
2304 input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src; 2362 /* record flow type */
2305 input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst; 2363 switch (fs->flow_type) {
2306 input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc; 2364 case IPV4_FLOW:
2307 input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst; 2365 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2308 input_masks.vlan_id_mask = fs.vlan_tag_mask; 2366 break;
2309 /* only use the lowest 2 bytes for flex bytes */
2310 input_masks.data_mask = (fs.data_mask & 0xffff);
2311
2312 switch (fs.flow_type) {
2313 case TCP_V4_FLOW: 2367 case TCP_V4_FLOW:
2314 ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP); 2368 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2315 break; 2369 break;
2316 case UDP_V4_FLOW: 2370 case UDP_V4_FLOW:
2317 ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP); 2371 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2318 break; 2372 break;
2319 case SCTP_V4_FLOW: 2373 case SCTP_V4_FLOW:
2320 ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP); 2374 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2321 break; 2375 break;
2322 default: 2376 default:
2323 return -1; 2377 return -1;
2324 } 2378 }
2325 2379
2326 /* Mask bits from the inputs based on user-supplied mask */ 2380 /* copy vlan tag minus the CFI bit */
2327 ixgbe_atr_set_src_ipv4_82599(&input_struct, 2381 if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
2328 (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src)); 2382 input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
2329 ixgbe_atr_set_dst_ipv4_82599(&input_struct, 2383 if (!fs->vlan_tag_mask) {
2330 (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst)); 2384 input_masks.vlan_id_mask = htons(0xEFFF);
2331 /* 82599 expects these to be byte-swapped for perfect filtering */ 2385 } else {
2332 ixgbe_atr_set_src_port_82599(&input_struct, 2386 switch (~fs->vlan_tag_mask & 0xEFFF) {
2333 ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc)); 2387 /* all of these are valid vlan-mask values */
2334 ixgbe_atr_set_dst_port_82599(&input_struct, 2388 case 0xEFFF:
2335 ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst)); 2389 case 0xE000:
2336 2390 case 0x0FFF:
2337 /* VLAN and Flex bytes are either completely masked or not */ 2391 case 0x0000:
2338 if (!fs.vlan_tag_mask) 2392 input_masks.vlan_id_mask =
2339 ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag); 2393 htons(~fs->vlan_tag_mask);
2340 2394 break;
2341 if (!input_masks.data_mask) 2395 /* exit with error if vlan-mask is invalid */
2342 /* make sure we only use the first 2 bytes of user data */ 2396 default:
2343 ixgbe_atr_set_flex_byte_82599(&input_struct, 2397 e_err(drv, "Partial VLAN ID or "
2344 (fs.data & 0xffff)); 2398 "priority mask in vlan-mask is not "
2399 "supported by hardware\n");
2400 return -1;
2401 }
2402 }
2403 }
2404
2405 /* make sure we only use the first 2 bytes of user data */
2406 if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
2407 input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
2408 if (!(fs->data_mask & 0xFFFF)) {
2409 input_masks.flex_mask = 0xFFFF;
2410 } else if (~fs->data_mask & 0xFFFF) {
2411 e_err(drv, "Partial user-def-mask is not "
2412 "supported by hardware\n");
2413 return -1;
2414 }
2415 }
2416
2417 /*
2418 * Copy input into formatted structures
2419 *
 2420 * These assignments are based on the following logic:
 2421 * If neither the input nor the mask is set, assume the value is masked out.
 2422 * If the input is set but the mask is not, the mask should default to accept all.
 2423 * If the input is not set but the mask is set, the mask likely results in 0.
 2424 * If both the input and the mask are set, assign both.
2425 */
2426 if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
2427 input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
2428 if (!fs->m_u.tcp_ip4_spec.ip4src)
2429 input_masks.src_ip_mask[0] = 0xFFFFFFFF;
2430 else
2431 input_masks.src_ip_mask[0] =
2432 ~fs->m_u.tcp_ip4_spec.ip4src;
2433 }
2434 if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
2435 input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
2436 if (!fs->m_u.tcp_ip4_spec.ip4dst)
2437 input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
2438 else
2439 input_masks.dst_ip_mask[0] =
2440 ~fs->m_u.tcp_ip4_spec.ip4dst;
2441 }
2442 if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
2443 input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
2444 if (!fs->m_u.tcp_ip4_spec.psrc)
2445 input_masks.src_port_mask = 0xFFFF;
2446 else
2447 input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
2448 }
2449 if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
2450 input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
2451 if (!fs->m_u.tcp_ip4_spec.pdst)
2452 input_masks.dst_port_mask = 0xFFFF;
2453 else
2454 input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
2455 }
2345 2456
2346 /* determine if we need to drop or route the packet */ 2457 /* determine if we need to drop or route the packet */
2347 if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) 2458 if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
2348 target_queue = MAX_RX_QUEUES - 1; 2459 target_queue = MAX_RX_QUEUES - 1;
2349 else 2460 else
2350 target_queue = fs.action; 2461 target_queue = fs->action;
2351 2462
2352 spin_lock(&adapter->fdir_perfect_lock); 2463 spin_lock(&adapter->fdir_perfect_lock);
2353 ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct, 2464 err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
2354 &input_masks, 0, target_queue); 2465 &input_struct,
2466 &input_masks, 0,
2467 target_queue);
2355 spin_unlock(&adapter->fdir_perfect_lock); 2468 spin_unlock(&adapter->fdir_perfect_lock);
2356 2469
2357 return 0; 2470 return err ? -1 : 0;
2358} 2471}
2359 2472
2360static const struct ethtool_ops ixgbe_ethtool_ops = { 2473static const struct ethtool_ops ixgbe_ethtool_ops = {
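
All the field-copy blocks above implement one convention: ethtool ntuple masks set bits to ignore, while the 82599 perfect-filter masks set bits to compare, hence the inversions, with a full compare mask substituted when the user supplied a value but no mask at all. Condensed into one hypothetical helper for a 32-bit field:

	/* sketch: ethtool "1 = don't care" -> 82599 "1 = compare" */
	static u32 ntuple_mask_to_hw(u32 value, u32 mask)
	{
		if (!value && !~mask)
			return 0;		/* untouched field: masked out */
		if (!mask)
			return 0xFFFFFFFF;	/* value, no mask: compare all bits */
		return ~mask;			/* invert the don't-care bits */
	}
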
@@ -2385,7 +2498,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
2385 .set_tso = ixgbe_set_tso, 2498 .set_tso = ixgbe_set_tso,
2386 .self_test = ixgbe_diag_test, 2499 .self_test = ixgbe_diag_test,
2387 .get_strings = ixgbe_get_strings, 2500 .get_strings = ixgbe_get_strings,
2388 .phys_id = ixgbe_phys_id, 2501 .set_phys_id = ixgbe_set_phys_id,
2389 .get_sset_count = ixgbe_get_sset_count, 2502 .get_sset_count = ixgbe_get_sset_count,
2390 .get_ethtool_stats = ixgbe_get_ethtool_stats, 2503 .get_ethtool_stats = ixgbe_get_ethtool_stats,
2391 .get_coalesce = ixgbe_get_coalesce, 2504 .get_coalesce = ixgbe_get_coalesce,
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 072327c5e41a..05920726e824 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -68,7 +68,7 @@ static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
68static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) 68static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
69{ 69{
70 ddp->len = 0; 70 ddp->len = 0;
71 ddp->err = 0; 71 ddp->err = 1;
72 ddp->udl = NULL; 72 ddp->udl = NULL;
73 ddp->udp = 0UL; 73 ddp->udp = 0UL;
74 ddp->sgl = NULL; 74 ddp->sgl = NULL;
@@ -92,6 +92,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
92 struct ixgbe_fcoe *fcoe; 92 struct ixgbe_fcoe *fcoe;
93 struct ixgbe_adapter *adapter; 93 struct ixgbe_adapter *adapter;
94 struct ixgbe_fcoe_ddp *ddp; 94 struct ixgbe_fcoe_ddp *ddp;
95 u32 fcbuff;
95 96
96 if (!netdev) 97 if (!netdev)
97 goto out_ddp_put; 98 goto out_ddp_put;
@@ -115,7 +116,14 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
115 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0); 116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, 117 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
117 (xid | IXGBE_FCDMARW_WE)); 118 (xid | IXGBE_FCDMARW_WE));
119
120 /* guaranteed to be invalidated after 100us */
121 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
122 (xid | IXGBE_FCDMARW_RE));
123 fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
118 spin_unlock_bh(&fcoe->lock); 124 spin_unlock_bh(&fcoe->lock);
125 if (fcbuff & IXGBE_FCBUFF_VALID)
126 udelay(100);
119 } 127 }
120 if (ddp->sgl) 128 if (ddp->sgl)
121 pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc, 129 pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
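
The added read-back closes a DMA race in ddp_put: after requesting invalidation of the DDP context, the driver latches the context for readback and samples FCBUFF; only if the hardware still reports it valid does it burn the documented 100us worst case before the scatterlist is unmapped underneath it. The handshake in order:

	/* 1) FCDMARW <- xid | WE : request context invalidation
	 * 2) FCDMARW <- xid | RE : latch the context for readback
	 * 3) FCBUFF still VALID? : hardware has not retired it yet
	 * 4) udelay(100)         : guaranteed invalid after 100us
	 * 5) pci_unmap_sg()      : only now is the DMA mapping dead */
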
@@ -127,22 +135,19 @@ out_ddp_put:
127 return len; 135 return len;
128} 136}
129 137
138
130/** 139/**
131 * ixgbe_fcoe_ddp_get - called to set up ddp context 140 * ixgbe_fcoe_ddp_setup - called to set up ddp context
132 * @netdev: the corresponding net_device 141 * @netdev: the corresponding net_device
133 * @xid: the exchange id requesting ddp 142 * @xid: the exchange id requesting ddp
134 * @sgl: the scatter-gather list for this request 143 * @sgl: the scatter-gather list for this request
135 * @sgc: the number of scatter-gather items 144 * @sgc: the number of scatter-gather items
136 * 145 *
137 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
138 * and is expected to be called from ULD, e.g., FCP layer of libfc
139 * to set up ddp for the corresponding xid of the given sglist for
140 * the corresponding I/O.
141 *
142 * Returns : 1 for success and 0 for no ddp 146 * Returns : 1 for success and 0 for no ddp
143 */ 147 */
144int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, 148static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
145 struct scatterlist *sgl, unsigned int sgc) 149 struct scatterlist *sgl, unsigned int sgc,
150 int target_mode)
146{ 151{
147 struct ixgbe_adapter *adapter; 152 struct ixgbe_adapter *adapter;
148 struct ixgbe_hw *hw; 153 struct ixgbe_hw *hw;
@@ -151,13 +156,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
151 struct scatterlist *sg; 156 struct scatterlist *sg;
152 unsigned int i, j, dmacount; 157 unsigned int i, j, dmacount;
153 unsigned int len; 158 unsigned int len;
154 static const unsigned int bufflen = 4096; 159 static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
155 unsigned int firstoff = 0; 160 unsigned int firstoff = 0;
156 unsigned int lastsize; 161 unsigned int lastsize;
157 unsigned int thisoff = 0; 162 unsigned int thisoff = 0;
158 unsigned int thislen = 0; 163 unsigned int thislen = 0;
159 u32 fcbuff, fcdmarw, fcfltrw; 164 u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
160 dma_addr_t addr; 165 dma_addr_t addr = 0;
161 166
162 if (!netdev || !sgl) 167 if (!netdev || !sgl)
163 return 0; 168 return 0;
@@ -168,6 +173,11 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
168 return 0; 173 return 0;
169 } 174 }
170 175
176 /* no DDP if we are already down or resetting */
177 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
178 test_bit(__IXGBE_RESETTING, &adapter->state))
179 return 0;
180
171 fcoe = &adapter->fcoe; 181 fcoe = &adapter->fcoe;
172 if (!fcoe->pool) { 182 if (!fcoe->pool) {
173 e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); 183 e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
@@ -241,9 +251,30 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
241 /* only the last buffer may have non-full bufflen */ 251 /* only the last buffer may have non-full bufflen */
242 lastsize = thisoff + thislen; 252 lastsize = thisoff + thislen;
243 253
254 /*
 255 * lastsize cannot equal bufflen;
 256 * if it does, add another buffer and set lastsize = 1.
257 */
258 if (lastsize == bufflen) {
259 if (j >= IXGBE_BUFFCNT_MAX) {
260 e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
261 "not enough user buffers. We need an extra "
262 "buffer because lastsize is bufflen.\n",
263 xid, i, j, dmacount, (u64)addr);
264 goto out_noddp_free;
265 }
266
267 ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
268 j++;
269 lastsize = 1;
270 }
271
244 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); 272 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
245 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); 273 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
246 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); 274 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
275 /* Set WRCONTX bit to allow DDP for target */
276 if (target_mode)
277 fcbuff |= (IXGBE_FCBUFF_WRCONTX);
247 fcbuff |= (IXGBE_FCBUFF_VALID); 278 fcbuff |= (IXGBE_FCBUFF_VALID);
248 279
249 fcdmarw = xid; 280 fcdmarw = xid;
@@ -256,6 +287,16 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
256 /* program DMA context */ 287 /* program DMA context */
257 hw = &adapter->hw; 288 hw = &adapter->hw;
258 spin_lock_bh(&fcoe->lock); 289 spin_lock_bh(&fcoe->lock);
290
291 /* turn on last frame indication for target mode as FCP_RSPtarget is
292 * supposed to send FCP_RSP when it is done. */
293 if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
294 set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
295 fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
296 fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
297 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
298 }
299
259 IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); 300 IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
260 IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); 301 IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
261 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); 302 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
@@ -264,6 +305,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
264 IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); 305 IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
265 IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); 306 IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
266 IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); 307 IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
308
267 spin_unlock_bh(&fcoe->lock); 309 spin_unlock_bh(&fcoe->lock);
268 310
269 return 1; 311 return 1;
@@ -278,6 +320,47 @@ out_noddp_unmap:
278} 320}
279 321
280/** 322/**
323 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
324 * @netdev: the corresponding net_device
325 * @xid: the exchange id requesting ddp
326 * @sgl: the scatter-gather list for this request
327 * @sgc: the number of scatter-gather items
328 *
329 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
330 * and is expected to be called from ULD, e.g., FCP layer of libfc
331 * to set up ddp for the corresponding xid of the given sglist for
332 * the corresponding I/O.
333 *
334 * Returns : 1 for success and 0 for no ddp
335 */
336int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
337 struct scatterlist *sgl, unsigned int sgc)
338{
339 return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
340}
341
342/**
343 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
344 * @netdev: the corresponding net_device
345 * @xid: the exchange id requesting ddp
346 * @sgl: the scatter-gather list for this request
347 * @sgc: the number of scatter-gather items
348 *
349 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
350 * and is expected to be called from ULD, e.g., FCP layer of libfc
351 * to set up ddp for the corresponding xid of the given sglist for
352 * the corresponding I/O. The DDP in target mode is a write I/O request
353 * from the initiator.
354 *
355 * Returns : 1 for success and 0 for no ddp
356 */
357int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
358 struct scatterlist *sgl, unsigned int sgc)
359{
360 return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
361}
362
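
The two wrappers above differ only in the final target_mode argument to ixgbe_fcoe_ddp_setup(). A minimal sketch of how they are meant to be consumed, assuming the usual ixgbe_netdev_ops table in ixgbe_main.c and the existing ixgbe_fcoe_ddp_put() teardown hook:

	static const struct net_device_ops ixgbe_netdev_ops = {
		/* ... other ops elided ... */
		.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,	 /* read I/O */
		.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target, /* write I/O */
		.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
	};
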
363/**
281 * ixgbe_fcoe_ddp - check ddp status and mark it done 364 * ixgbe_fcoe_ddp - check ddp status and mark it done
282 * @adapter: ixgbe adapter 365 * @adapter: ixgbe adapter
283 * @rx_desc: advanced rx descriptor 366 * @rx_desc: advanced rx descriptor
@@ -300,16 +383,18 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
300 struct ixgbe_fcoe *fcoe; 383 struct ixgbe_fcoe *fcoe;
301 struct ixgbe_fcoe_ddp *ddp; 384 struct ixgbe_fcoe_ddp *ddp;
302 struct fc_frame_header *fh; 385 struct fc_frame_header *fh;
386 struct fcoe_crc_eof *crc;
303 387
304 if (!ixgbe_rx_is_fcoe(rx_desc)) 388 if (!ixgbe_rx_is_fcoe(rx_desc))
305 goto ddp_out; 389 goto ddp_out;
306 390
307 skb->ip_summed = CHECKSUM_UNNECESSARY;
308 sterr = le32_to_cpu(rx_desc->wb.upper.status_error); 391 sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
309 fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR); 392 fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
310 fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE); 393 fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
311 if (fcerr == IXGBE_FCERR_BADCRC) 394 if (fcerr == IXGBE_FCERR_BADCRC)
312 skb->ip_summed = CHECKSUM_NONE; 395 skb_checksum_none_assert(skb);
396 else
397 skb->ip_summed = CHECKSUM_UNNECESSARY;
313 398
314 if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) 399 if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
315 fh = (struct fc_frame_header *)(skb->data + 400 fh = (struct fc_frame_header *)(skb->data +
@@ -331,8 +416,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
331 if (!ddp->udl) 416 if (!ddp->udl)
332 goto ddp_out; 417 goto ddp_out;
333 418
334 ddp->err = (fcerr | fceofe); 419 if (fcerr | fceofe)
335 if (ddp->err)
336 goto ddp_out; 420 goto ddp_out;
337 421
338 fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT); 422 fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT);
@@ -343,6 +427,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
343 if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) { 427 if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
344 pci_unmap_sg(adapter->pdev, ddp->sgl, 428 pci_unmap_sg(adapter->pdev, ddp->sgl,
345 ddp->sgc, DMA_FROM_DEVICE); 429 ddp->sgc, DMA_FROM_DEVICE);
430 ddp->err = (fcerr | fceofe);
346 ddp->sgl = NULL; 431 ddp->sgl = NULL;
347 ddp->sgc = 0; 432 ddp->sgc = 0;
348 } 433 }
@@ -352,7 +437,18 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
352 else if (ddp->len) 437 else if (ddp->len)
353 rc = ddp->len; 438 rc = ddp->len;
354 } 439 }
355 440 /* In target mode, check the last data frame of the sequence.
441 * For DDP in target mode, data is already DDPed but the header
 442 * indication of the last data frame would allow us to tell if we
 443 * got all the data and the ULP can send FCP_RSP back. As this is
444 * not a full fcoe frame, we fill the trailer here so it won't be
445 * dropped by the ULP stack.
446 */
447 if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
448 (fctl & FC_FC_END_SEQ)) {
449 crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
450 crc->fcoe_eof = FC_EOF_T;
451 }
356ddp_out: 452ddp_out:
357 return rc; 453 return rc;
358} 454}
@@ -471,7 +567,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
471 567
472 /* write context desc */ 568 /* write context desc */
473 i = tx_ring->next_to_use; 569 i = tx_ring->next_to_use;
474 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); 570 context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
475 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 571 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
476 context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); 572 context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
477 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 573 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
@@ -518,6 +614,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
518 e_err(drv, "failed to allocate FCoE DDP pool\n"); 614
519 615
520 spin_lock_init(&fcoe->lock); 616 spin_lock_init(&fcoe->lock);
617
618 /* Extra buffer to be shared by all DDPs for HW work around */
619 fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
620 if (fcoe->extra_ddp_buffer == NULL) {
 621 e_err(drv, "failed to allocate extra DDP buffer\n");
622 goto out_extra_ddp_buffer_alloc;
623 }
624
625 fcoe->extra_ddp_buffer_dma =
626 dma_map_single(&adapter->pdev->dev,
627 fcoe->extra_ddp_buffer,
628 IXGBE_FCBUFF_MIN,
629 DMA_FROM_DEVICE);
630 if (dma_mapping_error(&adapter->pdev->dev,
631 fcoe->extra_ddp_buffer_dma)) {
632 e_err(drv, "failed to map extra DDP buffer\n");
633 goto out_extra_ddp_buffer_dma;
634 }
521 } 635 }
522 636
523 /* Enable L2 eth type filter for FCoE */ 637 /* Enable L2 eth type filter for FCoE */
@@ -567,6 +681,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
567 } 681 }
568 } 682 }
569#endif 683#endif
684
685 return;
686
687out_extra_ddp_buffer_dma:
688 kfree(fcoe->extra_ddp_buffer);
689out_extra_ddp_buffer_alloc:
690 pci_pool_destroy(fcoe->pool);
691 fcoe->pool = NULL;
570} 692}
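
The two labels above follow the usual kernel unwind pattern: each failure path jumps to the label that releases everything allocated before the failing step, in reverse order. The shape of the pattern in isolation (names illustrative, not driver symbols):

	static int example_setup(struct device *dev, struct example_ctx *c)
	{
		c->buf = kmalloc(EXAMPLE_BUF_LEN, GFP_ATOMIC);
		if (!c->buf)
			goto out;

		c->buf_dma = dma_map_single(dev, c->buf, EXAMPLE_BUF_LEN,
					    DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, c->buf_dma))
			goto out_free_buf;

		return 0;

	out_free_buf:
		kfree(c->buf);		/* undo the first step */
	out:
		return -ENOMEM;
	}
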
571 693
572/** 694/**
@@ -586,6 +708,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
586 if (fcoe->pool) { 708 if (fcoe->pool) {
587 for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) 709 for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
588 ixgbe_fcoe_ddp_put(adapter->netdev, i); 710 ixgbe_fcoe_ddp_put(adapter->netdev, i);
711 dma_unmap_single(&adapter->pdev->dev,
712 fcoe->extra_ddp_buffer_dma,
713 IXGBE_FCBUFF_MIN,
714 DMA_FROM_DEVICE);
715 kfree(fcoe->extra_ddp_buffer);
589 pci_pool_destroy(fcoe->pool); 716 pci_pool_destroy(fcoe->pool);
590 fcoe->pool = NULL; 717 fcoe->pool = NULL;
591 } 718 }
@@ -603,11 +730,13 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
603{ 730{
604 int rc = -EINVAL; 731 int rc = -EINVAL;
605 struct ixgbe_adapter *adapter = netdev_priv(netdev); 732 struct ixgbe_adapter *adapter = netdev_priv(netdev);
733 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
606 734
607 735
608 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) 736 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
609 goto out_enable; 737 goto out_enable;
610 738
739 atomic_inc(&fcoe->refcnt);
611 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 740 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
612 goto out_enable; 741 goto out_enable;
613 742
@@ -647,6 +776,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
647{ 776{
648 int rc = -EINVAL; 777 int rc = -EINVAL;
649 struct ixgbe_adapter *adapter = netdev_priv(netdev); 778 struct ixgbe_adapter *adapter = netdev_priv(netdev);
779 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
650 780
651 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) 781 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
652 goto out_disable; 782 goto out_disable;
@@ -654,6 +784,9 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
654 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 784 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
655 goto out_disable; 785 goto out_disable;
656 786
787 if (!atomic_dec_and_test(&fcoe->refcnt))
788 goto out_disable;
789
657 e_info(drv, "Disabling FCoE offload features.\n"); 790 e_info(drv, "Disabling FCoE offload features.\n");
658 netdev->features &= ~NETIF_F_FCOE_CRC; 791 netdev->features &= ~NETIF_F_FCOE_CRC;
659 netdev->features &= ~NETIF_F_FSO; 792 netdev->features &= ~NETIF_F_FSO;
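
The refcnt added in this pair makes enable/disable nest: every successful ixgbe_fcoe_enable() takes a reference, and only the caller that drops the count to zero (the atomic_dec_and_test() above) goes on to tear the offload down. The idiom in isolation (a sketch, not driver code):

	static atomic_t users = ATOMIC_INIT(0);

	static void feature_get(void)
	{
		if (atomic_inc_return(&users) == 1)
			really_enable();	/* first user turns it on */
	}

	static void feature_put(void)
	{
		if (atomic_dec_and_test(&users))
			really_disable();	/* last user turns it off */
	}
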
@@ -680,21 +813,6 @@ out_disable:
680 813
681#ifdef CONFIG_IXGBE_DCB 814#ifdef CONFIG_IXGBE_DCB
682/** 815/**
683 * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE
684 * @adapter : ixgbe adapter
685 *
686 * Finds out the corresponding user priority bitmap from the current
687 * traffic class that FCoE belongs to. Returns 0 as the invalid user
688 * priority bitmap to indicate an error.
689 *
690 * Returns : 802.1p user priority bitmap for FCoE
691 */
692u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter)
693{
694 return 1 << adapter->fcoe.up;
695}
696
697/**
698 * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE 816 * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
699 * @adapter : ixgbe adapter 817 * @adapter : ixgbe adapter
700 * @up : 802.1p user priority bitmap 818 * @up : 802.1p user priority bitmap
@@ -771,5 +889,3 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
771 } 889 }
772 return rc; 890 return rc;
773} 891}
774
775
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index abf4b2b3f252..5a650a4ace66 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -52,6 +52,9 @@
52/* fcerr */ 52/* fcerr */
53#define IXGBE_FCERR_BADCRC 0x00100000 53#define IXGBE_FCERR_BADCRC 0x00100000
54 54
55/* FCoE DDP for target mode */
56#define __IXGBE_FCOE_TARGET 1
57
55struct ixgbe_fcoe_ddp { 58struct ixgbe_fcoe_ddp {
56 int len; 59 int len;
57 u32 err; 60 u32 err;
@@ -66,9 +69,13 @@ struct ixgbe_fcoe {
66 u8 tc; 69 u8 tc;
67 u8 up; 70 u8 up;
68#endif 71#endif
72 unsigned long mode;
73 atomic_t refcnt;
69 spinlock_t lock; 74 spinlock_t lock;
70 struct pci_pool *pool; 75 struct pci_pool *pool;
71 struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; 76 struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
77 unsigned char *extra_ddp_buffer;
78 dma_addr_t extra_ddp_buffer_dma;
72}; 79};
73 80
74#endif /* _IXGBE_FCOE_H */ 81#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e32af434cc9d..08e8e25c159d 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -41,6 +41,7 @@
41#include <net/ip6_checksum.h> 41#include <net/ip6_checksum.h>
42#include <linux/ethtool.h> 42#include <linux/ethtool.h>
43#include <linux/if_vlan.h> 43#include <linux/if_vlan.h>
44#include <linux/prefetch.h>
44#include <scsi/fc/fc_fcoe.h> 45#include <scsi/fc/fc_fcoe.h>
45 46
46#include "ixgbe.h" 47#include "ixgbe.h"
@@ -50,15 +51,21 @@
50 51
51char ixgbe_driver_name[] = "ixgbe"; 52char ixgbe_driver_name[] = "ixgbe";
52static const char ixgbe_driver_string[] = 53static const char ixgbe_driver_string[] =
53 "Intel(R) 10 Gigabit PCI Express Network Driver"; 54 "Intel(R) 10 Gigabit PCI Express Network Driver";
54 55#define MAJ 3
55#define DRV_VERSION "2.0.84-k2" 56#define MIN 3
57#define BUILD 8
58#define KFIX 2
59#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
60 __stringify(BUILD) "-k" __stringify(KFIX)
56const char ixgbe_driver_version[] = DRV_VERSION; 61const char ixgbe_driver_version[] = DRV_VERSION;
57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; 62static const char ixgbe_copyright[] =
63 "Copyright (c) 1999-2011 Intel Corporation.";
58 64
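
With MAJ/MIN/BUILD/KFIX as defined above, DRV_VERSION is pasted together from string literals at compile time, yielding "3.3.8-k2". The double-macro indirection is what forces the arguments to expand before '#' stringizes them; a self-contained miniature of the same technique:

	#include <stdio.h>

	#define stringify_1(x)	#x
	#define stringify(x)	stringify_1(x)	/* expand x first, then stringize */

	#define MAJ 3
	#define MIN 3
	#define VERSION stringify(MAJ) "." stringify(MIN)

	int main(void)
	{
		puts(VERSION);	/* prints "3.3" */
		return 0;
	}
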
59static const struct ixgbe_info *ixgbe_info_tbl[] = { 65static const struct ixgbe_info *ixgbe_info_tbl[] = {
60 [board_82598] = &ixgbe_82598_info, 66 [board_82598] = &ixgbe_82598_info,
61 [board_82599] = &ixgbe_82599_info, 67 [board_82599] = &ixgbe_82599_info,
68 [board_X540] = &ixgbe_X540_info,
62}; 69};
63 70
64/* ixgbe_pci_tbl - PCI Device ID Table 71/* ixgbe_pci_tbl - PCI Device ID Table
@@ -108,10 +115,20 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
108 board_82599 }, 115 board_82599 },
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 116 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
110 board_82599 }, 117 board_82599 },
118 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE),
119 board_82599 },
120 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE),
121 board_82599 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), 122 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
112 board_82599 }, 123 board_82599 },
113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), 124 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
114 board_82599 }, 125 board_82599 },
126 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
127 board_X540 },
128 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2),
129 board_82599 },
130 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS),
131 board_82599 },
115 132
116 /* required last entry */ 133 /* required last entry */
117 {0, } 134 {0, }
@@ -120,7 +137,7 @@ MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
120 137
121#ifdef CONFIG_IXGBE_DCA 138#ifdef CONFIG_IXGBE_DCA
122static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, 139static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
123 void *p); 140 void *p);
124static struct notifier_block dca_notifier = { 141static struct notifier_block dca_notifier = {
125 .notifier_call = ixgbe_notify_dca, 142 .notifier_call = ixgbe_notify_dca,
126 .next = NULL, 143 .next = NULL,
@@ -131,8 +148,8 @@ static struct notifier_block dca_notifier = {
131#ifdef CONFIG_PCI_IOV 148#ifdef CONFIG_PCI_IOV
132static unsigned int max_vfs; 149static unsigned int max_vfs;
133module_param(max_vfs, uint, 0); 150module_param(max_vfs, uint, 0);
134MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate " 151MODULE_PARM_DESC(max_vfs,
135 "per physical function"); 152 "Maximum number of virtual functions to allocate per physical function");
136#endif /* CONFIG_PCI_IOV */ 153#endif /* CONFIG_PCI_IOV */
137 154
138MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 155MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -169,14 +186,30 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
169 186
170 /* take a breather then clean up driver data */ 187 /* take a breather then clean up driver data */
171 msleep(100); 188 msleep(100);
172 if (adapter->vfinfo) 189
173 kfree(adapter->vfinfo); 190 kfree(adapter->vfinfo);
174 adapter->vfinfo = NULL; 191 adapter->vfinfo = NULL;
175 192
176 adapter->num_vfs = 0; 193 adapter->num_vfs = 0;
177 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; 194 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
178} 195}
179 196
197static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
198{
199 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
200 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
201 schedule_work(&adapter->service_task);
202}
203
204static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
205{
206 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
207
 208 /* flush memory to make sure state is correct before next watchdog */
209 smp_mb__before_clear_bit();
210 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
211}
212
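
ixgbe_service_event_schedule() relies on test_and_set_bit() being atomic: of any number of concurrent callers, exactly one sees the bit clear and queues the work, and ixgbe_service_event_complete() clears the bit so the next event can schedule again. The idiom in isolation (a sketch with illustrative names):

	/* producer side: queue at most one instance of the worker */
	if (!test_and_set_bit(WORK_SCHEDULED, &state))
		schedule_work(&work);

	/* worker side, on completion: allow the next schedule */
	smp_mb__before_clear_bit();
	clear_bit(WORK_SCHEDULED, &state);
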
180struct ixgbe_reg_info { 213struct ixgbe_reg_info {
181 u32 ofs; 214 u32 ofs;
182 char *name; 215 char *name;
@@ -282,17 +315,17 @@ static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
282 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); 315 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
283 break; 316 break;
284 default: 317 default:
285 printk(KERN_INFO "%-15s %08x\n", reginfo->name, 318 pr_info("%-15s %08x\n", reginfo->name,
286 IXGBE_READ_REG(hw, reginfo->ofs)); 319 IXGBE_READ_REG(hw, reginfo->ofs));
287 return; 320 return;
288 } 321 }
289 322
290 for (i = 0; i < 8; i++) { 323 for (i = 0; i < 8; i++) {
291 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7); 324 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
292 printk(KERN_ERR "%-15s ", rname); 325 pr_err("%-15s", rname);
293 for (j = 0; j < 8; j++) 326 for (j = 0; j < 8; j++)
294 printk(KERN_CONT "%08x ", regs[i*8+j]); 327 pr_cont(" %08x", regs[i*8+j]);
295 printk(KERN_CONT "\n"); 328 pr_cont("\n");
296 } 329 }
297 330
298} 331}
@@ -322,18 +355,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
322 /* Print netdevice Info */ 355 /* Print netdevice Info */
323 if (netdev) { 356 if (netdev) {
324 dev_info(&adapter->pdev->dev, "Net device Info\n"); 357 dev_info(&adapter->pdev->dev, "Net device Info\n");
325 printk(KERN_INFO "Device Name state " 358 pr_info("Device Name state "
326 "trans_start last_rx\n"); 359 "trans_start last_rx\n");
327 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", 360 pr_info("%-15s %016lX %016lX %016lX\n",
328 netdev->name, 361 netdev->name,
329 netdev->state, 362 netdev->state,
330 netdev->trans_start, 363 netdev->trans_start,
331 netdev->last_rx); 364 netdev->last_rx);
332 } 365 }
333 366
334 /* Print Registers */ 367 /* Print Registers */
335 dev_info(&adapter->pdev->dev, "Register Dump\n"); 368 dev_info(&adapter->pdev->dev, "Register Dump\n");
336 printk(KERN_INFO " Register Name Value\n"); 369 pr_info(" Register Name Value\n");
337 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; 370 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
338 reginfo->name; reginfo++) { 371 reginfo->name; reginfo++) {
339 ixgbe_regdump(hw, reginfo); 372 ixgbe_regdump(hw, reginfo);
@@ -344,13 +377,12 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
344 goto exit; 377 goto exit;
345 378
346 dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); 379 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
347 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ] " 380 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
348 "leng ntw timestamp\n");
349 for (n = 0; n < adapter->num_tx_queues; n++) { 381 for (n = 0; n < adapter->num_tx_queues; n++) {
350 tx_ring = adapter->tx_ring[n]; 382 tx_ring = adapter->tx_ring[n];
351 tx_buffer_info = 383 tx_buffer_info =
352 &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; 384 &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
353 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", 385 pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
354 n, tx_ring->next_to_use, tx_ring->next_to_clean, 386 n, tx_ring->next_to_use, tx_ring->next_to_clean,
355 (u64)tx_buffer_info->dma, 387 (u64)tx_buffer_info->dma,
356 tx_buffer_info->length, 388 tx_buffer_info->length,
@@ -377,18 +409,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
377 409
378 for (n = 0; n < adapter->num_tx_queues; n++) { 410 for (n = 0; n < adapter->num_tx_queues; n++) {
379 tx_ring = adapter->tx_ring[n]; 411 tx_ring = adapter->tx_ring[n];
380 printk(KERN_INFO "------------------------------------\n"); 412 pr_info("------------------------------------\n");
381 printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index); 413 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
382 printk(KERN_INFO "------------------------------------\n"); 414 pr_info("------------------------------------\n");
383 printk(KERN_INFO "T [desc] [address 63:0 ] " 415 pr_info("T [desc] [address 63:0 ] "
384 "[PlPOIdStDDt Ln] [bi->dma ] " 416 "[PlPOIdStDDt Ln] [bi->dma ] "
385 "leng ntw timestamp bi->skb\n"); 417 "leng ntw timestamp bi->skb\n");
386 418
387 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 419 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
388 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 420 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
389 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 421 tx_buffer_info = &tx_ring->tx_buffer_info[i];
390 u0 = (struct my_u0 *)tx_desc; 422 u0 = (struct my_u0 *)tx_desc;
391 printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX" 423 pr_info("T [0x%03X] %016llX %016llX %016llX"
392 " %04X %3X %016llX %p", i, 424 " %04X %3X %016llX %p", i,
393 le64_to_cpu(u0->a), 425 le64_to_cpu(u0->a),
394 le64_to_cpu(u0->b), 426 le64_to_cpu(u0->b),
@@ -399,13 +431,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
399 tx_buffer_info->skb); 431 tx_buffer_info->skb);
400 if (i == tx_ring->next_to_use && 432 if (i == tx_ring->next_to_use &&
401 i == tx_ring->next_to_clean) 433 i == tx_ring->next_to_clean)
402 printk(KERN_CONT " NTC/U\n"); 434 pr_cont(" NTC/U\n");
403 else if (i == tx_ring->next_to_use) 435 else if (i == tx_ring->next_to_use)
404 printk(KERN_CONT " NTU\n"); 436 pr_cont(" NTU\n");
405 else if (i == tx_ring->next_to_clean) 437 else if (i == tx_ring->next_to_clean)
406 printk(KERN_CONT " NTC\n"); 438 pr_cont(" NTC\n");
407 else 439 else
408 printk(KERN_CONT "\n"); 440 pr_cont("\n");
409 441
410 if (netif_msg_pktdata(adapter) && 442 if (netif_msg_pktdata(adapter) &&
411 tx_buffer_info->dma != 0) 443 tx_buffer_info->dma != 0)
@@ -419,11 +451,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
419 /* Print RX Rings Summary */ 451 /* Print RX Rings Summary */
420rx_ring_summary: 452rx_ring_summary:
421 dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); 453 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
422 printk(KERN_INFO "Queue [NTU] [NTC]\n"); 454 pr_info("Queue [NTU] [NTC]\n");
423 for (n = 0; n < adapter->num_rx_queues; n++) { 455 for (n = 0; n < adapter->num_rx_queues; n++) {
424 rx_ring = adapter->rx_ring[n]; 456 rx_ring = adapter->rx_ring[n];
425 printk(KERN_INFO "%5d %5X %5X\n", n, 457 pr_info("%5d %5X %5X\n",
426 rx_ring->next_to_use, rx_ring->next_to_clean); 458 n, rx_ring->next_to_use, rx_ring->next_to_clean);
427 } 459 }
428 460
429 /* Print RX Rings */ 461 /* Print RX Rings */
@@ -454,30 +486,30 @@ rx_ring_summary:
454 */ 486 */
455 for (n = 0; n < adapter->num_rx_queues; n++) { 487 for (n = 0; n < adapter->num_rx_queues; n++) {
456 rx_ring = adapter->rx_ring[n]; 488 rx_ring = adapter->rx_ring[n];
457 printk(KERN_INFO "------------------------------------\n"); 489 pr_info("------------------------------------\n");
458 printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index); 490 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
459 printk(KERN_INFO "------------------------------------\n"); 491 pr_info("------------------------------------\n");
460 printk(KERN_INFO "R [desc] [ PktBuf A0] " 492 pr_info("R [desc] [ PktBuf A0] "
461 "[ HeadBuf DD] [bi->dma ] [bi->skb] " 493 "[ HeadBuf DD] [bi->dma ] [bi->skb] "
462 "<-- Adv Rx Read format\n"); 494 "<-- Adv Rx Read format\n");
463 printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] " 495 pr_info("RWB[desc] [PcsmIpSHl PtRs] "
464 "[vl er S cks ln] ---------------- [bi->skb] " 496 "[vl er S cks ln] ---------------- [bi->skb] "
465 "<-- Adv Rx Write-Back format\n"); 497 "<-- Adv Rx Write-Back format\n");
466 498
467 for (i = 0; i < rx_ring->count; i++) { 499 for (i = 0; i < rx_ring->count; i++) {
468 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 500 rx_buffer_info = &rx_ring->rx_buffer_info[i];
469 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 501 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
470 u0 = (struct my_u0 *)rx_desc; 502 u0 = (struct my_u0 *)rx_desc;
471 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 503 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
472 if (staterr & IXGBE_RXD_STAT_DD) { 504 if (staterr & IXGBE_RXD_STAT_DD) {
473 /* Descriptor Done */ 505 /* Descriptor Done */
474 printk(KERN_INFO "RWB[0x%03X] %016llX " 506 pr_info("RWB[0x%03X] %016llX "
475 "%016llX ---------------- %p", i, 507 "%016llX ---------------- %p", i,
476 le64_to_cpu(u0->a), 508 le64_to_cpu(u0->a),
477 le64_to_cpu(u0->b), 509 le64_to_cpu(u0->b),
478 rx_buffer_info->skb); 510 rx_buffer_info->skb);
479 } else { 511 } else {
480 printk(KERN_INFO "R [0x%03X] %016llX " 512 pr_info("R [0x%03X] %016llX "
481 "%016llX %016llX %p", i, 513 "%016llX %016llX %p", i,
482 le64_to_cpu(u0->a), 514 le64_to_cpu(u0->a),
483 le64_to_cpu(u0->b), 515 le64_to_cpu(u0->b),
@@ -503,11 +535,11 @@ rx_ring_summary:
503 } 535 }
504 536
505 if (i == rx_ring->next_to_use) 537 if (i == rx_ring->next_to_use)
506 printk(KERN_CONT " NTU\n"); 538 pr_cont(" NTU\n");
507 else if (i == rx_ring->next_to_clean) 539 else if (i == rx_ring->next_to_clean)
508 printk(KERN_CONT " NTC\n"); 540 pr_cont(" NTC\n");
509 else 541 else
510 printk(KERN_CONT "\n"); 542 pr_cont("\n");
511 543
512 } 544 }
513 } 545 }
@@ -523,7 +555,7 @@ static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
523 /* Let firmware take over control of h/w */ 555 /* Let firmware take over control of h/w */
524 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 556 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
525 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, 557 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
526 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); 558 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
527} 559}
528 560
529static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) 561static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
@@ -533,7 +565,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
533 /* Let firmware know the driver has taken over */ 565 /* Let firmware know the driver has taken over */
534 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 566 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
535 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, 567 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
536 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); 568 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
537} 569}
538 570
539/* 571/*
@@ -545,7 +577,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
545 * 577 *
546 */ 578 */
547static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, 579static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
548 u8 queue, u8 msix_vector) 580 u8 queue, u8 msix_vector)
549{ 581{
550 u32 ivar, index; 582 u32 ivar, index;
551 struct ixgbe_hw *hw = &adapter->hw; 583 struct ixgbe_hw *hw = &adapter->hw;
@@ -561,6 +593,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
561 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 593 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
562 break; 594 break;
563 case ixgbe_mac_82599EB: 595 case ixgbe_mac_82599EB:
596 case ixgbe_mac_X540:
564 if (direction == -1) { 597 if (direction == -1) {
565 /* other causes */ 598 /* other causes */
566 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 599 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -586,33 +619,38 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
586} 619}
587 620
588static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, 621static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
589 u64 qmask) 622 u64 qmask)
590{ 623{
591 u32 mask; 624 u32 mask;
592 625
593 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 626 switch (adapter->hw.mac.type) {
627 case ixgbe_mac_82598EB:
594 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 628 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
595 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 629 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
596 } else { 630 break;
631 case ixgbe_mac_82599EB:
632 case ixgbe_mac_X540:
597 mask = (qmask & 0xFFFFFFFF); 633 mask = (qmask & 0xFFFFFFFF);
598 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); 634 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
599 mask = (qmask >> 32); 635 mask = (qmask >> 32);
600 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); 636 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
637 break;
638 default:
639 break;
601 } 640 }
602} 641}
603 642
604static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, 643void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
605 struct ixgbe_tx_buffer 644 struct ixgbe_tx_buffer *tx_buffer_info)
606 *tx_buffer_info)
607{ 645{
608 if (tx_buffer_info->dma) { 646 if (tx_buffer_info->dma) {
609 if (tx_buffer_info->mapped_as_page) 647 if (tx_buffer_info->mapped_as_page)
610 dma_unmap_page(&adapter->pdev->dev, 648 dma_unmap_page(tx_ring->dev,
611 tx_buffer_info->dma, 649 tx_buffer_info->dma,
612 tx_buffer_info->length, 650 tx_buffer_info->length,
613 DMA_TO_DEVICE); 651 DMA_TO_DEVICE);
614 else 652 else
615 dma_unmap_single(&adapter->pdev->dev, 653 dma_unmap_single(tx_ring->dev,
616 tx_buffer_info->dma, 654 tx_buffer_info->dma,
617 tx_buffer_info->length, 655 tx_buffer_info->length,
618 DMA_TO_DEVICE); 656 DMA_TO_DEVICE);
@@ -627,92 +665,166 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
627} 665}
628 666
629/** 667/**
630 * ixgbe_tx_xon_state - check the tx ring xon state 668 * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
631 * @adapter: the ixgbe adapter 669 * @adapter: driver private struct
632 * @tx_ring: the corresponding tx_ring 670 * @index: reg idx of queue to query (0-127)
633 * 671 *
634 * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the 672 * Helper function to determine the traffic index for a particular
635 * corresponding TC of this tx_ring when checking TFCS. 673 * register index.
636 * 674 *
637 * Returns : true if in xon state (currently not paused) 675 * Returns : a tc index for use in range 0-7, or 0-3
638 */ 676 */
639static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter, 677static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
640 struct ixgbe_ring *tx_ring)
641{ 678{
642 u32 txoff = IXGBE_TFCS_TXOFF; 679 int tc = -1;
680 int dcb_i = netdev_get_num_tc(adapter->netdev);
643 681
644#ifdef CONFIG_IXGBE_DCB 682 /* if DCB is not enabled the queues have no TC */
645 if (adapter->dcb_cfg.pfc_mode_enable) { 683 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
646 int tc; 684 return tc;
647 int reg_idx = tx_ring->reg_idx;
648 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
649 685
650 switch (adapter->hw.mac.type) { 686 /* check valid range */
687 if (reg_idx >= adapter->hw.mac.max_tx_queues)
688 return tc;
689
690 switch (adapter->hw.mac.type) {
691 case ixgbe_mac_82598EB:
692 tc = reg_idx >> 2;
693 break;
694 default:
695 if (dcb_i != 4 && dcb_i != 8)
696 break;
697
698 /* if VMDq is enabled the lowest order bits determine TC */
699 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
700 IXGBE_FLAG_VMDQ_ENABLED)) {
701 tc = reg_idx & (dcb_i - 1);
702 break;
703 }
704
705 /*
706 * Convert the reg_idx into the correct TC. This bitmask
 707 * targets the last full 32-ring traffic class and assigns
 708 * it a value of 1. From there the rest of the rings are
 709 * based on shifting the mask further up to include the
 710 * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i
 711 * will only ever be 8 or 4 and that reg_idx will never
 712 * be greater than 128. The code without the power of 2
713 * optimizations would be:
714 * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
715 */
 716 tc = ((reg_idx & 0x1F) + 0x20) * dcb_i;
717 tc >>= 9 - (reg_idx >> 5);
718 }
719
720 return tc;
721}
722
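
For the ranges the comment assumes (dcb_i of 4 or 8, reg_idx below 128), the shift/mask form and the plain arithmetic it replaces can be checked against each other exhaustively. A small user-space harness (illustrative, not driver code):

	#include <assert.h>

	static int tc_plain(int reg_idx, int dcb_i)
	{
		return (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32);
	}

	static int tc_fast(int reg_idx, int dcb_i)
	{
		int tc = ((reg_idx & 0x1F) + 0x20) * dcb_i;

		return tc >> (9 - (reg_idx >> 5));
	}

	int main(void)
	{
		int dcb_i, reg_idx;

		for (dcb_i = 4; dcb_i <= 8; dcb_i += 4)
			for (reg_idx = 0; reg_idx < 128; reg_idx++)
				assert(tc_plain(reg_idx, dcb_i) ==
				       tc_fast(reg_idx, dcb_i));
		return 0;
	}
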
723static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
724{
725 struct ixgbe_hw *hw = &adapter->hw;
726 struct ixgbe_hw_stats *hwstats = &adapter->stats;
727 u32 data = 0;
728 u32 xoff[8] = {0};
729 int i;
730
731 if ((hw->fc.current_mode == ixgbe_fc_full) ||
732 (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
733 switch (hw->mac.type) {
651 case ixgbe_mac_82598EB: 734 case ixgbe_mac_82598EB:
652 tc = reg_idx >> 2; 735 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
653 txoff = IXGBE_TFCS_TXOFF0;
654 break; 736 break;
655 case ixgbe_mac_82599EB: 737 default:
656 tc = 0; 738 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
657 txoff = IXGBE_TFCS_TXOFF; 739 }
658 if (dcb_i == 8) { 740 hwstats->lxoffrxc += data;
659 /* TC0, TC1 */ 741
660 tc = reg_idx >> 5; 742 /* refill credits (no tx hang) if we received xoff */
661 if (tc == 2) /* TC2, TC3 */ 743 if (!data)
662 tc += (reg_idx - 64) >> 4; 744 return;
663 else if (tc == 3) /* TC4, TC5, TC6, TC7 */ 745
664 tc += 1 + ((reg_idx - 96) >> 3); 746 for (i = 0; i < adapter->num_tx_queues; i++)
665 } else if (dcb_i == 4) { 747 clear_bit(__IXGBE_HANG_CHECK_ARMED,
666 /* TC0, TC1 */ 748 &adapter->tx_ring[i]->state);
667 tc = reg_idx >> 6; 749 return;
668 if (tc == 1) { 750 } else if (!(adapter->dcb_cfg.pfc_mode_enable))
669 tc += (reg_idx - 64) >> 5; 751 return;
670 if (tc == 2) /* TC2, TC3 */ 752
671 tc += (reg_idx - 96) >> 4; 753 /* update stats for each tc, only valid with PFC enabled */
672 } 754 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
673 } 755 switch (hw->mac.type) {
756 case ixgbe_mac_82598EB:
757 xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
674 break; 758 break;
675 default: 759 default:
676 tc = 0; 760 xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
677 } 761 }
678 txoff <<= tc; 762 hwstats->pxoffrxc[i] += xoff[i];
763 }
764
765 /* disarm tx queues that have received xoff frames */
766 for (i = 0; i < adapter->num_tx_queues; i++) {
767 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
768 u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
769
770 if (xoff[tc])
771 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
679 } 772 }
680#endif
681 return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
682} 773}
683 774
684static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, 775static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
685 struct ixgbe_ring *tx_ring,
686 unsigned int eop)
687{ 776{
777 return ring->tx_stats.completed;
778}
779
780static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
781{
782 struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
688 struct ixgbe_hw *hw = &adapter->hw; 783 struct ixgbe_hw *hw = &adapter->hw;
689 784
690 /* Detect a transmit hang in hardware, this serializes the 785 u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
691 * check with the clearing of time_stamp and movement of eop */ 786 u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
692 adapter->detect_tx_hung = false; 787
693 if (tx_ring->tx_buffer_info[eop].time_stamp && 788 if (head != tail)
694 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && 789 return (head < tail) ?
695 ixgbe_tx_xon_state(adapter, tx_ring)) { 790 tail - head : (tail + ring->count - head);
696 /* detected Tx unit hang */ 791
697 union ixgbe_adv_tx_desc *tx_desc; 792 return 0;
698 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 793}
699 e_err(drv, "Detected Tx Unit Hang\n" 794
700 " Tx Queue <%d>\n" 795static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
701 " TDH, TDT <%x>, <%x>\n" 796{
702 " next_to_use <%x>\n" 797 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
703 " next_to_clean <%x>\n" 798 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
704 "tx_buffer_info[next_to_clean]\n" 799 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
705 " time_stamp <%lx>\n" 800 bool ret = false;
706 " jiffies <%lx>\n", 801
707 tx_ring->queue_index, 802 clear_check_for_tx_hang(tx_ring);
708 IXGBE_READ_REG(hw, tx_ring->head), 803
709 IXGBE_READ_REG(hw, tx_ring->tail), 804 /*
710 tx_ring->next_to_use, eop, 805 * Check for a hung queue, but be thorough. This verifies
711 tx_ring->tx_buffer_info[eop].time_stamp, jiffies); 806 * that a transmit has been completed since the previous
712 return true; 807 * check AND there is at least one packet pending. The
808 * ARMED bit is set to indicate a potential hang. The
809 * bit is cleared if a pause frame is received to remove
810 * false hang detection due to PFC or 802.3x frames. By
811 * requiring this to fail twice we avoid races with
 812 * PFC clearing the ARMED bit and conditions where we
813 * run the check_tx_hang logic with a transmit completion
814 * pending but without time to complete it yet.
815 */
816 if ((tx_done_old == tx_done) && tx_pending) {
817 /* make sure it is true for two checks in a row */
818 ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
819 &tx_ring->state);
820 } else {
821 /* update completed stats and continue */
822 tx_ring->tx_stats.tx_done_old = tx_done;
823 /* reset the countdown */
824 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
713 } 825 }
714 826
715 return false; 827 return ret;
716} 828}
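
Two details make the new detection robust. ixgbe_get_tx_pending() handles tail wraparound (e.g. with count = 512, head = 500, tail = 10, pending is 10 + 512 - 500 = 22), and ixgbe_check_tx_hang() only reports a hang on the second consecutive check that sees pending work with no completions, via test_and_set_bit() on the ARMED flag. A user-space sketch of that two-strike state machine (ring state collapsed into plain fields):

	#include <stdbool.h>

	struct hang_state {
		unsigned int tx_done_old;
		bool armed;
	};

	/* Returns true only on the second consecutive call that sees no new
	 * completions (tx_done unchanged) while descriptors are pending. */
	static bool check_tx_hang(struct hang_state *hs,
				  unsigned int tx_done, unsigned int tx_pending)
	{
		if (hs->tx_done_old == tx_done && tx_pending) {
			bool was_armed = hs->armed;

			hs->armed = true;	/* first strike arms the check */
			return was_armed;	/* second strike reports a hang */
		}

		/* progress was made (or the ring is idle): reset the countdown */
		hs->tx_done_old = tx_done;
		hs->armed = false;
		return false;
	}
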
717 829
718#define IXGBE_MAX_TXD_PWR 14 830#define IXGBE_MAX_TXD_PWR 14
@@ -724,7 +836,19 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
724#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ 836#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
725 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ 837 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
726 838
727static void ixgbe_tx_timeout(struct net_device *netdev); 839/**
840 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
841 * @adapter: driver private struct
842 **/
843static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
844{
845
846 /* Do the reset outside of interrupt context */
847 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
848 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
849 ixgbe_service_event_schedule(adapter);
850 }
851}
728 852
729/** 853/**
730 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes 854 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
@@ -732,163 +856,195 @@ static void ixgbe_tx_timeout(struct net_device *netdev);
732 * @tx_ring: tx ring to clean 856 * @tx_ring: tx ring to clean
733 **/ 857 **/
734static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, 858static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
735 struct ixgbe_ring *tx_ring) 859 struct ixgbe_ring *tx_ring)
736{ 860{
737 struct ixgbe_adapter *adapter = q_vector->adapter; 861 struct ixgbe_adapter *adapter = q_vector->adapter;
738 struct net_device *netdev = adapter->netdev;
739 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 862 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
740 struct ixgbe_tx_buffer *tx_buffer_info; 863 struct ixgbe_tx_buffer *tx_buffer_info;
741 unsigned int i, eop, count = 0;
742 unsigned int total_bytes = 0, total_packets = 0; 864 unsigned int total_bytes = 0, total_packets = 0;
865 u16 i, eop, count = 0;
743 866
744 i = tx_ring->next_to_clean; 867 i = tx_ring->next_to_clean;
745 eop = tx_ring->tx_buffer_info[i].next_to_watch; 868 eop = tx_ring->tx_buffer_info[i].next_to_watch;
746 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 869 eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
747 870
748 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && 871 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
749 (count < tx_ring->work_limit)) { 872 (count < tx_ring->work_limit)) {
750 bool cleaned = false; 873 bool cleaned = false;
751 rmb(); /* read buffer_info after eop_desc */ 874 rmb(); /* read buffer_info after eop_desc */
752 for ( ; !cleaned; count++) { 875 for ( ; !cleaned; count++) {
753 struct sk_buff *skb; 876 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
754 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
755 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 877 tx_buffer_info = &tx_ring->tx_buffer_info[i];
756 cleaned = (i == eop);
757 skb = tx_buffer_info->skb;
758
759 if (cleaned && skb) {
760 unsigned int segs, bytecount;
761 unsigned int hlen = skb_headlen(skb);
762
763 /* gso_segs is currently only valid for tcp */
764 segs = skb_shinfo(skb)->gso_segs ?: 1;
765#ifdef IXGBE_FCOE
766 /* adjust for FCoE Sequence Offload */
767 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
768 && (skb->protocol == htons(ETH_P_FCOE)) &&
769 skb_is_gso(skb)) {
770 hlen = skb_transport_offset(skb) +
771 sizeof(struct fc_frame_header) +
772 sizeof(struct fcoe_crc_eof);
773 segs = DIV_ROUND_UP(skb->len - hlen,
774 skb_shinfo(skb)->gso_size);
775 }
776#endif /* IXGBE_FCOE */
777 /* multiply data chunks by size of headers */
778 bytecount = ((segs - 1) * hlen) + skb->len;
779 total_packets += segs;
780 total_bytes += bytecount;
781 }
782
783 ixgbe_unmap_and_free_tx_resource(adapter,
784 tx_buffer_info);
785 878
786 tx_desc->wb.status = 0; 879 tx_desc->wb.status = 0;
880 cleaned = (i == eop);
787 881
788 i++; 882 i++;
789 if (i == tx_ring->count) 883 if (i == tx_ring->count)
790 i = 0; 884 i = 0;
885
886 if (cleaned && tx_buffer_info->skb) {
887 total_bytes += tx_buffer_info->bytecount;
888 total_packets += tx_buffer_info->gso_segs;
889 }
890
891 ixgbe_unmap_and_free_tx_resource(tx_ring,
892 tx_buffer_info);
791 } 893 }
792 894
895 tx_ring->tx_stats.completed++;
793 eop = tx_ring->tx_buffer_info[i].next_to_watch; 896 eop = tx_ring->tx_buffer_info[i].next_to_watch;
794 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 897 eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
795 } 898 }
796 899
797 tx_ring->next_to_clean = i; 900 tx_ring->next_to_clean = i;
901 tx_ring->total_bytes += total_bytes;
902 tx_ring->total_packets += total_packets;
903 u64_stats_update_begin(&tx_ring->syncp);
904 tx_ring->stats.packets += total_packets;
905 tx_ring->stats.bytes += total_bytes;
906 u64_stats_update_end(&tx_ring->syncp);
907
908 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
909 /* schedule immediate reset if we believe we hung */
910 struct ixgbe_hw *hw = &adapter->hw;
911 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
912 e_err(drv, "Detected Tx Unit Hang\n"
913 " Tx Queue <%d>\n"
914 " TDH, TDT <%x>, <%x>\n"
915 " next_to_use <%x>\n"
916 " next_to_clean <%x>\n"
917 "tx_buffer_info[next_to_clean]\n"
918 " time_stamp <%lx>\n"
919 " jiffies <%lx>\n",
920 tx_ring->queue_index,
921 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
922 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
923 tx_ring->next_to_use, eop,
924 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
925
926 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
927
928 e_info(probe,
929 "tx hang %d detected on queue %d, resetting adapter\n",
930 adapter->tx_timeout_count + 1, tx_ring->queue_index);
931
932 /* schedule immediate reset if we believe we hung */
933 ixgbe_tx_timeout_reset(adapter);
934
935 /* the adapter is about to reset, no point in enabling stuff */
936 return true;
937 }
798 938
799#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 939#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
800 if (unlikely(count && netif_carrier_ok(netdev) && 940 if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
801 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 941 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
802 /* Make sure that anybody stopping the queue after this 942 /* Make sure that anybody stopping the queue after this
803 * sees the new next_to_clean. 943 * sees the new next_to_clean.
804 */ 944 */
805 smp_mb(); 945 smp_mb();
806 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && 946 if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
807 !test_bit(__IXGBE_DOWN, &adapter->state)) { 947 !test_bit(__IXGBE_DOWN, &adapter->state)) {
808 netif_wake_subqueue(netdev, tx_ring->queue_index); 948 netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
809 ++tx_ring->restart_queue; 949 ++tx_ring->tx_stats.restart_queue;
810 } 950 }
811 } 951 }
812 952
813 if (adapter->detect_tx_hung) { 953 return count < tx_ring->work_limit;
814 if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
815 /* schedule immediate reset if we believe we hung */
816 e_info(probe, "tx hang %d detected, resetting "
817 "adapter\n", adapter->tx_timeout_count + 1);
818 ixgbe_tx_timeout(adapter->netdev);
819 }
820 }
821
822 /* re-arm the interrupt */
823 if (count >= tx_ring->work_limit)
824 ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
825
826 tx_ring->total_bytes += total_bytes;
827 tx_ring->total_packets += total_packets;
828 tx_ring->stats.packets += total_packets;
829 tx_ring->stats.bytes += total_bytes;
830 return (count < tx_ring->work_limit);
831} 954}
832 955
833#ifdef CONFIG_IXGBE_DCA 956#ifdef CONFIG_IXGBE_DCA
834static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, 957static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
835 struct ixgbe_ring *rx_ring) 958 struct ixgbe_ring *rx_ring,
959 int cpu)
836{ 960{
961 struct ixgbe_hw *hw = &adapter->hw;
837 u32 rxctrl; 962 u32 rxctrl;
838 int cpu = get_cpu(); 963 u8 reg_idx = rx_ring->reg_idx;
839 int q = rx_ring->reg_idx; 964
840 965 rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
841 if (rx_ring->cpu != cpu) { 966 switch (hw->mac.type) {
842 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); 967 case ixgbe_mac_82598EB:
843 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 968 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
844 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; 969 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
845 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 970 break;
846 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 971 case ixgbe_mac_82599EB:
847 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; 972 case ixgbe_mac_X540:
848 rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << 973 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
849 IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); 974 rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
850 } 975 IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
851 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; 976 break;
852 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; 977 default:
853 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); 978 break;
854 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
855 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
856 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
857 rx_ring->cpu = cpu;
858 } 979 }
859 put_cpu(); 980 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
981 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
982 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
983 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
860} 984}
861 985
862static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, 986static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
863 struct ixgbe_ring *tx_ring) 987 struct ixgbe_ring *tx_ring,
988 int cpu)
864{ 989{
990 struct ixgbe_hw *hw = &adapter->hw;
865 u32 txctrl; 991 u32 txctrl;
992 u8 reg_idx = tx_ring->reg_idx;
993
994 switch (hw->mac.type) {
995 case ixgbe_mac_82598EB:
996 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
997 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
998 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
999 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
1000 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
1001 break;
1002 case ixgbe_mac_82599EB:
1003 case ixgbe_mac_X540:
1004 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
1005 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
1006 txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
1007 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
1008 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
1009 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
1010 break;
1011 default:
1012 break;
1013 }
1014}
1015
1016static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
1017{
1018 struct ixgbe_adapter *adapter = q_vector->adapter;
866 int cpu = get_cpu(); 1019 int cpu = get_cpu();
867 int q = tx_ring->reg_idx; 1020 long r_idx;
868 struct ixgbe_hw *hw = &adapter->hw; 1021 int i;
869 1022
870 if (tx_ring->cpu != cpu) { 1023 if (q_vector->cpu == cpu)
871 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1024 goto out_no_update;
872 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q)); 1025
873 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; 1026 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
874 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 1027 for (i = 0; i < q_vector->txr_count; i++) {
875 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; 1028 ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
876 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl); 1029 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
877 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1030 r_idx + 1);
878 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
879 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
880 txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
881 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
882 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
883 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
884 }
885 tx_ring->cpu = cpu;
886 } 1031 }
1032
1033 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1034 for (i = 0; i < q_vector->rxr_count; i++) {
1035 ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
1036 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1037 r_idx + 1);
1038 }
1039
1040 q_vector->cpu = cpu;
1041out_no_update:
887 put_cpu(); 1042 put_cpu();
888} 1043}
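
The find_first_bit()/find_next_bit() loop above is the standard walk over a sparse bitmap of ring indices. Assuming txr_count and rxr_count match the number of set bits (as they do here), the same traversal can be written with the for_each_set_bit() helper from <linux/bitops.h> (a sketch):

	unsigned int r_idx;

	for_each_set_bit(r_idx, q_vector->txr_idx, adapter->num_tx_queues)
		ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);

	for_each_set_bit(r_idx, q_vector->rxr_idx, adapter->num_rx_queues)
		ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
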
889 1044
890static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) 1045static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
891{ 1046{
1047 int num_q_vectors;
892 int i; 1048 int i;
893 1049
894 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) 1050 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -897,22 +1053,25 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
897 /* always use CB2 mode, difference is masked in the CB driver */ 1053 /* always use CB2 mode, difference is masked in the CB driver */
898 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); 1054 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
899 1055
900 for (i = 0; i < adapter->num_tx_queues; i++) { 1056 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
901 adapter->tx_ring[i]->cpu = -1; 1057 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
902 ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]); 1058 else
903 } 1059 num_q_vectors = 1;
904 for (i = 0; i < adapter->num_rx_queues; i++) { 1060
905 adapter->rx_ring[i]->cpu = -1; 1061 for (i = 0; i < num_q_vectors; i++) {
906 ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]); 1062 adapter->q_vector[i]->cpu = -1;
1063 ixgbe_update_dca(adapter->q_vector[i]);
907 } 1064 }
908} 1065}
909 1066
910static int __ixgbe_notify_dca(struct device *dev, void *data) 1067static int __ixgbe_notify_dca(struct device *dev, void *data)
911{ 1068{
912 struct net_device *netdev = dev_get_drvdata(dev); 1069 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
913 struct ixgbe_adapter *adapter = netdev_priv(netdev);
914 unsigned long event = *(unsigned long *)data; 1070 unsigned long event = *(unsigned long *)data;
915 1071
1072 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
1073 return 0;
1074
916 switch (event) { 1075 switch (event) {
917 case DCA_PROVIDER_ADD: 1076 case DCA_PROVIDER_ADD:
918 /* if we're already enabled, don't do it again */ 1077 /* if we're already enabled, don't do it again */
@@ -935,8 +1094,14 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
935 1094
936 return 0; 1095 return 0;
937} 1096}
938
939#endif /* CONFIG_IXGBE_DCA */ 1097#endif /* CONFIG_IXGBE_DCA */
1098
1099static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc,
1100 struct sk_buff *skb)
1101{
1102 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
1103}
1104
940/** 1105/**
941 * ixgbe_receive_skb - Send a completed packet up the stack 1106 * ixgbe_receive_skb - Send a completed packet up the stack
942 * @adapter: board private structure 1107 * @adapter: board private structure
@@ -946,27 +1111,22 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
946 * @rx_desc: rx descriptor 1111 * @rx_desc: rx descriptor
947 **/ 1112 **/
948static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, 1113static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
949 struct sk_buff *skb, u8 status, 1114 struct sk_buff *skb, u8 status,
950 struct ixgbe_ring *ring, 1115 struct ixgbe_ring *ring,
951 union ixgbe_adv_rx_desc *rx_desc) 1116 union ixgbe_adv_rx_desc *rx_desc)
952{ 1117{
953 struct ixgbe_adapter *adapter = q_vector->adapter; 1118 struct ixgbe_adapter *adapter = q_vector->adapter;
954 struct napi_struct *napi = &q_vector->napi; 1119 struct napi_struct *napi = &q_vector->napi;
955 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 1120 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
956 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 1121 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
957 1122
958 skb_record_rx_queue(skb, ring->queue_index); 1123 if (is_vlan && (tag & VLAN_VID_MASK))
959 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { 1124 __vlan_hwaccel_put_tag(skb, tag);
960 if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK)) 1125
961 vlan_gro_receive(napi, adapter->vlgrp, tag, skb); 1126 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
962 else 1127 napi_gro_receive(napi, skb);
963 napi_gro_receive(napi, skb); 1128 else
964 } else { 1129 netif_rx(skb);
965 if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
966 vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
967 else
968 netif_rx(skb);
969 }
970} 1130}
971 1131
972/** 1132/**
@@ -981,7 +1141,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
981{ 1141{
982 u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error); 1142 u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);
983 1143
984 skb->ip_summed = CHECKSUM_NONE; 1144 skb_checksum_none_assert(skb);
985 1145
986 /* Rx csum disabled */ 1146 /* Rx csum disabled */
987 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) 1147 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
@@ -1016,8 +1176,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
1016 skb->ip_summed = CHECKSUM_UNNECESSARY; 1176 skb->ip_summed = CHECKSUM_UNNECESSARY;
1017} 1177}
1018 1178
1019static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw, 1179static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
1020 struct ixgbe_ring *rx_ring, u32 val)
1021{ 1180{
1022 /* 1181 /*
1023 * Force memory writes to complete before letting h/w 1182 * Force memory writes to complete before letting h/w
@@ -1026,130 +1185,133 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
1026 * such as IA-64). 1185 * such as IA-64).
1027 */ 1186 */
1028 wmb(); 1187 wmb();
1029 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val); 1188 writel(val, rx_ring->tail);
1030} 1189}
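
The wmb() before the tail write is the producer-side ordering rule for descriptor rings: descriptor contents must be globally visible before the device is told to fetch them. The pattern in miniature (a sketch; next_to_use stands in for the new tail value):

	/* 1. publish the descriptor */
	rx_desc->read.pkt_addr = cpu_to_le64(dma);

	/* 2. order the descriptor write before the doorbell */
	wmb();

	/* 3. ring the doorbell; the device may fetch from here on */
	writel(next_to_use, rx_ring->tail);
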
1031 1190
1032/** 1191/**
1033 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split 1192 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
1034 * @adapter: address of board private structure 1193 * @rx_ring: ring to place buffers on
1194 * @cleaned_count: number of buffers to replace
1035 **/ 1195 **/
1036static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, 1196void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1037 struct ixgbe_ring *rx_ring,
1038 int cleaned_count)
1039{ 1197{
1040 struct pci_dev *pdev = adapter->pdev;
1041 union ixgbe_adv_rx_desc *rx_desc; 1198 union ixgbe_adv_rx_desc *rx_desc;
1042 struct ixgbe_rx_buffer *bi; 1199 struct ixgbe_rx_buffer *bi;
1043 unsigned int i; 1200 struct sk_buff *skb;
1201 u16 i = rx_ring->next_to_use;
1044 1202
1045 i = rx_ring->next_to_use; 1203 /* do nothing if no valid netdev defined */
1046 bi = &rx_ring->rx_buffer_info[i]; 1204 if (!rx_ring->netdev)
1205 return;
1047 1206
1048 while (cleaned_count--) { 1207 while (cleaned_count--) {
1049 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 1208 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
1209 bi = &rx_ring->rx_buffer_info[i];
1210 skb = bi->skb;
1211
1212 if (!skb) {
1213 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1214 rx_ring->rx_buf_len);
1215 if (!skb) {
1216 rx_ring->rx_stats.alloc_rx_buff_failed++;
1217 goto no_buffers;
1218 }
1219 /* initialize queue mapping */
1220 skb_record_rx_queue(skb, rx_ring->queue_index);
1221 bi->skb = skb;
1222 }
1050 1223
1051 if (!bi->page_dma && 1224 if (!bi->dma) {
1052 (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) { 1225 bi->dma = dma_map_single(rx_ring->dev,
1226 skb->data,
1227 rx_ring->rx_buf_len,
1228 DMA_FROM_DEVICE);
1229 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
1230 rx_ring->rx_stats.alloc_rx_buff_failed++;
1231 bi->dma = 0;
1232 goto no_buffers;
1233 }
1234 }
1235
1236 if (ring_is_ps_enabled(rx_ring)) {
1053 if (!bi->page) { 1237 if (!bi->page) {
1054 bi->page = alloc_page(GFP_ATOMIC); 1238 bi->page = netdev_alloc_page(rx_ring->netdev);
1055 if (!bi->page) { 1239 if (!bi->page) {
1056 adapter->alloc_rx_page_failed++; 1240 rx_ring->rx_stats.alloc_rx_page_failed++;
1057 goto no_buffers; 1241 goto no_buffers;
1058 } 1242 }
1059 bi->page_offset = 0;
1060 } else {
1061 /* use a half page if we're re-using */
1062 bi->page_offset ^= (PAGE_SIZE / 2);
1063 } 1243 }
1064 1244
1065 bi->page_dma = dma_map_page(&pdev->dev, bi->page, 1245 if (!bi->page_dma) {
1066 bi->page_offset, 1246 /* use a half page if we're re-using */
1067 (PAGE_SIZE / 2), 1247 bi->page_offset ^= PAGE_SIZE / 2;
1068 DMA_FROM_DEVICE); 1248 bi->page_dma = dma_map_page(rx_ring->dev,
1069 } 1249 bi->page,
1070 1250 bi->page_offset,
1071 if (!bi->skb) { 1251 PAGE_SIZE / 2,
1072 struct sk_buff *skb; 1252 DMA_FROM_DEVICE);
1073 /* netdev_alloc_skb reserves 32 bytes up front!! */ 1253 if (dma_mapping_error(rx_ring->dev,
1074 uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES; 1254 bi->page_dma)) {
1075 skb = netdev_alloc_skb(adapter->netdev, bufsz); 1255 rx_ring->rx_stats.alloc_rx_page_failed++;
1076 1256 bi->page_dma = 0;
1077 if (!skb) { 1257 goto no_buffers;
1078 adapter->alloc_rx_buff_failed++; 1258 }
1079 goto no_buffers;
1080 } 1259 }
1081 1260
1082 /* advance the data pointer to the next cache line */ 1261 /* Refresh the desc even if buffer_addrs didn't change
1083 skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES) 1262 * because each write-back erases this info. */
1084 - skb->data));
1085
1086 bi->skb = skb;
1087 bi->dma = dma_map_single(&pdev->dev, skb->data,
1088 rx_ring->rx_buf_len,
1089 DMA_FROM_DEVICE);
1090 }
1091 /* Refresh the desc even if buffer_addrs didn't change because
1092 * each write-back erases this info. */
1093 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
1094 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); 1263 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1095 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); 1264 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1096 } else { 1265 } else {
1097 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); 1266 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1267 rx_desc->read.hdr_addr = 0;
1098 } 1268 }
1099 1269
1100 i++; 1270 i++;
1101 if (i == rx_ring->count) 1271 if (i == rx_ring->count)
1102 i = 0; 1272 i = 0;
1103 bi = &rx_ring->rx_buffer_info[i];
1104 } 1273 }
1105 1274
1106no_buffers: 1275no_buffers:
1107 if (rx_ring->next_to_use != i) { 1276 if (rx_ring->next_to_use != i) {
1108 rx_ring->next_to_use = i; 1277 rx_ring->next_to_use = i;
1109 if (i-- == 0) 1278 ixgbe_release_rx_desc(rx_ring, i);
1110 i = (rx_ring->count - 1);
1111
1112 ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
1113 } 1279 }
1114} 1280}
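
Each mapping in the refill loop above is paired with a dma_mapping_error() check; on failure the stats counter is bumped, the slot is zeroed, and the loop bails to no_buffers so a later refill retries. A condensed sketch of that map-then-check idiom (helper name hypothetical):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Returns 0 on success, -ENOMEM if the streaming mapping failed. */
static int demo_map_rx_skb(struct device *dev, struct sk_buff *skb,
			   unsigned int buf_len, dma_addr_t *dma)
{
	*dma = dma_map_single(dev, skb->data, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		*dma = 0;	/* leave the slot marked as unmapped */
		return -ENOMEM;	/* caller counts the failure, retries later */
	}
	return 0;
}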
1115 1281
1116static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc) 1282static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
1117{
1118 return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
1119}
1120
1121static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
1122{ 1283{
1123 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; 1284 /* HW will not DMA in data larger than the given buffer, even if it
1124} 1285 * parses the (NFS, of course) header to be larger. In that case, it
1125 1286 * fills the header buffer and spills the rest into the page.
1126static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc) 1287 */
1127{ 1288 u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
1128 return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & 1289 u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
1129 IXGBE_RXDADV_RSCCNT_MASK) >> 1290 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
1130 IXGBE_RXDADV_RSCCNT_SHIFT; 1291 if (hlen > IXGBE_RX_HDR_SIZE)
1292 hlen = IXGBE_RX_HDR_SIZE;
1293 return hlen;
1131} 1294}
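
ixgbe_get_hlen() above is the usual descriptor-field recipe: convert from the wire's little-endian layout, mask and shift the field out, then clamp to the buffer size the driver actually allocated instead of trusting the hardware. The same recipe in isolation; the field layout below is illustrative, not the ixgbe one:

#include <asm/byteorder.h>
#include <linux/kernel.h>	/* min_t() */
#include <linux/types.h>

#define DEMO_HDRLEN_MASK	0x03FF	/* illustrative bit layout */
#define DEMO_HDRLEN_SHIFT	0
#define DEMO_HDR_MAX		256	/* size we really allocated */

static u16 demo_get_hlen(__le16 raw)
{
	u16 hlen = (le16_to_cpu(raw) & DEMO_HDRLEN_MASK) >> DEMO_HDRLEN_SHIFT;

	return min_t(u16, hlen, DEMO_HDR_MAX);	/* never trust a HW length */
}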
1132 1295
1133/** 1296/**
1134 * ixgbe_transform_rsc_queue - change rsc queue into a full packet 1297 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
1135 * @skb: pointer to the last skb in the rsc queue 1298 * @skb: pointer to the last skb in the rsc queue
1136 * @count: pointer to number of packets coalesced in this context
1137 * 1299 *
1138 * This function changes a queue full of hw rsc buffers into a completed 1300 * This function changes a queue full of hw rsc buffers into a completed
1139 * packet. It uses the ->prev pointers to find the first packet and then 1301 * packet. It uses the ->prev pointers to find the first packet and then
1140 * turns it into the frag list owner. 1302 * turns it into the frag list owner.
1141 **/ 1303 **/
1142static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb, 1304static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
1143 u64 *count)
1144{ 1305{
1145 unsigned int frag_list_size = 0; 1306 unsigned int frag_list_size = 0;
1307 unsigned int skb_cnt = 1;
1146 1308
1147 while (skb->prev) { 1309 while (skb->prev) {
1148 struct sk_buff *prev = skb->prev; 1310 struct sk_buff *prev = skb->prev;
1149 frag_list_size += skb->len; 1311 frag_list_size += skb->len;
1150 skb->prev = NULL; 1312 skb->prev = NULL;
1151 skb = prev; 1313 skb = prev;
1152 *count += 1; 1314 skb_cnt++;
1153 } 1315 }
1154 1316
1155 skb_shinfo(skb)->frag_list = skb->next; 1317 skb_shinfo(skb)->frag_list = skb->next;
@@ -1157,69 +1319,59 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
1157 skb->len += frag_list_size; 1319 skb->len += frag_list_size;
1158 skb->data_len += frag_list_size; 1320 skb->data_len += frag_list_size;
1159 skb->truesize += frag_list_size; 1321 skb->truesize += frag_list_size;
1322 IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
1323
1160 return skb; 1324 return skb;
1161} 1325}
1162 1326
1163struct ixgbe_rsc_cb { 1327static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
1164 dma_addr_t dma; 1328{
1165 bool delay_unmap; 1329 return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
1166}; 1330 IXGBE_RXDADV_RSCCNT_MASK);
1167 1331}
1168#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
1169 1332
1170static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, 1333static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1171 struct ixgbe_ring *rx_ring, 1334 struct ixgbe_ring *rx_ring,
1172 int *work_done, int work_to_do) 1335 int *work_done, int work_to_do)
1173{ 1336{
1174 struct ixgbe_adapter *adapter = q_vector->adapter; 1337 struct ixgbe_adapter *adapter = q_vector->adapter;
1175 struct net_device *netdev = adapter->netdev;
1176 struct pci_dev *pdev = adapter->pdev;
1177 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 1338 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
1178 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; 1339 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
1179 struct sk_buff *skb; 1340 struct sk_buff *skb;
1180 unsigned int i, rsc_count = 0;
1181 u32 len, staterr;
1182 u16 hdr_info;
1183 bool cleaned = false;
1184 int cleaned_count = 0;
1185 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 1341 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1342 const int current_node = numa_node_id();
1186#ifdef IXGBE_FCOE 1343#ifdef IXGBE_FCOE
1187 int ddp_bytes = 0; 1344 int ddp_bytes = 0;
1188#endif /* IXGBE_FCOE */ 1345#endif /* IXGBE_FCOE */
1346 u32 staterr;
1347 u16 i;
1348 u16 cleaned_count = 0;
1349 bool pkt_is_rsc = false;
1189 1350
1190 i = rx_ring->next_to_clean; 1351 i = rx_ring->next_to_clean;
1191 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 1352 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
1192 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1353 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1193 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1194 1354
1195 while (staterr & IXGBE_RXD_STAT_DD) { 1355 while (staterr & IXGBE_RXD_STAT_DD) {
1196 u32 upper_len = 0; 1356 u32 upper_len = 0;
1197 if (*work_done >= work_to_do)
1198 break;
1199 (*work_done)++;
1200 1357
1201 rmb(); /* read descriptor and rx_buffer_info after status DD */ 1358 rmb(); /* read descriptor and rx_buffer_info after status DD */
1202 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
1203 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
1204 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
1205 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
1206 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1207 if ((len > IXGBE_RX_HDR_SIZE) ||
1208 (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
1209 len = IXGBE_RX_HDR_SIZE;
1210 } else {
1211 len = le16_to_cpu(rx_desc->wb.upper.length);
1212 }
1213 1359
1214 cleaned = true; 1360 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1361
1215 skb = rx_buffer_info->skb; 1362 skb = rx_buffer_info->skb;
1216 prefetch(skb->data);
1217 rx_buffer_info->skb = NULL; 1363 rx_buffer_info->skb = NULL;
1364 prefetch(skb->data);
1365
1366 if (ring_is_rsc_enabled(rx_ring))
1367 pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
1218 1368
 1369 /* if this is an skb from a previous receive, DMA will be 0 */
1219 if (rx_buffer_info->dma) { 1370 if (rx_buffer_info->dma) {
1220 if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && 1371 u16 hlen;
1221 (!(staterr & IXGBE_RXD_STAT_EOP)) && 1372 if (pkt_is_rsc &&
1222 (!(skb->prev))) { 1373 !(staterr & IXGBE_RXD_STAT_EOP) &&
1374 !skb->prev) {
1223 /* 1375 /*
1224 * When HWRSC is enabled, delay unmapping 1376 * When HWRSC is enabled, delay unmapping
1225 * of the first packet. It carries the 1377 * of the first packet. It carries the
@@ -1230,29 +1382,42 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1230 IXGBE_RSC_CB(skb)->delay_unmap = true; 1382 IXGBE_RSC_CB(skb)->delay_unmap = true;
1231 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; 1383 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
1232 } else { 1384 } else {
1233 dma_unmap_single(&pdev->dev, 1385 dma_unmap_single(rx_ring->dev,
1234 rx_buffer_info->dma, 1386 rx_buffer_info->dma,
1235 rx_ring->rx_buf_len, 1387 rx_ring->rx_buf_len,
1236 DMA_FROM_DEVICE); 1388 DMA_FROM_DEVICE);
1237 } 1389 }
1238 rx_buffer_info->dma = 0; 1390 rx_buffer_info->dma = 0;
1239 skb_put(skb, len); 1391
1392 if (ring_is_ps_enabled(rx_ring)) {
1393 hlen = ixgbe_get_hlen(rx_desc);
1394 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1395 } else {
1396 hlen = le16_to_cpu(rx_desc->wb.upper.length);
1397 }
1398
1399 skb_put(skb, hlen);
1400 } else {
1401 /* assume packet split since header is unmapped */
1402 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1240 } 1403 }
1241 1404
1242 if (upper_len) { 1405 if (upper_len) {
1243 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, 1406 dma_unmap_page(rx_ring->dev,
1244 PAGE_SIZE / 2, DMA_FROM_DEVICE); 1407 rx_buffer_info->page_dma,
1408 PAGE_SIZE / 2,
1409 DMA_FROM_DEVICE);
1245 rx_buffer_info->page_dma = 0; 1410 rx_buffer_info->page_dma = 0;
1246 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 1411 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1247 rx_buffer_info->page, 1412 rx_buffer_info->page,
1248 rx_buffer_info->page_offset, 1413 rx_buffer_info->page_offset,
1249 upper_len); 1414 upper_len);
1250 1415
1251 if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) || 1416 if ((page_count(rx_buffer_info->page) == 1) &&
1252 (page_count(rx_buffer_info->page) != 1)) 1417 (page_to_nid(rx_buffer_info->page) == current_node))
1253 rx_buffer_info->page = NULL;
1254 else
1255 get_page(rx_buffer_info->page); 1418 get_page(rx_buffer_info->page);
1419 else
1420 rx_buffer_info->page = NULL;
1256 1421
1257 skb->len += upper_len; 1422 skb->len += upper_len;
1258 skb->data_len += upper_len; 1423 skb->data_len += upper_len;
@@ -1263,14 +1428,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1263 if (i == rx_ring->count) 1428 if (i == rx_ring->count)
1264 i = 0; 1429 i = 0;
1265 1430
1266 next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i); 1431 next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
1267 prefetch(next_rxd); 1432 prefetch(next_rxd);
1268 cleaned_count++; 1433 cleaned_count++;
1269 1434
1270 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) 1435 if (pkt_is_rsc) {
1271 rsc_count = ixgbe_get_rsc_count(rx_desc);
1272
1273 if (rsc_count) {
1274 u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> 1436 u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1275 IXGBE_RXDADV_NEXTP_SHIFT; 1437 IXGBE_RXDADV_NEXTP_SHIFT;
1276 next_buffer = &rx_ring->rx_buffer_info[nextp]; 1438 next_buffer = &rx_ring->rx_buffer_info[nextp];
@@ -1278,28 +1440,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1278 next_buffer = &rx_ring->rx_buffer_info[i]; 1440 next_buffer = &rx_ring->rx_buffer_info[i];
1279 } 1441 }
1280 1442
1281 if (staterr & IXGBE_RXD_STAT_EOP) { 1443 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
1282 if (skb->prev) 1444 if (ring_is_ps_enabled(rx_ring)) {
1283 skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
1284 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
1285 if (IXGBE_RSC_CB(skb)->delay_unmap) {
1286 dma_unmap_single(&pdev->dev,
1287 IXGBE_RSC_CB(skb)->dma,
1288 rx_ring->rx_buf_len,
1289 DMA_FROM_DEVICE);
1290 IXGBE_RSC_CB(skb)->dma = 0;
1291 IXGBE_RSC_CB(skb)->delay_unmap = false;
1292 }
1293 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
1294 rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
1295 else
1296 rx_ring->rsc_count++;
1297 rx_ring->rsc_flush++;
1298 }
1299 rx_ring->stats.packets++;
1300 rx_ring->stats.bytes += skb->len;
1301 } else {
1302 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
1303 rx_buffer_info->skb = next_buffer->skb; 1445 rx_buffer_info->skb = next_buffer->skb;
1304 rx_buffer_info->dma = next_buffer->dma; 1446 rx_buffer_info->dma = next_buffer->dma;
1305 next_buffer->skb = skb; 1447 next_buffer->skb = skb;
@@ -1308,22 +1450,57 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1308 skb->next = next_buffer->skb; 1450 skb->next = next_buffer->skb;
1309 skb->next->prev = skb; 1451 skb->next->prev = skb;
1310 } 1452 }
1311 rx_ring->non_eop_descs++; 1453 rx_ring->rx_stats.non_eop_descs++;
1312 goto next_desc; 1454 goto next_desc;
1313 } 1455 }
1314 1456
1457 if (skb->prev) {
1458 skb = ixgbe_transform_rsc_queue(skb);
1459 /* if we got here without RSC the packet is invalid */
1460 if (!pkt_is_rsc) {
1461 __pskb_trim(skb, 0);
1462 rx_buffer_info->skb = skb;
1463 goto next_desc;
1464 }
1465 }
1466
1467 if (ring_is_rsc_enabled(rx_ring)) {
1468 if (IXGBE_RSC_CB(skb)->delay_unmap) {
1469 dma_unmap_single(rx_ring->dev,
1470 IXGBE_RSC_CB(skb)->dma,
1471 rx_ring->rx_buf_len,
1472 DMA_FROM_DEVICE);
1473 IXGBE_RSC_CB(skb)->dma = 0;
1474 IXGBE_RSC_CB(skb)->delay_unmap = false;
1475 }
1476 }
1477 if (pkt_is_rsc) {
1478 if (ring_is_ps_enabled(rx_ring))
1479 rx_ring->rx_stats.rsc_count +=
1480 skb_shinfo(skb)->nr_frags;
1481 else
1482 rx_ring->rx_stats.rsc_count +=
1483 IXGBE_RSC_CB(skb)->skb_cnt;
1484 rx_ring->rx_stats.rsc_flush++;
1485 }
1486
1487 /* ERR_MASK will only have valid bits if EOP set */
1315 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) { 1488 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
1316 dev_kfree_skb_irq(skb); 1489 /* trim packet back to size 0 and recycle it */
1490 __pskb_trim(skb, 0);
1491 rx_buffer_info->skb = skb;
1317 goto next_desc; 1492 goto next_desc;
1318 } 1493 }
1319 1494
1320 ixgbe_rx_checksum(adapter, rx_desc, skb); 1495 ixgbe_rx_checksum(adapter, rx_desc, skb);
1496 if (adapter->netdev->features & NETIF_F_RXHASH)
1497 ixgbe_rx_hash(rx_desc, skb);
1321 1498
1322 /* probably a little skewed due to removing CRC */ 1499 /* probably a little skewed due to removing CRC */
1323 total_rx_bytes += skb->len; 1500 total_rx_bytes += skb->len;
1324 total_rx_packets++; 1501 total_rx_packets++;
1325 1502
1326 skb->protocol = eth_type_trans(skb, adapter->netdev); 1503 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1327#ifdef IXGBE_FCOE 1504#ifdef IXGBE_FCOE
1328 /* if ddp, not passing to ULD unless for FCP_RSP or error */ 1505 /* if ddp, not passing to ULD unless for FCP_RSP or error */
1329 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 1506 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
@@ -1337,16 +1514,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1337next_desc: 1514next_desc:
1338 rx_desc->wb.upper.status_error = 0; 1515 rx_desc->wb.upper.status_error = 0;
1339 1516
1517 (*work_done)++;
1518 if (*work_done >= work_to_do)
1519 break;
1520
1340 /* return some buffers to hardware, one at a time is too slow */ 1521 /* return some buffers to hardware, one at a time is too slow */
1341 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { 1522 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
1342 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 1523 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1343 cleaned_count = 0; 1524 cleaned_count = 0;
1344 } 1525 }
1345 1526
1346 /* use prefetched values */ 1527 /* use prefetched values */
1347 rx_desc = next_rxd; 1528 rx_desc = next_rxd;
1348 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1349
1350 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1529 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1351 } 1530 }
1352 1531
@@ -1354,14 +1533,14 @@ next_desc:
1354 cleaned_count = IXGBE_DESC_UNUSED(rx_ring); 1533 cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
1355 1534
1356 if (cleaned_count) 1535 if (cleaned_count)
1357 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 1536 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1358 1537
1359#ifdef IXGBE_FCOE 1538#ifdef IXGBE_FCOE
1360 /* include DDPed FCoE data */ 1539 /* include DDPed FCoE data */
1361 if (ddp_bytes > 0) { 1540 if (ddp_bytes > 0) {
1362 unsigned int mss; 1541 unsigned int mss;
1363 1542
1364 mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) - 1543 mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
1365 sizeof(struct fc_frame_header) - 1544 sizeof(struct fc_frame_header) -
1366 sizeof(struct fcoe_crc_eof); 1545 sizeof(struct fcoe_crc_eof);
1367 if (mss > 512) 1546 if (mss > 512)
@@ -1373,10 +1552,10 @@ next_desc:
1373 1552
1374 rx_ring->total_packets += total_rx_packets; 1553 rx_ring->total_packets += total_rx_packets;
1375 rx_ring->total_bytes += total_rx_bytes; 1554 rx_ring->total_bytes += total_rx_bytes;
1376 netdev->stats.rx_bytes += total_rx_bytes; 1555 u64_stats_update_begin(&rx_ring->syncp);
1377 netdev->stats.rx_packets += total_rx_packets; 1556 rx_ring->stats.packets += total_rx_packets;
1378 1557 rx_ring->stats.bytes += total_rx_bytes;
1379 return cleaned; 1558 u64_stats_update_end(&rx_ring->syncp);
1380} 1559}
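
The function now publishes its totals inside a u64_stats_update_begin()/u64_stats_update_end() section, so the 64-bit packet and byte counters can be read without tearing on 32-bit SMP while costing nothing on 64-bit. A minimal writer/reader sketch of that API (the stats struct is hypothetical):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void demo_stats_add(struct demo_stats *s, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&s->syncp);	/* open writer section */
	s->packets += pkts;
	s->bytes += bytes;
	u64_stats_update_end(&s->syncp);	/* close writer section */
}

static void demo_stats_read(struct demo_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {	/* retry if a writer raced with this snapshot */
		start = u64_stats_fetch_begin(&s->syncp);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}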
1381 1560
1382static int ixgbe_clean_rxonly(struct napi_struct *, int); 1561static int ixgbe_clean_rxonly(struct napi_struct *, int);
@@ -1390,7 +1569,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int);
1390static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) 1569static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1391{ 1570{
1392 struct ixgbe_q_vector *q_vector; 1571 struct ixgbe_q_vector *q_vector;
1393 int i, j, q_vectors, v_idx, r_idx; 1572 int i, q_vectors, v_idx, r_idx;
1394 u32 mask; 1573 u32 mask;
1395 1574
1396 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1575 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -1403,24 +1582,24 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1403 q_vector = adapter->q_vector[v_idx]; 1582 q_vector = adapter->q_vector[v_idx];
1404 /* XXX for_each_set_bit(...) */ 1583 /* XXX for_each_set_bit(...) */
1405 r_idx = find_first_bit(q_vector->rxr_idx, 1584 r_idx = find_first_bit(q_vector->rxr_idx,
1406 adapter->num_rx_queues); 1585 adapter->num_rx_queues);
1407 1586
1408 for (i = 0; i < q_vector->rxr_count; i++) { 1587 for (i = 0; i < q_vector->rxr_count; i++) {
1409 j = adapter->rx_ring[r_idx]->reg_idx; 1588 u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
1410 ixgbe_set_ivar(adapter, 0, j, v_idx); 1589 ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
1411 r_idx = find_next_bit(q_vector->rxr_idx, 1590 r_idx = find_next_bit(q_vector->rxr_idx,
1412 adapter->num_rx_queues, 1591 adapter->num_rx_queues,
1413 r_idx + 1); 1592 r_idx + 1);
1414 } 1593 }
1415 r_idx = find_first_bit(q_vector->txr_idx, 1594 r_idx = find_first_bit(q_vector->txr_idx,
1416 adapter->num_tx_queues); 1595 adapter->num_tx_queues);
1417 1596
1418 for (i = 0; i < q_vector->txr_count; i++) { 1597 for (i = 0; i < q_vector->txr_count; i++) {
1419 j = adapter->tx_ring[r_idx]->reg_idx; 1598 u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
1420 ixgbe_set_ivar(adapter, 1, j, v_idx); 1599 ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
1421 r_idx = find_next_bit(q_vector->txr_idx, 1600 r_idx = find_next_bit(q_vector->txr_idx,
1422 adapter->num_tx_queues, 1601 adapter->num_tx_queues,
1423 r_idx + 1); 1602 r_idx + 1);
1424 } 1603 }
1425 1604
1426 if (q_vector->txr_count && !q_vector->rxr_count) 1605 if (q_vector->txr_count && !q_vector->rxr_count)
@@ -1431,13 +1610,36 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1431 q_vector->eitr = adapter->rx_eitr_param; 1610 q_vector->eitr = adapter->rx_eitr_param;
1432 1611
1433 ixgbe_write_eitr(q_vector); 1612 ixgbe_write_eitr(q_vector);
1613 /* If Flow Director is enabled, set interrupt affinity */
1614 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
1615 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
1616 /*
1617 * Allocate the affinity_hint cpumask, assign the mask
1618 * for this vector, and set our affinity_hint for
1619 * this irq.
1620 */
1621 if (!alloc_cpumask_var(&q_vector->affinity_mask,
1622 GFP_KERNEL))
1623 return;
1624 cpumask_set_cpu(v_idx, q_vector->affinity_mask);
1625 irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
1626 q_vector->affinity_mask);
1627 }
1434 } 1628 }
1435 1629
1436 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 1630 switch (adapter->hw.mac.type) {
1631 case ixgbe_mac_82598EB:
1437 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, 1632 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
1438 v_idx); 1633 v_idx);
1439 else if (adapter->hw.mac.type == ixgbe_mac_82599EB) 1634 break;
1635 case ixgbe_mac_82599EB:
1636 case ixgbe_mac_X540:
1440 ixgbe_set_ivar(adapter, -1, 1, v_idx); 1637 ixgbe_set_ivar(adapter, -1, 1, v_idx);
1638 break;
1639
1640 default:
1641 break;
1642 }
1441 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); 1643 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
1442 1644
1443 /* set up to autoclear timer, and the vectors */ 1645 /* set up to autoclear timer, and the vectors */
@@ -1477,8 +1679,8 @@ enum latency_range {
1477 * parameter (see ixgbe_param.c) 1679 * parameter (see ixgbe_param.c)
1478 **/ 1680 **/
1479static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, 1681static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
1480 u32 eitr, u8 itr_setting, 1682 u32 eitr, u8 itr_setting,
1481 int packets, int bytes) 1683 int packets, int bytes)
1482{ 1684{
1483 unsigned int retval = itr_setting; 1685 unsigned int retval = itr_setting;
1484 u32 timepassed_us; 1686 u32 timepassed_us;
@@ -1533,12 +1735,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1533 int v_idx = q_vector->v_idx; 1735 int v_idx = q_vector->v_idx;
1534 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); 1736 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
1535 1737
1536 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1738 switch (adapter->hw.mac.type) {
1739 case ixgbe_mac_82598EB:
1537 /* must write high and low 16 bits to reset counter */ 1740 /* must write high and low 16 bits to reset counter */
1538 itr_reg |= (itr_reg << 16); 1741 itr_reg |= (itr_reg << 16);
1539 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1742 break;
1743 case ixgbe_mac_82599EB:
1744 case ixgbe_mac_X540:
1540 /* 1745 /*
1541 * 82599 can support a value of zero, so allow it for 1746 * 82599 and X540 can support a value of zero, so allow it for
1542 * max interrupt rate, but there is an errata where it can 1747 * max interrupt rate, but there is an errata where it can
1543 * not be zero with RSC 1748 * not be zero with RSC
1544 */ 1749 */
@@ -1551,6 +1756,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1551 * immediate assertion of the interrupt 1756 * immediate assertion of the interrupt
1552 */ 1757 */
1553 itr_reg |= IXGBE_EITR_CNT_WDIS; 1758 itr_reg |= IXGBE_EITR_CNT_WDIS;
1759 break;
1760 default:
1761 break;
1554 } 1762 }
1555 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); 1763 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
1556} 1764}
@@ -1558,39 +1766,38 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1558static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) 1766static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1559{ 1767{
1560 struct ixgbe_adapter *adapter = q_vector->adapter; 1768 struct ixgbe_adapter *adapter = q_vector->adapter;
1769 int i, r_idx;
1561 u32 new_itr; 1770 u32 new_itr;
1562 u8 current_itr, ret_itr; 1771 u8 current_itr, ret_itr;
1563 int i, r_idx;
1564 struct ixgbe_ring *rx_ring, *tx_ring;
1565 1772
1566 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 1773 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1567 for (i = 0; i < q_vector->txr_count; i++) { 1774 for (i = 0; i < q_vector->txr_count; i++) {
1568 tx_ring = adapter->tx_ring[r_idx]; 1775 struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
1569 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 1776 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1570 q_vector->tx_itr, 1777 q_vector->tx_itr,
1571 tx_ring->total_packets, 1778 tx_ring->total_packets,
1572 tx_ring->total_bytes); 1779 tx_ring->total_bytes);
1573 /* if the result for this queue would decrease interrupt 1780 /* if the result for this queue would decrease interrupt
1574 * rate for this vector then use that result */ 1781 * rate for this vector then use that result */
1575 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ? 1782 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
1576 q_vector->tx_itr - 1 : ret_itr); 1783 q_vector->tx_itr - 1 : ret_itr);
1577 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 1784 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1578 r_idx + 1); 1785 r_idx + 1);
1579 } 1786 }
1580 1787
1581 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1788 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1582 for (i = 0; i < q_vector->rxr_count; i++) { 1789 for (i = 0; i < q_vector->rxr_count; i++) {
1583 rx_ring = adapter->rx_ring[r_idx]; 1790 struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
1584 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 1791 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1585 q_vector->rx_itr, 1792 q_vector->rx_itr,
1586 rx_ring->total_packets, 1793 rx_ring->total_packets,
1587 rx_ring->total_bytes); 1794 rx_ring->total_bytes);
1588 /* if the result for this queue would decrease interrupt 1795 /* if the result for this queue would decrease interrupt
1589 * rate for this vector then use that result */ 1796 * rate for this vector then use that result */
1590 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ? 1797 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
1591 q_vector->rx_itr - 1 : ret_itr); 1798 q_vector->rx_itr - 1 : ret_itr);
1592 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 1799 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1593 r_idx + 1); 1800 r_idx + 1);
1594 } 1801 }
1595 1802
1596 current_itr = max(q_vector->rx_itr, q_vector->tx_itr); 1803 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
@@ -1611,7 +1818,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1611 1818
1612 if (new_itr != q_vector->eitr) { 1819 if (new_itr != q_vector->eitr) {
1613 /* do an exponential smoothing */ 1820 /* do an exponential smoothing */
1614 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 1821 new_itr = ((q_vector->eitr * 9) + new_itr)/10;
1615 1822
1616 /* save the algorithm value here, not the smoothed one */ 1823 /* save the algorithm value here, not the smoothed one */
1617 q_vector->eitr = new_itr; 1824 q_vector->eitr = new_itr;
@@ -1621,45 +1828,62 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1621} 1828}
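
The smoothing rewrite in this hunk, from ((eitr * 90)/100) + ((new_itr * 10)/100) to ((eitr * 9) + new_itr)/10, keeps the same 90/10 weighting but truncates once instead of twice, so it can no longer undershoot by a whole step. A worked case with eitr = 8 and new_itr = 18 (exact weighted average: 0.9*8 + 0.1*18 = 9):

	int smoothed_old = (8 * 90) / 100 + (18 * 10) / 100;	/* = 7 + 1 = 8, undershoots */
	int smoothed_new = ((8 * 9) + 18) / 10;			/* = 90 / 10 = 9, exact here */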
1622 1829
1623/** 1830/**
1624 * ixgbe_check_overtemp_task - worker thread to check over temperature 1831 * ixgbe_check_overtemp_subtask - check for over temperature
1625 * @work: pointer to work_struct containing our data 1832 * @adapter: pointer to adapter
1626 **/ 1833 **/
1627static void ixgbe_check_overtemp_task(struct work_struct *work) 1834static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
1628{ 1835{
1629 struct ixgbe_adapter *adapter = container_of(work,
1630 struct ixgbe_adapter,
1631 check_overtemp_task);
1632 struct ixgbe_hw *hw = &adapter->hw; 1836 struct ixgbe_hw *hw = &adapter->hw;
1633 u32 eicr = adapter->interrupt_event; 1837 u32 eicr = adapter->interrupt_event;
1634 1838
1635 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { 1839 if (test_bit(__IXGBE_DOWN, &adapter->state))
1636 switch (hw->device_id) { 1840 return;
1637 case IXGBE_DEV_ID_82599_T3_LOM: { 1841
1842 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1843 !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
1844 return;
1845
1846 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
1847
1848 switch (hw->device_id) {
1849 case IXGBE_DEV_ID_82599_T3_LOM:
1850 /*
 1851 * Since the warning interrupt is shared by both ports,
 1852 * two things follow:
 1853 * - We cannot tell whether this interrupt was for our port.
 1854 * - We may have missed the interrupt, so we always have to
 1855 * check whether we also got an LSC.
1856 */
1857 if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
1858 !(eicr & IXGBE_EICR_LSC))
1859 return;
1860
1861 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
1638 u32 autoneg; 1862 u32 autoneg;
1639 bool link_up = false; 1863 bool link_up = false;
1640 1864
1641 if (hw->mac.ops.check_link) 1865 hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
1642 hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
1643 1866
1644 if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) || 1867 if (link_up)
1645 (eicr & IXGBE_EICR_LSC))
1646 /* Check if this is due to overtemp */
1647 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
1648 break;
1649 }
1650 return;
1651 default:
1652 if (!(eicr & IXGBE_EICR_GPI_SDP0))
1653 return; 1868 return;
1654 break;
1655 } 1869 }
1656 e_crit(drv, "Network adapter has been stopped because it has " 1870
1657 "over heated. Restart the computer. If the problem " 1871 /* Check if this is not due to overtemp */
1658 "persists, power off the system and replace the " 1872 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
1659 "adapter\n"); 1873 return;
1660 /* write to clear the interrupt */ 1874
1661 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); 1875 break;
1876 default:
1877 if (!(eicr & IXGBE_EICR_GPI_SDP0))
1878 return;
1879 break;
1662 } 1880 }
1881 e_crit(drv,
1882 "Network adapter has been stopped because it has over heated. "
1883 "Restart the computer. If the problem persists, "
1884 "power off the system and replace the adapter\n");
1885
1886 adapter->interrupt_event = 0;
1663} 1887}
1664 1888
1665static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) 1889static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
@@ -1678,17 +1902,22 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
1678{ 1902{
1679 struct ixgbe_hw *hw = &adapter->hw; 1903 struct ixgbe_hw *hw = &adapter->hw;
1680 1904
1905 if (eicr & IXGBE_EICR_GPI_SDP2) {
1906 /* Clear the interrupt */
1907 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1908 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1909 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
1910 ixgbe_service_event_schedule(adapter);
1911 }
1912 }
1913
1681 if (eicr & IXGBE_EICR_GPI_SDP1) { 1914 if (eicr & IXGBE_EICR_GPI_SDP1) {
1682 /* Clear the interrupt */ 1915 /* Clear the interrupt */
1683 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 1916 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1684 schedule_work(&adapter->multispeed_fiber_task); 1917 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1685 } else if (eicr & IXGBE_EICR_GPI_SDP2) { 1918 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
1686 /* Clear the interrupt */ 1919 ixgbe_service_event_schedule(adapter);
1687 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); 1920 }
1688 schedule_work(&adapter->sfp_config_module_task);
1689 } else {
1690 /* Interrupt isn't for us... */
1691 return;
1692 } 1921 }
1693} 1922}
1694 1923
@@ -1702,7 +1931,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
1702 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 1931 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1703 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 1932 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1704 IXGBE_WRITE_FLUSH(hw); 1933 IXGBE_WRITE_FLUSH(hw);
1705 schedule_work(&adapter->watchdog_task); 1934 ixgbe_service_event_schedule(adapter);
1706 } 1935 }
1707} 1936}
1708 1937
@@ -1728,33 +1957,47 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1728 if (eicr & IXGBE_EICR_MAILBOX) 1957 if (eicr & IXGBE_EICR_MAILBOX)
1729 ixgbe_msg_task(adapter); 1958 ixgbe_msg_task(adapter);
1730 1959
1731 if (hw->mac.type == ixgbe_mac_82598EB) 1960 switch (hw->mac.type) {
1732 ixgbe_check_fan_failure(adapter, eicr); 1961 case ixgbe_mac_82599EB:
1733 1962 case ixgbe_mac_X540:
1734 if (hw->mac.type == ixgbe_mac_82599EB) {
1735 ixgbe_check_sfp_event(adapter, eicr);
1736 adapter->interrupt_event = eicr;
1737 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1738 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
1739 schedule_work(&adapter->check_overtemp_task);
1740
1741 /* Handle Flow Director Full threshold interrupt */ 1963 /* Handle Flow Director Full threshold interrupt */
1742 if (eicr & IXGBE_EICR_FLOW_DIR) { 1964 if (eicr & IXGBE_EICR_FLOW_DIR) {
1965 int reinit_count = 0;
1743 int i; 1966 int i;
1744 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
1745 /* Disable transmits before FDIR Re-initialization */
1746 netif_tx_stop_all_queues(netdev);
1747 for (i = 0; i < adapter->num_tx_queues; i++) { 1967 for (i = 0; i < adapter->num_tx_queues; i++) {
1748 struct ixgbe_ring *tx_ring = 1968 struct ixgbe_ring *ring = adapter->tx_ring[i];
1749 adapter->tx_ring[i]; 1969 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
1750 if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE, 1970 &ring->state))
1751 &tx_ring->reinit_state)) 1971 reinit_count++;
1752 schedule_work(&adapter->fdir_reinit_task); 1972 }
1973 if (reinit_count) {
1974 /* no more flow director interrupts until after init */
1975 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
1976 eicr &= ~IXGBE_EICR_FLOW_DIR;
1977 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
1978 ixgbe_service_event_schedule(adapter);
1753 } 1979 }
1754 } 1980 }
1981 ixgbe_check_sfp_event(adapter, eicr);
1982 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1983 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
1984 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1985 adapter->interrupt_event = eicr;
1986 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
1987 ixgbe_service_event_schedule(adapter);
1988 }
1989 }
1990 break;
1991 default:
1992 break;
1755 } 1993 }
1994
1995 ixgbe_check_fan_failure(adapter, eicr);
1996
1997 /* re-enable the original interrupt state, no lsc, no queues */
1756 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1998 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1757 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 1999 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr &
2000 ~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE));
1758 2001
1759 return IRQ_HANDLED; 2002 return IRQ_HANDLED;
1760} 2003}
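
The Flow Director branch above illustrates the commit's broader shift from per-event work items to one service task: the hard-IRQ handler only claims work with test_and_clear_bit(), masks the interrupt source, sets a flags2 bit, and schedules the service event. A stripped-down sketch of the claim step (state layout and bit index hypothetical):

#include <linux/bitops.h>

/*
 * Hard-IRQ side: count rings whose "init done" bit we managed to clear.
 * test_and_clear_bit() is atomic, so exactly one CPU claims each ring
 * and the service task later re-initializes only the claimed ones.
 */
static int demo_claim_fdir_reinit(unsigned long *ring_state, int nrings)
{
	int i, claimed = 0;

	for (i = 0; i < nrings; i++)
		if (test_and_clear_bit(0, &ring_state[i]))
			claimed++;

	return claimed;	/* caller schedules the service task if nonzero */
}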
@@ -1763,32 +2006,50 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1763 u64 qmask) 2006 u64 qmask)
1764{ 2007{
1765 u32 mask; 2008 u32 mask;
2009 struct ixgbe_hw *hw = &adapter->hw;
1766 2010
1767 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2011 switch (hw->mac.type) {
2012 case ixgbe_mac_82598EB:
1768 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 2013 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1769 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 2014 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1770 } else { 2015 break;
2016 case ixgbe_mac_82599EB:
2017 case ixgbe_mac_X540:
1771 mask = (qmask & 0xFFFFFFFF); 2018 mask = (qmask & 0xFFFFFFFF);
1772 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask); 2019 if (mask)
2020 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1773 mask = (qmask >> 32); 2021 mask = (qmask >> 32);
1774 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask); 2022 if (mask)
2023 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2024 break;
2025 default:
2026 break;
1775 } 2027 }
1776 /* skip the flush */ 2028 /* skip the flush */
1777} 2029}
1778 2030
1779static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, 2031static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
1780 u64 qmask) 2032 u64 qmask)
1781{ 2033{
1782 u32 mask; 2034 u32 mask;
2035 struct ixgbe_hw *hw = &adapter->hw;
1783 2036
1784 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2037 switch (hw->mac.type) {
2038 case ixgbe_mac_82598EB:
1785 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 2039 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1786 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask); 2040 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1787 } else { 2041 break;
2042 case ixgbe_mac_82599EB:
2043 case ixgbe_mac_X540:
1788 mask = (qmask & 0xFFFFFFFF); 2044 mask = (qmask & 0xFFFFFFFF);
1789 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask); 2045 if (mask)
2046 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1790 mask = (qmask >> 32); 2047 mask = (qmask >> 32);
1791 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask); 2048 if (mask)
2049 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2050 break;
2051 default:
2052 break;
1792 } 2053 }
1793 /* skip the flush */ 2054 /* skip the flush */
1794} 2055}
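
On 82599/X540 the queue interrupt mask is 64 bits wide, so it is split across the two 32-bit EIMS_EX/EIMC_EX registers, and a half that is all zero is skipped to save an MMIO write. The split itself is plain masking and shifting; a sketch with the register write abstracted away:

#include <linux/types.h>

static void demo_write_qmask(u64 qmask, void (*wr32)(int idx, u32 val))
{
	u32 lo = qmask & 0xFFFFFFFF;	/* queues  0..31 -> EX(0) */
	u32 hi = qmask >> 32;		/* queues 32..63 -> EX(1) */

	if (lo)
		wr32(0, lo);	/* skip the MMIO write when empty */
	if (hi)
		wr32(1, hi);
}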
@@ -1809,7 +2070,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
1809 tx_ring->total_bytes = 0; 2070 tx_ring->total_bytes = 0;
1810 tx_ring->total_packets = 0; 2071 tx_ring->total_packets = 0;
1811 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 2072 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1812 r_idx + 1); 2073 r_idx + 1);
1813 } 2074 }
1814 2075
1815 /* EIAM disabled interrupts (on this vector) for us */ 2076 /* EIAM disabled interrupts (on this vector) for us */
@@ -1831,19 +2092,23 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1831 int r_idx; 2092 int r_idx;
1832 int i; 2093 int i;
1833 2094
2095#ifdef CONFIG_IXGBE_DCA
2096 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2097 ixgbe_update_dca(q_vector);
2098#endif
2099
1834 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 2100 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1835 for (i = 0; i < q_vector->rxr_count; i++) { 2101 for (i = 0; i < q_vector->rxr_count; i++) {
1836 rx_ring = adapter->rx_ring[r_idx]; 2102 rx_ring = adapter->rx_ring[r_idx];
1837 rx_ring->total_bytes = 0; 2103 rx_ring->total_bytes = 0;
1838 rx_ring->total_packets = 0; 2104 rx_ring->total_packets = 0;
1839 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 2105 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1840 r_idx + 1); 2106 r_idx + 1);
1841 } 2107 }
1842 2108
1843 if (!q_vector->rxr_count) 2109 if (!q_vector->rxr_count)
1844 return IRQ_HANDLED; 2110 return IRQ_HANDLED;
1845 2111
1846 /* disable interrupts on this vector only */
1847 /* EIAM disabled interrupts (on this vector) for us */ 2112 /* EIAM disabled interrupts (on this vector) for us */
1848 napi_schedule(&q_vector->napi); 2113 napi_schedule(&q_vector->napi);
1849 2114
@@ -1867,7 +2132,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
1867 ring->total_bytes = 0; 2132 ring->total_bytes = 0;
1868 ring->total_packets = 0; 2133 ring->total_packets = 0;
1869 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 2134 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1870 r_idx + 1); 2135 r_idx + 1);
1871 } 2136 }
1872 2137
1873 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 2138 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
@@ -1876,7 +2141,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
1876 ring->total_bytes = 0; 2141 ring->total_bytes = 0;
1877 ring->total_packets = 0; 2142 ring->total_packets = 0;
1878 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 2143 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1879 r_idx + 1); 2144 r_idx + 1);
1880 } 2145 }
1881 2146
1882 /* EIAM disabled interrupts (on this vector) for us */ 2147 /* EIAM disabled interrupts (on this vector) for us */
@@ -1896,19 +2161,20 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
1896static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) 2161static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1897{ 2162{
1898 struct ixgbe_q_vector *q_vector = 2163 struct ixgbe_q_vector *q_vector =
1899 container_of(napi, struct ixgbe_q_vector, napi); 2164 container_of(napi, struct ixgbe_q_vector, napi);
1900 struct ixgbe_adapter *adapter = q_vector->adapter; 2165 struct ixgbe_adapter *adapter = q_vector->adapter;
1901 struct ixgbe_ring *rx_ring = NULL; 2166 struct ixgbe_ring *rx_ring = NULL;
1902 int work_done = 0; 2167 int work_done = 0;
1903 long r_idx; 2168 long r_idx;
1904 2169
1905 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1906 rx_ring = adapter->rx_ring[r_idx];
1907#ifdef CONFIG_IXGBE_DCA 2170#ifdef CONFIG_IXGBE_DCA
1908 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 2171 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1909 ixgbe_update_rx_dca(adapter, rx_ring); 2172 ixgbe_update_dca(q_vector);
1910#endif 2173#endif
1911 2174
2175 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
2176 rx_ring = adapter->rx_ring[r_idx];
2177
1912 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); 2178 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
1913 2179
1914 /* If all Rx work done, exit the polling mode */ 2180 /* If all Rx work done, exit the polling mode */
@@ -1918,7 +2184,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1918 ixgbe_set_itr_msix(q_vector); 2184 ixgbe_set_itr_msix(q_vector);
1919 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2185 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1920 ixgbe_irq_enable_queues(adapter, 2186 ixgbe_irq_enable_queues(adapter,
1921 ((u64)1 << q_vector->v_idx)); 2187 ((u64)1 << q_vector->v_idx));
1922 } 2188 }
1923 2189
1924 return work_done; 2190 return work_done;
@@ -1935,23 +2201,24 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1935static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) 2201static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1936{ 2202{
1937 struct ixgbe_q_vector *q_vector = 2203 struct ixgbe_q_vector *q_vector =
1938 container_of(napi, struct ixgbe_q_vector, napi); 2204 container_of(napi, struct ixgbe_q_vector, napi);
1939 struct ixgbe_adapter *adapter = q_vector->adapter; 2205 struct ixgbe_adapter *adapter = q_vector->adapter;
1940 struct ixgbe_ring *ring = NULL; 2206 struct ixgbe_ring *ring = NULL;
1941 int work_done = 0, i; 2207 int work_done = 0, i;
1942 long r_idx; 2208 long r_idx;
1943 bool tx_clean_complete = true; 2209 bool tx_clean_complete = true;
1944 2210
2211#ifdef CONFIG_IXGBE_DCA
2212 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2213 ixgbe_update_dca(q_vector);
2214#endif
2215
1945 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 2216 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1946 for (i = 0; i < q_vector->txr_count; i++) { 2217 for (i = 0; i < q_vector->txr_count; i++) {
1947 ring = adapter->tx_ring[r_idx]; 2218 ring = adapter->tx_ring[r_idx];
1948#ifdef CONFIG_IXGBE_DCA
1949 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1950 ixgbe_update_tx_dca(adapter, ring);
1951#endif
1952 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); 2219 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
1953 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 2220 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1954 r_idx + 1); 2221 r_idx + 1);
1955 } 2222 }
1956 2223
1957 /* attempt to distribute budget to each queue fairly, but don't allow 2224 /* attempt to distribute budget to each queue fairly, but don't allow
@@ -1961,13 +2228,9 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1961 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 2228 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1962 for (i = 0; i < q_vector->rxr_count; i++) { 2229 for (i = 0; i < q_vector->rxr_count; i++) {
1963 ring = adapter->rx_ring[r_idx]; 2230 ring = adapter->rx_ring[r_idx];
1964#ifdef CONFIG_IXGBE_DCA
1965 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1966 ixgbe_update_rx_dca(adapter, ring);
1967#endif
1968 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); 2231 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
1969 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 2232 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1970 r_idx + 1); 2233 r_idx + 1);
1971 } 2234 }
1972 2235
1973 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 2236 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
@@ -1979,7 +2242,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1979 ixgbe_set_itr_msix(q_vector); 2242 ixgbe_set_itr_msix(q_vector);
1980 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2243 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1981 ixgbe_irq_enable_queues(adapter, 2244 ixgbe_irq_enable_queues(adapter,
1982 ((u64)1 << q_vector->v_idx)); 2245 ((u64)1 << q_vector->v_idx));
1983 return 0; 2246 return 0;
1984 } 2247 }
1985 2248
@@ -1997,19 +2260,20 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1997static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) 2260static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
1998{ 2261{
1999 struct ixgbe_q_vector *q_vector = 2262 struct ixgbe_q_vector *q_vector =
2000 container_of(napi, struct ixgbe_q_vector, napi); 2263 container_of(napi, struct ixgbe_q_vector, napi);
2001 struct ixgbe_adapter *adapter = q_vector->adapter; 2264 struct ixgbe_adapter *adapter = q_vector->adapter;
2002 struct ixgbe_ring *tx_ring = NULL; 2265 struct ixgbe_ring *tx_ring = NULL;
2003 int work_done = 0; 2266 int work_done = 0;
2004 long r_idx; 2267 long r_idx;
2005 2268
2006 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2007 tx_ring = adapter->tx_ring[r_idx];
2008#ifdef CONFIG_IXGBE_DCA 2269#ifdef CONFIG_IXGBE_DCA
2009 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 2270 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2010 ixgbe_update_tx_dca(adapter, tx_ring); 2271 ixgbe_update_dca(q_vector);
2011#endif 2272#endif
2012 2273
2274 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2275 tx_ring = adapter->tx_ring[r_idx];
2276
2013 if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) 2277 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
2014 work_done = budget; 2278 work_done = budget;
2015 2279
@@ -2019,34 +2283,38 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
2019 if (adapter->tx_itr_setting & 1) 2283 if (adapter->tx_itr_setting & 1)
2020 ixgbe_set_itr_msix(q_vector); 2284 ixgbe_set_itr_msix(q_vector);
2021 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2285 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2022 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); 2286 ixgbe_irq_enable_queues(adapter,
2287 ((u64)1 << q_vector->v_idx));
2023 } 2288 }
2024 2289
2025 return work_done; 2290 return work_done;
2026} 2291}
2027 2292
2028static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, 2293static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
2029 int r_idx) 2294 int r_idx)
2030{ 2295{
2031 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; 2296 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2297 struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
2032 2298
2033 set_bit(r_idx, q_vector->rxr_idx); 2299 set_bit(r_idx, q_vector->rxr_idx);
2034 q_vector->rxr_count++; 2300 q_vector->rxr_count++;
2301 rx_ring->q_vector = q_vector;
2035} 2302}
2036 2303
2037static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, 2304static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2038 int t_idx) 2305 int t_idx)
2039{ 2306{
2040 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; 2307 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2308 struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
2041 2309
2042 set_bit(t_idx, q_vector->txr_idx); 2310 set_bit(t_idx, q_vector->txr_idx);
2043 q_vector->txr_count++; 2311 q_vector->txr_count++;
2312 tx_ring->q_vector = q_vector;
2044} 2313}
2045 2314
2046/** 2315/**
2047 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors 2316 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
2048 * @adapter: board private structure to initialize 2317 * @adapter: board private structure to initialize
2049 * @vectors: allotted vector count for descriptor rings
2050 * 2318 *
2051 * This function maps descriptor rings to the queue-specific vectors 2319 * This function maps descriptor rings to the queue-specific vectors
2052 * we were allotted through the MSI-X enabling code. Ideally, we'd have 2320 * we were allotted through the MSI-X enabling code. Ideally, we'd have
@@ -2054,9 +2322,9 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2054 * group the rings as "efficiently" as possible. You would add new 2322 * group the rings as "efficiently" as possible. You would add new
2055 * mapping configurations in here. 2323 * mapping configurations in here.
2056 **/ 2324 **/
2057static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, 2325static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
2058 int vectors)
2059{ 2326{
2327 int q_vectors;
2060 int v_start = 0; 2328 int v_start = 0;
2061 int rxr_idx = 0, txr_idx = 0; 2329 int rxr_idx = 0, txr_idx = 0;
2062 int rxr_remaining = adapter->num_rx_queues; 2330 int rxr_remaining = adapter->num_rx_queues;
@@ -2069,11 +2337,13 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
2069 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 2337 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2070 goto out; 2338 goto out;
2071 2339
2340 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2341
2072 /* 2342 /*
2073 * The ideal configuration... 2343 * The ideal configuration...
2074 * We have enough vectors to map one per queue. 2344 * We have enough vectors to map one per queue.
2075 */ 2345 */
2076 if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 2346 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
2077 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 2347 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
2078 map_vector_to_rxq(adapter, v_start, rxr_idx); 2348 map_vector_to_rxq(adapter, v_start, rxr_idx);
2079 2349
@@ -2089,23 +2359,20 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
2089 * multiple queues per vector. 2359 * multiple queues per vector.
2090 */ 2360 */
2091 /* Re-adjusting *qpv takes care of the remainder. */ 2361 /* Re-adjusting *qpv takes care of the remainder. */
2092 for (i = v_start; i < vectors; i++) { 2362 for (i = v_start; i < q_vectors; i++) {
2093 rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i); 2363 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
2094 for (j = 0; j < rqpv; j++) { 2364 for (j = 0; j < rqpv; j++) {
2095 map_vector_to_rxq(adapter, i, rxr_idx); 2365 map_vector_to_rxq(adapter, i, rxr_idx);
2096 rxr_idx++; 2366 rxr_idx++;
2097 rxr_remaining--; 2367 rxr_remaining--;
2098 } 2368 }
2099 } 2369 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
2100 for (i = v_start; i < vectors; i++) {
2101 tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
2102 for (j = 0; j < tqpv; j++) { 2370 for (j = 0; j < tqpv; j++) {
2103 map_vector_to_txq(adapter, i, txr_idx); 2371 map_vector_to_txq(adapter, i, txr_idx);
2104 txr_idx++; 2372 txr_idx++;
2105 txr_remaining--; 2373 txr_remaining--;
2106 } 2374 }
2107 } 2375 }
2108
2109out: 2376out:
2110 return err; 2377 return err;
2111} 2378}
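
When vectors are scarcer than queues, the loop above hands each vector DIV_ROUND_UP(remaining, vectors_left) queues, which front-loads any remainder so no vector ends up more than one queue heavier than another. The distribution in isolation (array and names hypothetical):

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

/* Spread nqueues queues over nvec vectors as evenly as possible. */
static void demo_distribute(int nqueues, int nvec, int owner[])
{
	int v, q = 0, remaining = nqueues;

	for (v = 0; v < nvec; v++) {
		int take = DIV_ROUND_UP(remaining, nvec - v);

		while (take--) {
			owner[q++] = v;	/* queue q serviced by vector v */
			remaining--;
		}
	}
	/* e.g. 10 queues over 4 vectors -> 3, 3, 2, 2 */
}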
@@ -2122,37 +2389,41 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2122 struct net_device *netdev = adapter->netdev; 2389 struct net_device *netdev = adapter->netdev;
2123 irqreturn_t (*handler)(int, void *); 2390 irqreturn_t (*handler)(int, void *);
2124 int i, vector, q_vectors, err; 2391 int i, vector, q_vectors, err;
2125 int ri=0, ti=0; 2392 int ri = 0, ti = 0;
2126 2393
2127 /* Decrement for Other and TCP Timer vectors */ 2394 /* Decrement for Other and TCP Timer vectors */
2128 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2395 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2129 2396
2130 /* Map the Tx/Rx rings to the vectors we were allotted. */ 2397 err = ixgbe_map_rings_to_vectors(adapter);
2131 err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
2132 if (err) 2398 if (err)
2133 goto out; 2399 return err;
2134 2400
2135#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ 2401#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
2136 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ 2402 ? &ixgbe_msix_clean_many : \
2137 &ixgbe_msix_clean_many) 2403 (_v)->rxr_count ? &ixgbe_msix_clean_rx : \
2404 (_v)->txr_count ? &ixgbe_msix_clean_tx : \
2405 NULL)
2138 for (vector = 0; vector < q_vectors; vector++) { 2406 for (vector = 0; vector < q_vectors; vector++) {
2139 handler = SET_HANDLER(adapter->q_vector[vector]); 2407 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2140 2408 handler = SET_HANDLER(q_vector);
2141 if(handler == &ixgbe_msix_clean_rx) { 2409
2142 sprintf(adapter->name[vector], "%s-%s-%d", 2410 if (handler == &ixgbe_msix_clean_rx) {
2143 netdev->name, "rx", ri++); 2411 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2144 } 2412 "%s-%s-%d", netdev->name, "rx", ri++);
2145 else if(handler == &ixgbe_msix_clean_tx) { 2413 } else if (handler == &ixgbe_msix_clean_tx) {
2146 sprintf(adapter->name[vector], "%s-%s-%d", 2414 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2147 netdev->name, "tx", ti++); 2415 "%s-%s-%d", netdev->name, "tx", ti++);
2416 } else if (handler == &ixgbe_msix_clean_many) {
2417 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2418 "%s-%s-%d", netdev->name, "TxRx", ri++);
2419 ti++;
2420 } else {
2421 /* skip this unused q_vector */
2422 continue;
2148 } 2423 }
2149 else
2150 sprintf(adapter->name[vector], "%s-%s-%d",
2151 netdev->name, "TxRx", vector);
2152
2153 err = request_irq(adapter->msix_entries[vector].vector, 2424 err = request_irq(adapter->msix_entries[vector].vector,
2154 handler, 0, adapter->name[vector], 2425 handler, 0, q_vector->name,
2155 adapter->q_vector[vector]); 2426 q_vector);
2156 if (err) { 2427 if (err) {
2157 e_err(probe, "request_irq failed for MSIX interrupt " 2428 e_err(probe, "request_irq failed for MSIX interrupt "
2158 "Error: %d\n", err); 2429 "Error: %d\n", err);
@@ -2160,9 +2431,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2160 } 2431 }
2161 } 2432 }
2162 2433
2163 sprintf(adapter->name[vector], "%s:lsc", netdev->name); 2434 sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
2164 err = request_irq(adapter->msix_entries[vector].vector, 2435 err = request_irq(adapter->msix_entries[vector].vector,
2165 ixgbe_msix_lsc, 0, adapter->name[vector], netdev); 2436 ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
2166 if (err) { 2437 if (err) {
2167 e_err(probe, "request_irq for msix_lsc failed: %d\n", err); 2438 e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
2168 goto free_queue_irqs; 2439 goto free_queue_irqs;
@@ -2173,31 +2444,30 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2173free_queue_irqs: 2444free_queue_irqs:
2174 for (i = vector - 1; i >= 0; i--) 2445 for (i = vector - 1; i >= 0; i--)
2175 free_irq(adapter->msix_entries[--vector].vector, 2446 free_irq(adapter->msix_entries[--vector].vector,
2176 adapter->q_vector[i]); 2447 adapter->q_vector[i]);
2177 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 2448 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2178 pci_disable_msix(adapter->pdev); 2449 pci_disable_msix(adapter->pdev);
2179 kfree(adapter->msix_entries); 2450 kfree(adapter->msix_entries);
2180 adapter->msix_entries = NULL; 2451 adapter->msix_entries = NULL;
2181out:
2182 return err; 2452 return err;
2183} 2453}
2184 2454
2185static void ixgbe_set_itr(struct ixgbe_adapter *adapter) 2455static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2186{ 2456{
2187 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; 2457 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2188 u8 current_itr;
2189 u32 new_itr = q_vector->eitr;
2190 struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; 2458 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
2191 struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; 2459 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
2460 u32 new_itr = q_vector->eitr;
2461 u8 current_itr;
2192 2462
2193 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, 2463 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
2194 q_vector->tx_itr, 2464 q_vector->tx_itr,
2195 tx_ring->total_packets, 2465 tx_ring->total_packets,
2196 tx_ring->total_bytes); 2466 tx_ring->total_bytes);
2197 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr, 2467 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
2198 q_vector->rx_itr, 2468 q_vector->rx_itr,
2199 rx_ring->total_packets, 2469 rx_ring->total_packets,
2200 rx_ring->total_bytes); 2470 rx_ring->total_bytes);
2201 2471
2202 current_itr = max(q_vector->rx_itr, q_vector->tx_itr); 2472 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
2203 2473
@@ -2218,9 +2488,9 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2218 2488
2219 if (new_itr != q_vector->eitr) { 2489 if (new_itr != q_vector->eitr) {
2220 /* do an exponential smoothing */ 2490 /* do an exponential smoothing */
2221 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 2491 new_itr = ((q_vector->eitr * 9) + new_itr)/10;
2222 2492
2223 /* save the algorithm value here, not the smoothed one */ 2493 /* save the algorithm value here */
2224 q_vector->eitr = new_itr; 2494 q_vector->eitr = new_itr;
2225 2495
2226 ixgbe_write_eitr(q_vector); 2496 ixgbe_write_eitr(q_vector);
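
Both forms of the smoothing above are a 90/10 exponential moving average, but the new single-division form `((eitr * 9) + new_itr) / 10` loses less precision to integer truncation than the old two-division form. A standalone sketch with made-up sample values; the two can drift apart by a count on some steps:

    #include <stdio.h>

    /* Compare the old two-division 90/10 EWMA with the new
     * single-division form; sample values are arbitrary. */
    int main(void)
    {
        unsigned int eitr = 8000, target = 3333;

        for (int step = 0; step < 5; step++) {
            unsigned int old_form = ((eitr * 90) / 100) + ((target * 10) / 100);
            unsigned int new_form = ((eitr * 9) + target) / 10;
            /* old and new agree except for occasional off-by-one
             * differences caused by the double truncation */
            printf("step %d: old=%u new=%u\n", step, old_form, new_form);
            eitr = new_form;   /* follow the driver's new formula */
        }
        return 0;
    }
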
@@ -2231,7 +2501,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2231 * ixgbe_irq_enable - Enable default interrupt generation settings 2501 * ixgbe_irq_enable - Enable default interrupt generation settings
2232 * @adapter: board private structure 2502 * @adapter: board private structure
2233 **/ 2503 **/
2234static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) 2504static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2505 bool flush)
2235{ 2506{
2236 u32 mask; 2507 u32 mask;
2237 2508
@@ -2240,20 +2511,27 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
2240 mask |= IXGBE_EIMS_GPI_SDP0; 2511 mask |= IXGBE_EIMS_GPI_SDP0;
2241 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 2512 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2242 mask |= IXGBE_EIMS_GPI_SDP1; 2513 mask |= IXGBE_EIMS_GPI_SDP1;
2243 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 2514 switch (adapter->hw.mac.type) {
2515 case ixgbe_mac_82599EB:
2516 case ixgbe_mac_X540:
2244 mask |= IXGBE_EIMS_ECC; 2517 mask |= IXGBE_EIMS_ECC;
2245 mask |= IXGBE_EIMS_GPI_SDP1; 2518 mask |= IXGBE_EIMS_GPI_SDP1;
2246 mask |= IXGBE_EIMS_GPI_SDP2; 2519 mask |= IXGBE_EIMS_GPI_SDP2;
2247 if (adapter->num_vfs) 2520 if (adapter->num_vfs)
2248 mask |= IXGBE_EIMS_MAILBOX; 2521 mask |= IXGBE_EIMS_MAILBOX;
2522 break;
2523 default:
2524 break;
2249 } 2525 }
2250 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 2526 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2251 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 2527 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
2252 mask |= IXGBE_EIMS_FLOW_DIR; 2528 mask |= IXGBE_EIMS_FLOW_DIR;
2253 2529
2254 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 2530 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
2255 ixgbe_irq_enable_queues(adapter, ~0); 2531 if (queues)
2256 IXGBE_WRITE_FLUSH(&adapter->hw); 2532 ixgbe_irq_enable_queues(adapter, ~0);
2533 if (flush)
2534 IXGBE_WRITE_FLUSH(&adapter->hw);
2257 2535
2258 if (adapter->num_vfs > 32) { 2536 if (adapter->num_vfs > 32) {
2259 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; 2537 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
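
For more than 32 VFs, `(1 << (adapter->num_vfs - 32)) - 1` builds a contiguous low-bit mask with one bit per VF beyond the first 32. A quick standalone check with arbitrary example VF counts:

    #include <stdio.h>
    #include <stdint.h>

    /* Show the EITRSEL-style low-bit mask for VFs numbered above 32 */
    int main(void)
    {
        for (unsigned int num_vfs = 33; num_vfs <= 40; num_vfs += 7) {
            uint32_t eitrsel = (1u << (num_vfs - 32)) - 1;
            printf("num_vfs=%u -> eitrsel=0x%08X\n", num_vfs, eitrsel);
        }
        return 0;   /* prints 0x00000001 for 33 VFs, 0x000000FF for 40 */
    }
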
@@ -2275,7 +2553,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2275 u32 eicr; 2553 u32 eicr;
2276 2554
2277 /* 2555 /*
2278 * Workaround for silicon errata. Mask the interrupts 2556 * Workaround for silicon errata on 82598. Mask the interrupts
2279 * before the read of EICR. 2557 * before the read of EICR.
2280 */ 2558 */
2281 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); 2559 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
@@ -2284,23 +2562,38 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2284 * therefore no explicit interrupt disable is necessary */ 2562 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2285 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 2563 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2286 if (!eicr) { 2564 if (!eicr) {
2287 /* shared interrupt alert! 2565 /*
2566 * shared interrupt alert!
2288 * make sure interrupts are enabled because the read will 2567 * make sure interrupts are enabled because the read will
2289 * have disabled interrupts due to EIAM */ 2568 * have disabled interrupts due to EIAM
2290 ixgbe_irq_enable(adapter); 2569 * finish the workaround of silicon errata on 82598. Unmask
2570 * the interrupt that we masked before the EICR read.
2571 */
2572 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2573 ixgbe_irq_enable(adapter, true, true);
2291 return IRQ_NONE; /* Not our interrupt */ 2574 return IRQ_NONE; /* Not our interrupt */
2292 } 2575 }
2293 2576
2294 if (eicr & IXGBE_EICR_LSC) 2577 if (eicr & IXGBE_EICR_LSC)
2295 ixgbe_check_lsc(adapter); 2578 ixgbe_check_lsc(adapter);
2296 2579
2297 if (hw->mac.type == ixgbe_mac_82599EB) 2580 switch (hw->mac.type) {
2581 case ixgbe_mac_82599EB:
2298 ixgbe_check_sfp_event(adapter, eicr); 2582 ixgbe_check_sfp_event(adapter, eicr);
2583 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2584 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
2585 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2586 adapter->interrupt_event = eicr;
2587 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2588 ixgbe_service_event_schedule(adapter);
2589 }
2590 }
2591 break;
2592 default:
2593 break;
2594 }
2299 2595
2300 ixgbe_check_fan_failure(adapter, eicr); 2596 ixgbe_check_fan_failure(adapter, eicr);
2301 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2302 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
2303 schedule_work(&adapter->check_overtemp_task);
2304 2597
2305 if (napi_schedule_prep(&(q_vector->napi))) { 2598 if (napi_schedule_prep(&(q_vector->napi))) {
2306 adapter->tx_ring[0]->total_packets = 0; 2599 adapter->tx_ring[0]->total_packets = 0;
@@ -2311,6 +2604,14 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2311 __napi_schedule(&(q_vector->napi)); 2604 __napi_schedule(&(q_vector->napi));
2312 } 2605 }
2313 2606
2607 /*
2608 * re-enable link (maybe) and non-queue interrupts, no flush.
2609 * ixgbe_poll will re-enable the queue interrupts
2610 */
2611
2612 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2613 ixgbe_irq_enable(adapter, false, false);
2614
2314 return IRQ_HANDLED; 2615 return IRQ_HANDLED;
2315} 2616}
2316 2617
@@ -2343,10 +2644,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2343 err = ixgbe_request_msix_irqs(adapter); 2644 err = ixgbe_request_msix_irqs(adapter);
2344 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 2645 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
2345 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, 2646 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
2346 netdev->name, netdev); 2647 netdev->name, netdev);
2347 } else { 2648 } else {
2348 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, 2649 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
2349 netdev->name, netdev); 2650 netdev->name, netdev);
2350 } 2651 }
2351 2652
2352 if (err) 2653 if (err)
@@ -2369,8 +2670,13 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2369 2670
2370 i--; 2671 i--;
2371 for (; i >= 0; i--) { 2672 for (; i >= 0; i--) {
2673 /* free only the irqs that were actually requested */
2674 if (!adapter->q_vector[i]->rxr_count &&
2675 !adapter->q_vector[i]->txr_count)
2676 continue;
2677
2372 free_irq(adapter->msix_entries[i].vector, 2678 free_irq(adapter->msix_entries[i].vector,
2373 adapter->q_vector[i]); 2679 adapter->q_vector[i]);
2374 } 2680 }
2375 2681
2376 ixgbe_reset_q_vectors(adapter); 2682 ixgbe_reset_q_vectors(adapter);
@@ -2385,14 +2691,20 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2385 **/ 2691 **/
2386static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) 2692static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2387{ 2693{
2388 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2694 switch (adapter->hw.mac.type) {
2695 case ixgbe_mac_82598EB:
2389 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 2696 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
2390 } else { 2697 break;
2698 case ixgbe_mac_82599EB:
2699 case ixgbe_mac_X540:
2391 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 2700 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2392 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 2701 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
2393 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 2702 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
2394 if (adapter->num_vfs > 32) 2703 if (adapter->num_vfs > 32)
2395 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); 2704 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
2705 break;
2706 default:
2707 break;
2396 } 2708 }
2397 IXGBE_WRITE_FLUSH(&adapter->hw); 2709 IXGBE_WRITE_FLUSH(&adapter->hw);
2398 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2710 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -2413,7 +2725,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2413 struct ixgbe_hw *hw = &adapter->hw; 2725 struct ixgbe_hw *hw = &adapter->hw;
2414 2726
2415 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), 2727 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
2416 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param)); 2728 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
2417 2729
2418 ixgbe_set_ivar(adapter, 0, 0, 0); 2730 ixgbe_set_ivar(adapter, 0, 0, 0);
2419 ixgbe_set_ivar(adapter, 1, 0, 0); 2731 ixgbe_set_ivar(adapter, 1, 0, 0);
@@ -2425,115 +2737,176 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2425} 2737}
2426 2738
2427/** 2739/**
2428 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset 2740 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
2429 * @adapter: board private structure 2741 * @adapter: board private structure
2742 * @ring: structure containing ring specific data
2430 * 2743 *
2431 * Configure the Tx unit of the MAC after a reset. 2744 * Configure the Tx descriptor ring after a reset.
2432 **/ 2745 **/
2433static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) 2746void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2747 struct ixgbe_ring *ring)
2434{ 2748{
2435 u64 tdba;
2436 struct ixgbe_hw *hw = &adapter->hw; 2749 struct ixgbe_hw *hw = &adapter->hw;
2437 u32 i, j, tdlen, txctrl; 2750 u64 tdba = ring->dma;
2751 int wait_loop = 10;
2752 u32 txdctl;
2753 u8 reg_idx = ring->reg_idx;
2438 2754
2439 /* Setup the HW Tx Head and Tail descriptor pointers */ 2755 /* disable queue to avoid issues while updating state */
2440 for (i = 0; i < adapter->num_tx_queues; i++) { 2756 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2441 struct ixgbe_ring *ring = adapter->tx_ring[i]; 2757 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
2442 j = ring->reg_idx; 2758 txdctl & ~IXGBE_TXDCTL_ENABLE);
2443 tdba = ring->dma; 2759 IXGBE_WRITE_FLUSH(hw);
2444 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); 2760
2445 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 2761 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
2446 (tdba & DMA_BIT_MASK(32))); 2762 (tdba & DMA_BIT_MASK(32)));
2447 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 2763 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2448 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); 2764 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2449 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 2765 ring->count * sizeof(union ixgbe_adv_tx_desc));
2450 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 2766 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2451 adapter->tx_ring[i]->head = IXGBE_TDH(j); 2767 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
2452 adapter->tx_ring[i]->tail = IXGBE_TDT(j); 2768 ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
2453 /* 2769
2454 * Disable Tx Head Writeback RO bit, since this hoses 2770 /* configure fetching thresholds */
2455 * bookkeeping if things aren't delivered in order. 2771 if (adapter->rx_itr_setting == 0) {
2456 */ 2772 /* cannot set wthresh when itr==0 */
2457 switch (hw->mac.type) { 2773 txdctl &= ~0x007F0000;
2458 case ixgbe_mac_82598EB: 2774 } else {
2459 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 2775 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2460 break; 2776 txdctl |= (8 << 16);
2461 case ixgbe_mac_82599EB: 2777 }
2462 default: 2778 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2463 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 2779 /* PThresh workaround for Tx hang with DFP enabled. */
2464 break; 2780 txdctl |= 32;
2465 }
2466 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2467 switch (hw->mac.type) {
2468 case ixgbe_mac_82598EB:
2469 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
2470 break;
2471 case ixgbe_mac_82599EB:
2472 default:
2473 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
2474 break;
2475 }
2476 } 2781 }
2477 2782
2478 if (hw->mac.type == ixgbe_mac_82599EB) { 2783 /* reinitialize flowdirector state */
2479 u32 rttdcs; 2784 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2480 u32 mask; 2785 adapter->atr_sample_rate) {
2786 ring->atr_sample_rate = adapter->atr_sample_rate;
2787 ring->atr_count = 0;
2788 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
2789 } else {
2790 ring->atr_sample_rate = 0;
2791 }
2481 2792
2482 /* disable the arbiter while setting MTQC */ 2793 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
2483 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2484 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2485 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2486 2794
2487 /* set transmit pool layout */ 2795 /* enable queue */
2488 mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED); 2796 txdctl |= IXGBE_TXDCTL_ENABLE;
2489 switch (adapter->flags & mask) { 2797 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2490 2798
2491 case (IXGBE_FLAG_SRIOV_ENABLED): 2799 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2492 IXGBE_WRITE_REG(hw, IXGBE_MTQC, 2800 if (hw->mac.type == ixgbe_mac_82598EB &&
2493 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); 2801 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2494 break; 2802 return;
2495 2803
2496 case (IXGBE_FLAG_DCB_ENABLED): 2804 /* poll to verify queue is enabled */
2497 /* We enable 8 traffic classes, DCB only */ 2805 do {
2498 IXGBE_WRITE_REG(hw, IXGBE_MTQC, 2806 usleep_range(1000, 2000);
2499 (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ)); 2807 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2500 break; 2808 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
2809 if (!wait_loop)
2810 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
2811}
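
Setting TXDCTL.ENABLE and then re-reading the register until the bit sticks is a bounded readback poll: at most `wait_loop` iterations with a 1-2 ms sleep each. A userspace sketch of the same pattern; the register stub and the bit position stand in for the real IXGBE_READ_REG()/IXGBE_TXDCTL_ENABLE and are assumptions here:

    #include <stdio.h>
    #include <stdint.h>

    #define TXDCTL_ENABLE (1u << 25)   /* assumed bit position */

    static uint32_t fake_txdctl;       /* stands in for the register */

    static uint32_t read_txdctl(void)
    {
        /* pretend the hardware latches the enable after a few reads */
        static int reads;
        if (++reads >= 3)
            fake_txdctl |= TXDCTL_ENABLE;
        return fake_txdctl;
    }

    int main(void)
    {
        int wait_loop = 10;
        uint32_t txdctl;

        do {
            /* the driver sleeps 1-2 ms per pass via usleep_range() */
            txdctl = read_txdctl();
        } while (--wait_loop && !(txdctl & TXDCTL_ENABLE));

        if (!wait_loop)
            printf("queue did not enable within the polling budget\n");
        else
            printf("queue enabled, %d polls left\n", wait_loop);
        return 0;
    }
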
2501 2812
2502 default: 2813static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2503 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 2814{
2504 break; 2815 struct ixgbe_hw *hw = &adapter->hw;
2505 } 2816 u32 rttdcs;
2817 u32 mask;
2818
2819 if (hw->mac.type == ixgbe_mac_82598EB)
2820 return;
2506 2821
2507 /* re-eable the arbiter */ 2822 /* disable the arbiter while setting MTQC */
2508 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 2823 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2509 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 2824 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2825 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2826
2827 /* set transmit pool layout */
2828 mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
2829 switch (adapter->flags & mask) {
2830
2831 case (IXGBE_FLAG_SRIOV_ENABLED):
2832 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2833 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
2834 break;
2835
2836 case (IXGBE_FLAG_DCB_ENABLED):
2837 /* We enable 8 traffic classes, DCB only */
2838 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2839 (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
2840 break;
2841
2842 default:
2843 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2844 break;
2845 }
2846
2847 /* re-enable the arbiter */
2848 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2849 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2850}
2851
2852/**
2853 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
2854 * @adapter: board private structure
2855 *
2856 * Configure the Tx unit of the MAC after a reset.
2857 **/
2858static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
2859{
2860 struct ixgbe_hw *hw = &adapter->hw;
2861 u32 dmatxctl;
2862 u32 i;
2863
2864 ixgbe_setup_mtqc(adapter);
2865
2866 if (hw->mac.type != ixgbe_mac_82598EB) {
2867 /* DMATXCTL.EN must be before Tx queues are enabled */
2868 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2869 dmatxctl |= IXGBE_DMATXCTL_TE;
2870 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2510 } 2871 }
2872
2873 /* Setup the HW Tx Head and Tail descriptor pointers */
2874 for (i = 0; i < adapter->num_tx_queues; i++)
2875 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
2511} 2876}
2512 2877
2513#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 2878#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2514 2879
2515static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, 2880static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2516 struct ixgbe_ring *rx_ring) 2881 struct ixgbe_ring *rx_ring)
2517{ 2882{
2518 u32 srrctl; 2883 u32 srrctl;
2519 int index; 2884 u8 reg_idx = rx_ring->reg_idx;
2520 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2521 2885
2522 index = rx_ring->reg_idx; 2886 switch (adapter->hw.mac.type) {
2523 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2887 case ixgbe_mac_82598EB: {
2524 unsigned long mask; 2888 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2525 mask = (unsigned long) feature[RING_F_RSS].mask; 2889 const int mask = feature[RING_F_RSS].mask;
2526 index = index & mask; 2890 reg_idx = reg_idx & mask;
2891 }
2892 break;
2893 case ixgbe_mac_82599EB:
2894 case ixgbe_mac_X540:
2895 default:
2896 break;
2527 } 2897 }
2528 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); 2898
2899 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
2529 2900
2530 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 2901 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2531 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 2902 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2903 if (adapter->num_vfs)
2904 srrctl |= IXGBE_SRRCTL_DROP_EN;
2532 2905
2533 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & 2906 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
2534 IXGBE_SRRCTL_BSIZEHDR_MASK; 2907 IXGBE_SRRCTL_BSIZEHDR_MASK;
2535 2908
2536 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { 2909 if (ring_is_ps_enabled(rx_ring)) {
2537#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER 2910#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
2538 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 2911 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2539#else 2912#else
@@ -2546,41 +2919,93 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2546 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2919 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2547 } 2920 }
2548 2921
2549 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); 2922 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
2550} 2923}
2551 2924
2552static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) 2925static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2553{ 2926{
2554 u32 mrqc = 0; 2927 struct ixgbe_hw *hw = &adapter->hw;
2928 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
2929 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2930 0x6A3E67EA, 0x14364D17, 0x3BED200D};
2931 u32 mrqc = 0, reta = 0;
2932 u32 rxcsum;
2933 int i, j;
2555 int mask; 2934 int mask;
2556 2935
2557 if (!(adapter->hw.mac.type == ixgbe_mac_82599EB)) 2936 /* Fill out hash function seeds */
2558 return mrqc; 2937 for (i = 0; i < 10; i++)
2938 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
2939
2940 /* Fill out redirection table */
2941 for (i = 0, j = 0; i < 128; i++, j++) {
2942 if (j == adapter->ring_feature[RING_F_RSS].indices)
2943 j = 0;
2944 /* reta = 4-byte sliding window of
2945 * 0x00..(indices-1)(indices-1)00..etc. */
2946 reta = (reta << 8) | (j * 0x11);
2947 if ((i & 3) == 3)
2948 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2949 }
2559 2950
2560 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED 2951 /* Disable indicating checksum in descriptor, enables RSS hash */
2952 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2953 rxcsum |= IXGBE_RXCSUM_PCSD;
2954 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2955
2956 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2957 mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
2958 else
2959 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
2561#ifdef CONFIG_IXGBE_DCB 2960#ifdef CONFIG_IXGBE_DCB
2562 | IXGBE_FLAG_DCB_ENABLED 2961 | IXGBE_FLAG_DCB_ENABLED
2563#endif 2962#endif
2564 | IXGBE_FLAG_SRIOV_ENABLED 2963 | IXGBE_FLAG_SRIOV_ENABLED
2565 ); 2964 );
2566 2965
2567 switch (mask) { 2966 switch (mask) {
2967#ifdef CONFIG_IXGBE_DCB
2968 case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_RSS_ENABLED):
2969 mrqc = IXGBE_MRQC_RTRSS8TCEN;
2970 break;
2971 case (IXGBE_FLAG_DCB_ENABLED):
2972 mrqc = IXGBE_MRQC_RT8TCEN;
2973 break;
2974#endif /* CONFIG_IXGBE_DCB */
2568 case (IXGBE_FLAG_RSS_ENABLED): 2975 case (IXGBE_FLAG_RSS_ENABLED):
2569 mrqc = IXGBE_MRQC_RSSEN; 2976 mrqc = IXGBE_MRQC_RSSEN;
2570 break; 2977 break;
2571 case (IXGBE_FLAG_SRIOV_ENABLED): 2978 case (IXGBE_FLAG_SRIOV_ENABLED):
2572 mrqc = IXGBE_MRQC_VMDQEN; 2979 mrqc = IXGBE_MRQC_VMDQEN;
2573 break; 2980 break;
2574#ifdef CONFIG_IXGBE_DCB
2575 case (IXGBE_FLAG_DCB_ENABLED):
2576 mrqc = IXGBE_MRQC_RT8TCEN;
2577 break;
2578#endif /* CONFIG_IXGBE_DCB */
2579 default: 2981 default:
2580 break; 2982 break;
2581 } 2983 }
2582 2984
2583 return mrqc; 2985 /* Perform hash on these packet types */
2986 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2987 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2988 | IXGBE_MRQC_RSS_FIELD_IPV6
2989 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2990
2991 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2992}
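
The redirection-table loop above packs four one-byte entries per 32-bit RETA register: the sliding `reta = (reta << 8) | ...` shifts entries in, and every fourth iteration writes a full word. The `j * 0x11` multiplier appears to replicate the 4-bit queue index into both nibbles of each byte. A standalone sketch with an assumed RSS queue count of 4, printing the first few register values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t reta = 0;
        int indices = 4;   /* assumed RSS queue count */

        for (int i = 0, j = 0; i < 128; i++, j++) {
            if (j == indices)
                j = 0;
            reta = (reta << 8) | (uint32_t)(j * 0x11);
            if ((i & 3) == 3 && i < 16)   /* show the first four words */
                printf("RETA(%d) = 0x%08X\n", i >> 2, reta);
        }
        return 0;   /* RETA(0) = 0x00112233, then the window repeats */
    }
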
2993
2994/**
2995 * ixgbe_clear_rscctl - disable RSC for the indicated ring
2996 * @adapter: address of board private structure
2997 * @ring: structure containing ring specific data
2998 **/
2999void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
3000 struct ixgbe_ring *ring)
3001{
3002 struct ixgbe_hw *hw = &adapter->hw;
3003 u32 rscctrl;
3004 u8 reg_idx = ring->reg_idx;
3005
3006 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
3007 rscctrl &= ~IXGBE_RSCCTL_RSCEN;
3008 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2584} 3009}
2585 3010
2586/** 3011/**
@@ -2588,25 +3013,26 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2588 * @adapter: address of board private structure 3013 * @adapter: address of board private structure
2589 * @index: index of ring to set 3014 * @index: index of ring to set
2590 **/ 3015 **/
2591static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index) 3016void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
3017 struct ixgbe_ring *ring)
2592{ 3018{
2593 struct ixgbe_ring *rx_ring;
2594 struct ixgbe_hw *hw = &adapter->hw; 3019 struct ixgbe_hw *hw = &adapter->hw;
2595 int j;
2596 u32 rscctrl; 3020 u32 rscctrl;
2597 int rx_buf_len; 3021 int rx_buf_len;
3022 u8 reg_idx = ring->reg_idx;
3023
3024 if (!ring_is_rsc_enabled(ring))
3025 return;
2598 3026
2599 rx_ring = adapter->rx_ring[index]; 3027 rx_buf_len = ring->rx_buf_len;
2600 j = rx_ring->reg_idx; 3028 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
2601 rx_buf_len = rx_ring->rx_buf_len;
2602 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
2603 rscctrl |= IXGBE_RSCCTL_RSCEN; 3029 rscctrl |= IXGBE_RSCCTL_RSCEN;
2604 /* 3030 /*
2605 * we must limit the number of descriptors so that the 3031 * we must limit the number of descriptors so that the
2606 * total size of max desc * buf_len is not greater 3032 * total size of max desc * buf_len is not greater
2607 * than 65535 3033 * than 65535
2608 */ 3034 */
2609 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { 3035 if (ring_is_ps_enabled(ring)) {
2610#if (MAX_SKB_FRAGS > 16) 3036#if (MAX_SKB_FRAGS > 16)
2611 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 3037 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2612#elif (MAX_SKB_FRAGS > 8) 3038#elif (MAX_SKB_FRAGS > 8)
@@ -2624,120 +3050,309 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
2624 else 3050 else
2625 rscctrl |= IXGBE_RSCCTL_MAXDESC_4; 3051 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2626 } 3052 }
2627 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl); 3053 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2628} 3054}
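
The MAXDESC selection above enforces the limit the comment states: descriptors per aggregation times buffer length must not exceed 65535. A quick budget check over a few example buffer lengths (the sizes are illustrative, and the 16/8/4/1 options mirror the RSCCTL encodings used above):

    #include <stdio.h>

    int main(void)
    {
        const int sizes[] = { 2048, 4096, 8192, 16384 };
        const int opts[]  = { 16, 8, 4, 1 };

        for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
            int buf = sizes[i], maxdesc = 1;
            for (unsigned k = 0; k < 4; k++)
                if (opts[k] * buf <= 65535) {
                    maxdesc = opts[k];   /* largest option that fits */
                    break;
                }
            printf("buf_len=%5d -> MAXDESC_%-2d (%d bytes aggregated)\n",
                   buf, maxdesc, maxdesc * buf);
        }
        return 0;
    }
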
2629 3055
2630/** 3056/**
2631 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset 3057 * ixgbe_set_uta - Set unicast filter table address
2632 * @adapter: board private structure 3058 * @adapter: board private structure
2633 * 3059 *
2634 * Configure the Rx unit of the MAC after a reset. 3060 * The unicast table address is a register array of 32-bit registers.
3061 * The table is meant to be used in a way similar to how the MTA is used
3062 * however due to certain limitations in the hardware it is necessary to
3063 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
3064 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
2635 **/ 3065 **/
2636static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) 3066static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
3067{
3068 struct ixgbe_hw *hw = &adapter->hw;
3069 int i;
3070
3071 /* The UTA table only exists on 82599 hardware and newer */
3072 if (hw->mac.type < ixgbe_mac_82599EB)
3073 return;
3074
3075 /* we only need to do this if VMDq is enabled */
3076 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3077 return;
3078
3079 for (i = 0; i < 128; i++)
3080 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
3081}
3082
3083#define IXGBE_MAX_RX_DESC_POLL 10
3084static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3085 struct ixgbe_ring *ring)
3086{
3087 struct ixgbe_hw *hw = &adapter->hw;
3088 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3089 u32 rxdctl;
3090 u8 reg_idx = ring->reg_idx;
3091
3092 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
3093 if (hw->mac.type == ixgbe_mac_82598EB &&
3094 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3095 return;
3096
3097 do {
3098 usleep_range(1000, 2000);
3099 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3100 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3101
3102 if (!wait_loop) {
3103 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3104 "the polling period\n", reg_idx);
3105 }
3106}
3107
3108void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3109 struct ixgbe_ring *ring)
3110{
3111 struct ixgbe_hw *hw = &adapter->hw;
3112 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3113 u32 rxdctl;
3114 u8 reg_idx = ring->reg_idx;
3115
3116 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3117 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3118
3119 /* write value back with RXDCTL.ENABLE bit cleared */
3120 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3121
3122 if (hw->mac.type == ixgbe_mac_82598EB &&
3123 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3124 return;
3125
3126 /* the hardware may take up to 100us to really disable the rx queue */
3127 do {
3128 udelay(10);
3129 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3130 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3131
3132 if (!wait_loop) {
3133 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
3134 "the polling period\n", reg_idx);
3135 }
3136}
3137
3138void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3139 struct ixgbe_ring *ring)
3140{
3141 struct ixgbe_hw *hw = &adapter->hw;
3142 u64 rdba = ring->dma;
3143 u32 rxdctl;
3144 u8 reg_idx = ring->reg_idx;
3145
3146 /* disable queue to avoid issues while updating state */
3147 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3148 ixgbe_disable_rx_queue(adapter, ring);
3149
3150 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3151 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3152 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3153 ring->count * sizeof(union ixgbe_adv_rx_desc));
3154 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3155 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
3156 ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
3157
3158 ixgbe_configure_srrctl(adapter, ring);
3159 ixgbe_configure_rscctl(adapter, ring);
3160
3161 /* If operating in IOV mode set RLPML for X540 */
3162 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3163 hw->mac.type == ixgbe_mac_X540) {
3164 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
3165 rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
3166 ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
3167 }
3168
3169 if (hw->mac.type == ixgbe_mac_82598EB) {
3170 /*
3171 * enable cache line friendly hardware writes:
3172 * PTHRESH=32 descriptors (half the internal cache),
3173 * this also removes ugly rx_no_buffer_count increment
3174 * HTHRESH=4 descriptors (to minimize latency on fetch)
3175 * WTHRESH=8 burst writeback up to two cache lines
3176 */
3177 rxdctl &= ~0x3FFFFF;
3178 rxdctl |= 0x080420;
3179 }
3180
3181 /* enable receive descriptor ring */
3182 rxdctl |= IXGBE_RXDCTL_ENABLE;
3183 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3184
3185 ixgbe_rx_desc_queue_enable(adapter, ring);
3186 ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
3187}
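
The magic constant in `rxdctl |= 0x080420` packs exactly the three thresholds the comment spells out, on the assumption (my reading, not stated in this diff) of 7-bit PTHRESH/HTHRESH/WTHRESH fields at bits 0, 8 and 16. Decoding it:

    #include <stdio.h>
    #include <stdint.h>

    /* Decode the 0x080420 RXDCTL threshold constant; the field
     * positions are assumptions matching the comment above. */
    int main(void)
    {
        uint32_t rxdctl = 0x080420;

        printf("PTHRESH = %u\n", rxdctl & 0x7F);          /* 32 descriptors */
        printf("HTHRESH = %u\n", (rxdctl >> 8) & 0x7F);   /*  4 descriptors */
        printf("WTHRESH = %u\n", (rxdctl >> 16) & 0x7F);  /*  8 descriptors */
        return 0;
    }
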
3188
3189static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3190{
3191 struct ixgbe_hw *hw = &adapter->hw;
3192 int p;
3193
3194 /* PSRTYPE must be initialized in non 82598 adapters */
3195 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3196 IXGBE_PSRTYPE_UDPHDR |
3197 IXGBE_PSRTYPE_IPV4HDR |
3198 IXGBE_PSRTYPE_L2HDR |
3199 IXGBE_PSRTYPE_IPV6HDR;
3200
3201 if (hw->mac.type == ixgbe_mac_82598EB)
3202 return;
3203
3204 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
3205 psrtype |= (adapter->num_rx_queues_per_pool << 29);
3206
3207 for (p = 0; p < adapter->num_rx_pools; p++)
3208 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
3209 psrtype);
3210}
3211
3212static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3213{
3214 struct ixgbe_hw *hw = &adapter->hw;
3215 u32 gcr_ext;
3216 u32 vt_reg_bits;
3217 u32 reg_offset, vf_shift;
3218 u32 vmdctl;
3219
3220 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3221 return;
3222
3223 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3224 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
3225 vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
3226 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
3227
3228 vf_shift = adapter->num_vfs % 32;
3229 reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
3230
3231 /* Enable only the PF's pool for Tx/Rx */
3232 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
3233 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
3234 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
3235 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
3236 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3237
3238 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
3239 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
3240
3241 /*
3242 * Set up VF register offsets for selected VT Mode,
3243 * i.e. 32 or 64 VFs for SR-IOV
3244 */
3245 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
3246 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
3247 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
3248 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3249
3250 /* enable Tx loopback for VF/PF communication */
3251 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3252 /* Enable MAC Anti-Spoofing */
3253 hw->mac.ops.set_mac_anti_spoofing(hw,
3254 (adapter->antispoofing_enabled =
3255 (adapter->num_vfs != 0)),
3256 adapter->num_vfs);
3257}
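
`vf_shift = num_vfs % 32` and `reg_offset = (num_vfs > 32) ? 1 : 0` pick which VFRE/VFTE register and which bit within it belong to the PF's pool, which sits immediately after the VFs. A standalone sketch over a few example VF counts:

    #include <stdio.h>

    int main(void)
    {
        const int counts[] = { 7, 31, 32, 40, 63 };

        for (unsigned i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
            int num_vfs = counts[i];
            int vf_shift = num_vfs % 32;
            int reg_offset = (num_vfs > 32) ? 1 : 0;
            printf("num_vfs=%2d -> VFRE(%d) bit %2d (mask 0x%08X)\n",
                   num_vfs, reg_offset, vf_shift, 1u << vf_shift);
        }
        return 0;
    }
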
3258
3259static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
2637{ 3260{
2638 u64 rdba;
2639 struct ixgbe_hw *hw = &adapter->hw; 3261 struct ixgbe_hw *hw = &adapter->hw;
2640 struct ixgbe_ring *rx_ring;
2641 struct net_device *netdev = adapter->netdev; 3262 struct net_device *netdev = adapter->netdev;
2642 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 3263 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2643 int i, j;
2644 u32 rdlen, rxctrl, rxcsum;
2645 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
2646 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2647 0x6A3E67EA, 0x14364D17, 0x3BED200D};
2648 u32 fctrl, hlreg0;
2649 u32 reta = 0, mrqc = 0;
2650 u32 rdrxctl;
2651 int rx_buf_len; 3264 int rx_buf_len;
3265 struct ixgbe_ring *rx_ring;
3266 int i;
3267 u32 mhadd, hlreg0;
2652 3268
2653 /* Decide whether to use packet split mode or not */ 3269 /* Decide whether to use packet split mode or not */
3270 /* On by default */
3271 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
3272
2654 /* Do not use packet split if we're in SR-IOV Mode */ 3273 /* Do not use packet split if we're in SR-IOV Mode */
2655 if (!adapter->num_vfs) 3274 if (adapter->num_vfs)
2656 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 3275 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3276
3277 /* Disable packet split due to 82599 erratum #45 */
3278 if (hw->mac.type == ixgbe_mac_82599EB)
3279 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
2657 3280
2658 /* Set the RX buffer length according to the mode */ 3281 /* Set the RX buffer length according to the mode */
2659 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 3282 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
2660 rx_buf_len = IXGBE_RX_HDR_SIZE; 3283 rx_buf_len = IXGBE_RX_HDR_SIZE;
2661 if (hw->mac.type == ixgbe_mac_82599EB) {
2662 /* PSRTYPE must be initialized in 82599 */
2663 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2664 IXGBE_PSRTYPE_UDPHDR |
2665 IXGBE_PSRTYPE_IPV4HDR |
2666 IXGBE_PSRTYPE_IPV6HDR |
2667 IXGBE_PSRTYPE_L2HDR;
2668 IXGBE_WRITE_REG(hw,
2669 IXGBE_PSRTYPE(adapter->num_vfs),
2670 psrtype);
2671 }
2672 } else { 3284 } else {
2673 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && 3285 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
2674 (netdev->mtu <= ETH_DATA_LEN)) 3286 (netdev->mtu <= ETH_DATA_LEN))
2675 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 3287 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
2676 else 3288 else
2677 rx_buf_len = ALIGN(max_frame, 1024); 3289 rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
2678 } 3290 }
2679 3291
2680 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 3292#ifdef IXGBE_FCOE
2681 fctrl |= IXGBE_FCTRL_BAM; 3293 /* adjust max frame to be able to do baby jumbo for FCoE */
2682 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ 3294 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
2683 fctrl |= IXGBE_FCTRL_PMCF; 3295 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
2684 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 3296 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3297
3298#endif /* IXGBE_FCOE */
3299 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3300 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3301 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3302 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3303
3304 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3305 }
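
The MHADD update just above is a plain read-modify-write of the max-frame-size field: compare, clear the field, shift the new size in. A standalone sketch assuming a 16-bit field at bit 16 (the real mask and shift come from ixgbe_type.h), with a jumbo-MTU example of 9000 + 14 + 4 = 9018 bytes:

    #include <stdio.h>
    #include <stdint.h>

    #define MFS_SHIFT 16                  /* assumed field position */
    #define MFS_MASK  (0xFFFFu << MFS_SHIFT)

    int main(void)
    {
        uint32_t mhadd = 0x05EE0000;      /* pretend current MFS is 1518 */
        int max_frame = 9000 + 14 + 4;    /* mtu + ETH_HLEN + ETH_FCS_LEN */

        if (max_frame != (int)(mhadd >> MFS_SHIFT)) {
            mhadd &= ~MFS_MASK;                       /* clear old size */
            mhadd |= (uint32_t)max_frame << MFS_SHIFT; /* write new one */
        }
        printf("MFS now %u bytes (mhadd=0x%08X)\n",
               mhadd >> MFS_SHIFT, mhadd);
        return 0;
    }
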
2685 3306
2686 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 3307 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2687 if (adapter->netdev->mtu <= ETH_DATA_LEN) 3308 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
2688 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 3309 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2689 else
2690 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2691#ifdef IXGBE_FCOE
2692 if (netdev->features & NETIF_F_FCOE_MTU)
2693 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2694#endif
2695 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 3310 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2696 3311
2697 rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
2698 /* disable receives while setting up the descriptors */
2699 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2700 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
2701
2702 /* 3312 /*
2703 * Setup the HW Rx Head and Tail Descriptor Pointers and 3313 * Setup the HW Rx Head and Tail Descriptor Pointers and
2704 * the Base and Length of the Rx Descriptor Ring 3314 * the Base and Length of the Rx Descriptor Ring
2705 */ 3315 */
2706 for (i = 0; i < adapter->num_rx_queues; i++) { 3316 for (i = 0; i < adapter->num_rx_queues; i++) {
2707 rx_ring = adapter->rx_ring[i]; 3317 rx_ring = adapter->rx_ring[i];
2708 rdba = rx_ring->dma;
2709 j = rx_ring->reg_idx;
2710 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
2711 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
2712 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
2713 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
2714 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
2715 rx_ring->head = IXGBE_RDH(j);
2716 rx_ring->tail = IXGBE_RDT(j);
2717 rx_ring->rx_buf_len = rx_buf_len; 3318 rx_ring->rx_buf_len = rx_buf_len;
2718 3319
2719 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) 3320 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
2720 rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED; 3321 set_ring_ps_enabled(rx_ring);
3322 else
3323 clear_ring_ps_enabled(rx_ring);
3324
3325 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3326 set_ring_rsc_enabled(rx_ring);
2721 else 3327 else
2722 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; 3328 clear_ring_rsc_enabled(rx_ring);
2723 3329
2724#ifdef IXGBE_FCOE 3330#ifdef IXGBE_FCOE
2725 if (netdev->features & NETIF_F_FCOE_MTU) { 3331 if (netdev->features & NETIF_F_FCOE_MTU) {
2726 struct ixgbe_ring_feature *f; 3332 struct ixgbe_ring_feature *f;
2727 f = &adapter->ring_feature[RING_F_FCOE]; 3333 f = &adapter->ring_feature[RING_F_FCOE];
2728 if ((i >= f->mask) && (i < f->mask + f->indices)) { 3334 if ((i >= f->mask) && (i < f->mask + f->indices)) {
2729 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; 3335 clear_ring_ps_enabled(rx_ring);
2730 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) 3336 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
2731 rx_ring->rx_buf_len = 3337 rx_ring->rx_buf_len =
2732 IXGBE_FCOE_JUMBO_FRAME_SIZE; 3338 IXGBE_FCOE_JUMBO_FRAME_SIZE;
3339 } else if (!ring_is_rsc_enabled(rx_ring) &&
3340 !ring_is_ps_enabled(rx_ring)) {
3341 rx_ring->rx_buf_len =
3342 IXGBE_FCOE_JUMBO_FRAME_SIZE;
2733 } 3343 }
2734 } 3344 }
2735
2736#endif /* IXGBE_FCOE */ 3345#endif /* IXGBE_FCOE */
2737 ixgbe_configure_srrctl(adapter, rx_ring);
2738 } 3346 }
3347}
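
For the non-packet-split path above, rx_buf_len rounds `max_frame + VLAN_HLEN` up to the next 1 KiB boundary; ALIGN is the usual power-of-two round-up. A standalone sketch with example frame sizes:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        const int frames[] = { 1522, 4000, 9022 };  /* incl. VLAN_HLEN */

        for (unsigned i = 0; i < sizeof(frames) / sizeof(frames[0]); i++)
            printf("max_frame+VLAN_HLEN=%4d -> rx_buf_len=%5d\n",
                   frames[i], ALIGN(frames[i], 1024));
        return 0;   /* 1522 -> 2048, 4000 -> 4096, 9022 -> 9216 */
    }
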
2739 3348
2740 if (hw->mac.type == ixgbe_mac_82598EB) { 3349static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3350{
3351 struct ixgbe_hw *hw = &adapter->hw;
3352 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3353
3354 switch (hw->mac.type) {
3355 case ixgbe_mac_82598EB:
2741 /* 3356 /*
2742 * For VMDq support of different descriptor types or 3357 * For VMDq support of different descriptor types or
2743 * buffer sizes through the use of multiple SRRCTL 3358 * buffer sizes through the use of multiple SRRCTL
@@ -2748,110 +3363,67 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2748 * effects of setting this bit are only that SRRCTL must be 3363 * effects of setting this bit are only that SRRCTL must be
2749 * fully programmed [0..15] 3364 * fully programmed [0..15]
2750 */ 3365 */
2751 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2752 rdrxctl |= IXGBE_RDRXCTL_MVMEN; 3366 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
2753 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 3367 break;
3368 case ixgbe_mac_82599EB:
3369 case ixgbe_mac_X540:
3370 /* Disable RSC for ACK packets */
3371 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3372 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3373 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3374 /* hardware requires some bits to be set by default */
3375 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3376 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3377 break;
3378 default:
3379 /* We should do nothing since we don't know this hardware */
3380 return;
2754 } 3381 }
2755 3382
2756 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 3383 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2757 u32 vt_reg_bits; 3384}
2758 u32 reg_offset, vf_shift;
2759 u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2760 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
2761 | IXGBE_VT_CTL_REPLEN;
2762 vt_reg_bits |= (adapter->num_vfs <<
2763 IXGBE_VT_CTL_POOL_SHIFT);
2764 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2765 IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
2766
2767 vf_shift = adapter->num_vfs % 32;
2768 reg_offset = adapter->num_vfs / 32;
2769 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
2770 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
2771 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
2772 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
2773 /* Enable only the PF's pool for Tx/Rx */
2774 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2775 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2776 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2777 ixgbe_set_vmolr(hw, adapter->num_vfs, true);
2778 }
2779
2780 /* Program MRQC for the distribution of queues */
2781 mrqc = ixgbe_setup_mrqc(adapter);
2782
2783 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
2784 /* Fill out redirection table */
2785 for (i = 0, j = 0; i < 128; i++, j++) {
2786 if (j == adapter->ring_feature[RING_F_RSS].indices)
2787 j = 0;
2788 /* reta = 4-byte sliding window of
2789 * 0x00..(indices-1)(indices-1)00..etc. */
2790 reta = (reta << 8) | (j * 0x11);
2791 if ((i & 3) == 3)
2792 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2793 }
2794
2795 /* Fill out hash function seeds */
2796 for (i = 0; i < 10; i++)
2797 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
2798
2799 if (hw->mac.type == ixgbe_mac_82598EB)
2800 mrqc |= IXGBE_MRQC_RSSEN;
2801 /* Perform hash on these packet types */
2802 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2803 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2804 | IXGBE_MRQC_RSS_FIELD_IPV6
2805 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2806 }
2807 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2808 3385
2809 if (adapter->num_vfs) { 3386/**
2810 u32 reg; 3387 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
3388 * @adapter: board private structure
3389 *
3390 * Configure the Rx unit of the MAC after a reset.
3391 **/
3392static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3393{
3394 struct ixgbe_hw *hw = &adapter->hw;
3395 int i;
3396 u32 rxctrl;
2811 3397
2812 /* Map PF MAC address in RAR Entry 0 to first pool 3398 /* disable receives while setting up the descriptors */
2813 * following VFs */ 3399 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2814 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); 3400 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
2815 3401
2816 /* Set up VF register offsets for selected VT Mode, i.e. 3402 ixgbe_setup_psrtype(adapter);
2817 * 64 VFs for SR-IOV */ 3403 ixgbe_setup_rdrxctl(adapter);
2818 reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
2819 reg |= IXGBE_GCR_EXT_SRIOV;
2820 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
2821 }
2822 3404
2823 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 3405 /* Program registers for the distribution of queues */
3406 ixgbe_setup_mrqc(adapter);
2824 3407
2825 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED || 3408 ixgbe_set_uta(adapter);
2826 adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
2827 /* Disable indicating checksum in descriptor, enables
2828 * RSS hash */
2829 rxcsum |= IXGBE_RXCSUM_PCSD;
2830 }
2831 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
2832 /* Enable IPv4 payload checksum for UDP fragments
2833 * if PCSD is not set */
2834 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2835 }
2836 3409
2837 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 3410 /* set_rx_buffer_len must be called before ring initialization */
3411 ixgbe_set_rx_buffer_len(adapter);
2838 3412
2839 if (hw->mac.type == ixgbe_mac_82599EB) { 3413 /*
2840 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 3414 * Setup the HW Rx Head and Tail Descriptor Pointers and
2841 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; 3415 * the Base and Length of the Rx Descriptor Ring
2842 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 3416 */
2843 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 3417 for (i = 0; i < adapter->num_rx_queues; i++)
2844 } 3418 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
2845 3419
2846 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 3420 /* disable drop enable for 82598 parts */
2847 /* Enable 82599 HW-RSC */ 3421 if (hw->mac.type == ixgbe_mac_82598EB)
2848 for (i = 0; i < adapter->num_rx_queues; i++) 3422 rxctrl |= IXGBE_RXCTRL_DMBYPS;
2849 ixgbe_configure_rscctl(adapter, i);
2850 3423
2851 /* Disable RSC for ACK packets */ 3424 /* enable all receives */
2852 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, 3425 rxctrl |= IXGBE_RXCTRL_RXEN;
2853 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); 3426 hw->mac.ops.enable_rx_dma(hw, rxctrl);
2854 }
2855} 3427}
2856 3428
2857static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 3429static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -2862,6 +3434,7 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2862 3434
2863 /* add VID to filter table */ 3435 /* add VID to filter table */
2864 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); 3436 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
3437 set_bit(vid, adapter->active_vlans);
2865} 3438}
2866 3439
2867static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 3440static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -2870,16 +3443,9 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2870 struct ixgbe_hw *hw = &adapter->hw; 3443 struct ixgbe_hw *hw = &adapter->hw;
2871 int pool_ndx = adapter->num_vfs; 3444 int pool_ndx = adapter->num_vfs;
2872 3445
2873 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2874 ixgbe_irq_disable(adapter);
2875
2876 vlan_group_set_device(adapter->vlgrp, vid, NULL);
2877
2878 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2879 ixgbe_irq_enable(adapter);
2880
2881 /* remove VID from filter table */ 3446 /* remove VID from filter table */
2882 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); 3447 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
3448 clear_bit(vid, adapter->active_vlans);
2883} 3449}
2884 3450
2885/** 3451/**
@@ -2889,27 +3455,46 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2889static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) 3455static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
2890{ 3456{
2891 struct ixgbe_hw *hw = &adapter->hw; 3457 struct ixgbe_hw *hw = &adapter->hw;
2892 u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 3458 u32 vlnctrl;
3459
3460 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3461 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3462 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3463}
3464
3465/**
3466 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
3467 * @adapter: driver data
3468 */
3469static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3470{
3471 struct ixgbe_hw *hw = &adapter->hw;
3472 u32 vlnctrl;
3473
3474 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3475 vlnctrl |= IXGBE_VLNCTRL_VFE;
3476 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3477 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3478}
3479
3480/**
3481 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
3482 * @adapter: driver data
3483 */
3484static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3485{
3486 struct ixgbe_hw *hw = &adapter->hw;
3487 u32 vlnctrl;
2893 int i, j; 3488 int i, j;
2894 3489
2895 switch (hw->mac.type) { 3490 switch (hw->mac.type) {
2896 case ixgbe_mac_82598EB: 3491 case ixgbe_mac_82598EB:
2897 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 3492 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2898#ifdef CONFIG_IXGBE_DCB 3493 vlnctrl &= ~IXGBE_VLNCTRL_VME;
2899 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
2900 vlnctrl &= ~IXGBE_VLNCTRL_VME;
2901#endif
2902 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2903 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 3494 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2904 break; 3495 break;
2905 case ixgbe_mac_82599EB: 3496 case ixgbe_mac_82599EB:
2906 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 3497 case ixgbe_mac_X540:
2907 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2908 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2909#ifdef CONFIG_IXGBE_DCB
2910 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
2911 break;
2912#endif
2913 for (i = 0; i < adapter->num_rx_queues; i++) { 3498 for (i = 0; i < adapter->num_rx_queues; i++) {
2914 j = adapter->rx_ring[i]->reg_idx; 3499 j = adapter->rx_ring[i]->reg_idx;
2915 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 3500 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -2923,25 +3508,23 @@ static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
2923} 3508}
2924 3509
2925/** 3510/**
2926 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering 3511 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
2927 * @adapter: driver data 3512 * @adapter: driver data
2928 */ 3513 */
2929static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) 3514static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
2930{ 3515{
2931 struct ixgbe_hw *hw = &adapter->hw; 3516 struct ixgbe_hw *hw = &adapter->hw;
2932 u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 3517 u32 vlnctrl;
2933 int i, j; 3518 int i, j;
2934 3519
2935 switch (hw->mac.type) { 3520 switch (hw->mac.type) {
2936 case ixgbe_mac_82598EB: 3521 case ixgbe_mac_82598EB:
2937 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; 3522 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2938 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 3523 vlnctrl |= IXGBE_VLNCTRL_VME;
2939 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 3524 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2940 break; 3525 break;
2941 case ixgbe_mac_82599EB: 3526 case ixgbe_mac_82599EB:
2942 vlnctrl |= IXGBE_VLNCTRL_VFE; 3527 case ixgbe_mac_X540:
2943 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2944 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2945 for (i = 0; i < adapter->num_rx_queues; i++) { 3528 for (i = 0; i < adapter->num_rx_queues; i++) {
2946 j = adapter->rx_ring[i]->reg_idx; 3529 j = adapter->rx_ring[i]->reg_idx;
2947 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 3530 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -2954,40 +3537,14 @@ static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
2954 } 3537 }
2955} 3538}
2956 3539
2957static void ixgbe_vlan_rx_register(struct net_device *netdev,
2958 struct vlan_group *grp)
2959{
2960 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2961
2962 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2963 ixgbe_irq_disable(adapter);
2964 adapter->vlgrp = grp;
2965
2966 /*
2967 * For a DCB driver, always enable VLAN tag stripping so we can
2968 * still receive traffic from a DCB-enabled host even if we're
2969 * not in DCB mode.
2970 */
2971 ixgbe_vlan_filter_enable(adapter);
2972
2973 ixgbe_vlan_rx_add_vid(netdev, 0);
2974
2975 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2976 ixgbe_irq_enable(adapter);
2977}
2978
2979static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) 3540static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
2980{ 3541{
2981 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp); 3542 u16 vid;
2982 3543
2983 if (adapter->vlgrp) { 3544 ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
2984 u16 vid; 3545
2985 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 3546 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2986 if (!vlan_group_get_device(adapter->vlgrp, vid)) 3547 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
2987 continue;
2988 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
2989 }
2990 }
2991} 3548}
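
The rewritten restore path above walks an `active_vlans` bitmap with for_each_set_bit() instead of probing a vlan_group table entry by entry. A userspace sketch of the same walk, with a plain word array standing in for the kernel bitmap helpers:

    #include <stdio.h>
    #include <stdint.h>

    #define VLAN_N_VID    4096
    #define BITS_PER_WORD 64

    static uint64_t active_vlans[VLAN_N_VID / BITS_PER_WORD];

    static void set_vid(int vid)
    {
        active_vlans[vid / BITS_PER_WORD] |= 1ull << (vid % BITS_PER_WORD);
    }

    /* Visit every set bit, as for_each_set_bit() does in the hunk above */
    int main(void)
    {
        set_vid(1);
        set_vid(100);
        set_vid(4095);

        for (int vid = 0; vid < VLAN_N_VID; vid++)
            if (active_vlans[vid / BITS_PER_WORD] &
                (1ull << (vid % BITS_PER_WORD)))
                printf("restore VID %d\n", vid);
        return 0;
    }
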
2992 3549
2993/** 3550/**
@@ -3004,7 +3561,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3004 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3561 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3005 struct ixgbe_hw *hw = &adapter->hw; 3562 struct ixgbe_hw *hw = &adapter->hw;
3006 unsigned int vfn = adapter->num_vfs; 3563 unsigned int vfn = adapter->num_vfs;
3007 unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1); 3564 unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
3008 int count = 0; 3565 int count = 0;
3009 3566
3010 /* return ENOMEM indicating insufficient memory for addresses */ 3567 /* return ENOMEM indicating insufficient memory for addresses */
@@ -3052,6 +3609,11 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3052 3609
3053 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3610 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3054 3611
3612 /* set all bits that we expect to always be set */
3613 fctrl |= IXGBE_FCTRL_BAM;
3614 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
3615 fctrl |= IXGBE_FCTRL_PMCF;
3616
3055 /* clear the bits we are changing the status of */ 3617 /* clear the bits we are changing the status of */
3056 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3618 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3057 3619
@@ -3068,7 +3630,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3068 } else { 3630 } else {
3069 /* 3631 /*
3070 * Write addresses to the MTA, if the attempt fails 3632 * Write addresses to the MTA, if the attempt fails
3071 * then we should just turn on promiscous mode so 3633 * then we should just turn on promiscuous mode so
3072 * that we can at least receive multicast traffic 3634 * that we can at least receive multicast traffic
3073 */ 3635 */
3074 hw->mac.ops.update_mc_addr_list(hw, netdev); 3636 hw->mac.ops.update_mc_addr_list(hw, netdev);
@@ -3079,7 +3641,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3079 /* 3641 /*
3080 * Write addresses to available RAR registers, if there is not 3642 * Write addresses to available RAR registers, if there is not
3081 * sufficient space to store all the addresses then enable 3643 * sufficient space to store all the addresses then enable
3082 * unicast promiscous mode 3644 * unicast promiscuous mode
3083 */ 3645 */
3084 count = ixgbe_write_uc_addr_list(netdev); 3646 count = ixgbe_write_uc_addr_list(netdev);
3085 if (count < 0) { 3647 if (count < 0) {
@@ -3097,6 +3659,11 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3097 } 3659 }
3098 3660
3099 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3661 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3662
3663 if (netdev->features & NETIF_F_HW_VLAN_RX)
3664 ixgbe_vlan_strip_enable(adapter);
3665 else
3666 ixgbe_vlan_strip_disable(adapter);
3100} 3667}
3101 3668
3102static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 3669static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -3154,27 +3721,61 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3154static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) 3721static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3155{ 3722{
3156 struct ixgbe_hw *hw = &adapter->hw; 3723 struct ixgbe_hw *hw = &adapter->hw;
3157 u32 txdctl; 3724 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3158 int i, j;
3159 3725
3160 ixgbe_dcb_check_config(&adapter->dcb_cfg); 3726 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3161 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG); 3727 if (hw->mac.type == ixgbe_mac_82598EB)
3162 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG); 3728 netif_set_gso_max_size(adapter->netdev, 65536);
3729 return;
3730 }
3731
3732 if (hw->mac.type == ixgbe_mac_82598EB)
3733 netif_set_gso_max_size(adapter->netdev, 32768);
3163 3734
3164 /* reconfigure the hardware */
3165 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
3166 3735
3167 for (i = 0; i < adapter->num_tx_queues; i++) {
3168 j = adapter->tx_ring[i]->reg_idx;
3169 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3170 /* PThresh workaround for Tx hang with DFP enabled. */
3171 txdctl |= 32;
3172 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
3173 }
3174 /* Enable VLAN tag insert/strip */ 3736 /* Enable VLAN tag insert/strip */
3175 ixgbe_vlan_filter_enable(adapter); 3737 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
3176 3738
3177 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 3739 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3740
3741 /* reconfigure the hardware */
3742 if (adapter->dcbx_cap & (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE)) {
3743#ifdef CONFIG_FCOE
3744 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3745 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3746#endif
3747 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3748 DCB_TX_CONFIG);
3749 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3750 DCB_RX_CONFIG);
3751 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
3752 } else {
3753 struct net_device *dev = adapter->netdev;
3754
3755 if (adapter->ixgbe_ieee_ets)
3756 dev->dcbnl_ops->ieee_setets(dev,
3757 adapter->ixgbe_ieee_ets);
3758 if (adapter->ixgbe_ieee_pfc)
3759 dev->dcbnl_ops->ieee_setpfc(dev,
3760 adapter->ixgbe_ieee_pfc);
3761 }
3762
3763 /* Enable RSS Hash per TC */
3764 if (hw->mac.type != ixgbe_mac_82598EB) {
3765 int i;
3766 u32 reg = 0;
3767
3768 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
3769 u8 msb = 0;
3770 u8 cnt = adapter->netdev->tc_to_txq[i].count;
3771
3772 while (cnt >>= 1)
3773 msb++;
3774
3775 reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
3776 }
3777 IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
3778 }
3178} 3779}
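
The RQTC loop added above stores, for each traffic class, the base-2 logarithm of that class's queue count. A minimal standalone sketch of that computation; the 4-bit field layout (IXGBE_RQTC_SHIFT_TC(i) == 4 * i) is an assumption made for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t tc_queue_count[8] = { 8, 8, 4, 4, 2, 2, 1, 1 };	/* example */
	uint32_t reg = 0;
	int i;

	for (i = 0; i < 8; i++) {
		uint16_t cnt = tc_queue_count[i];
		uint8_t msb = 0;

		while (cnt >>= 1)	/* msb = floor(log2(count)) */
			msb++;
		reg |= (uint32_t)msb << (4 * i);	/* assumed field layout */
	}
	printf("RQTC = 0x%08x\n", (unsigned)reg);	/* 0x00112233 here */
	return 0;
}
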
3179 3780
3180#endif 3781#endif
@@ -3184,23 +3785,13 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
3184 struct ixgbe_hw *hw = &adapter->hw; 3785 struct ixgbe_hw *hw = &adapter->hw;
3185 int i; 3786 int i;
3186 3787
3187 ixgbe_set_rx_mode(netdev);
3188
3189 ixgbe_restore_vlan(adapter);
3190#ifdef CONFIG_IXGBE_DCB 3788#ifdef CONFIG_IXGBE_DCB
3191 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 3789 ixgbe_configure_dcb(adapter);
3192 if (hw->mac.type == ixgbe_mac_82598EB)
3193 netif_set_gso_max_size(netdev, 32768);
3194 else
3195 netif_set_gso_max_size(netdev, 65536);
3196 ixgbe_configure_dcb(adapter);
3197 } else {
3198 netif_set_gso_max_size(netdev, 65536);
3199 }
3200#else
3201 netif_set_gso_max_size(netdev, 65536);
3202#endif 3790#endif
3203 3791
3792 ixgbe_set_rx_mode(netdev);
3793 ixgbe_restore_vlan(adapter);
3794
3204#ifdef IXGBE_FCOE 3795#ifdef IXGBE_FCOE
3205 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 3796 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
3206 ixgbe_configure_fcoe(adapter); 3797 ixgbe_configure_fcoe(adapter);
@@ -3209,17 +3800,15 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
3209 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 3800 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3210 for (i = 0; i < adapter->num_tx_queues; i++) 3801 for (i = 0; i < adapter->num_tx_queues; i++)
3211 adapter->tx_ring[i]->atr_sample_rate = 3802 adapter->tx_ring[i]->atr_sample_rate =
3212 adapter->atr_sample_rate; 3803 adapter->atr_sample_rate;
3213 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); 3804 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
3214 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { 3805 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
3215 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc); 3806 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
3216 } 3807 }
3808 ixgbe_configure_virtualization(adapter);
3217 3809
3218 ixgbe_configure_tx(adapter); 3810 ixgbe_configure_tx(adapter);
3219 ixgbe_configure_rx(adapter); 3811 ixgbe_configure_rx(adapter);
3220 for (i = 0; i < adapter->num_rx_queues; i++)
3221 ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
3222 (adapter->rx_ring[i]->count - 1));
3223} 3812}
3224 3813
3225static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) 3814static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -3245,30 +3834,16 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
3245 **/ 3834 **/
3246static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) 3835static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3247{ 3836{
3248 struct ixgbe_hw *hw = &adapter->hw; 3837 /*
 3838 * We are assuming the worst case scenario here, and that
3839 * is that an SFP was inserted/removed after the reset
3840 * but before SFP detection was enabled. As such the best
3841 * solution is to just start searching as soon as we start
3842 */
3843 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3844 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
3249 3845
3250 if (hw->phy.multispeed_fiber) { 3846 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
3251 /*
3252 * In multispeed fiber setups, the device may not have
3253 * had a physical connection when the driver loaded.
3254 * If that's the case, the initial link configuration
3255 * couldn't get the MAC into 10G or 1G mode, so we'll
3256 * never have a link status change interrupt fire.
3257 * We need to try and force an autonegotiation
3258 * session, then bring up link.
3259 */
3260 hw->mac.ops.setup_sfp(hw);
3261 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
3262 schedule_work(&adapter->multispeed_fiber_task);
3263 } else {
3264 /*
3265 * Direct Attach Cu and non-multispeed fiber modules
3266 * still need to be configured properly prior to
3267 * attempting link.
3268 */
3269 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
3270 schedule_work(&adapter->sfp_config_module_task);
3271 }
3272} 3847}
3273 3848
3274/** 3849/**
@@ -3289,8 +3864,10 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
3289 if (ret) 3864 if (ret)
3290 goto link_cfg_out; 3865 goto link_cfg_out;
3291 3866
3292 if (hw->mac.ops.get_link_capabilities) 3867 autoneg = hw->phy.autoneg_advertised;
3293 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); 3868 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3869 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3870 &negotiation);
3294 if (ret) 3871 if (ret)
3295 goto link_cfg_out; 3872 goto link_cfg_out;
3296 3873
@@ -3300,62 +3877,15 @@ link_cfg_out:
3300 return ret; 3877 return ret;
3301} 3878}
3302 3879
3303#define IXGBE_MAX_RX_DESC_POLL 10 3880static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
3304static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3305 int rxr)
3306{
3307 int j = adapter->rx_ring[rxr]->reg_idx;
3308 int k;
3309
3310 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
3311 if (IXGBE_READ_REG(&adapter->hw,
3312 IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
3313 break;
3314 else
3315 msleep(1);
3316 }
3317 if (k >= IXGBE_MAX_RX_DESC_POLL) {
3318 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3319 "the polling period\n", rxr);
3320 }
3321 ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
3322 (adapter->rx_ring[rxr]->count - 1));
3323}
3324
3325static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3326{ 3881{
3327 struct net_device *netdev = adapter->netdev;
3328 struct ixgbe_hw *hw = &adapter->hw; 3882 struct ixgbe_hw *hw = &adapter->hw;
3329 int i, j = 0; 3883 u32 gpie = 0;
3330 int num_rx_rings = adapter->num_rx_queues;
3331 int err;
3332 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3333 u32 txdctl, rxdctl, mhadd;
3334 u32 dmatxctl;
3335 u32 gpie;
3336 u32 ctrl_ext;
3337
3338 ixgbe_get_hw_control(adapter);
3339
3340 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
3341 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
3342 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3343 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
3344 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
3345 } else {
3346 /* MSI only */
3347 gpie = 0;
3348 }
3349 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3350 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3351 gpie |= IXGBE_GPIE_VTMODE_64;
3352 }
3353 /* XXX: to interrupt immediately for EICS writes, enable this */
3354 /* gpie |= IXGBE_GPIE_EIMEN; */
3355 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3356 }
3357 3884
3358 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3885 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3886 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
3887 IXGBE_GPIE_OCD;
3888 gpie |= IXGBE_GPIE_EIAME;
3359 /* 3889 /*
3360 * use EIAM to auto-mask when MSI-X interrupt is asserted 3890 * use EIAM to auto-mask when MSI-X interrupt is asserted
3361 * this saves a register write for every interrupt 3891 * this saves a register write for every interrupt
@@ -3364,8 +3894,9 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3364 case ixgbe_mac_82598EB: 3894 case ixgbe_mac_82598EB:
3365 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3895 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3366 break; 3896 break;
3367 default:
3368 case ixgbe_mac_82599EB: 3897 case ixgbe_mac_82599EB:
3898 case ixgbe_mac_X540:
3899 default:
3369 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 3900 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3370 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 3901 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3371 break; 3902 break;
@@ -3376,115 +3907,61 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3376 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3907 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3377 } 3908 }
3378 3909
3379 /* Enable Thermal over heat sensor interrupt */ 3910 /* XXX: to interrupt immediately for EICS writes, enable this */
3380 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { 3911 /* gpie |= IXGBE_GPIE_EIMEN; */
3381 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3912
3382 gpie |= IXGBE_SDP0_GPIEN; 3913 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3383 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3914 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3915 gpie |= IXGBE_GPIE_VTMODE_64;
3384 } 3916 }
3385 3917
3386 /* Enable fan failure interrupt if media type is copper */ 3918 /* Enable fan failure interrupt */
3387 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 3919 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3388 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3389 gpie |= IXGBE_SDP1_GPIEN; 3920 gpie |= IXGBE_SDP1_GPIEN;
3390 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3391 }
3392 3921
3393 if (hw->mac.type == ixgbe_mac_82599EB) { 3922 if (hw->mac.type == ixgbe_mac_82599EB) {
3394 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3395 gpie |= IXGBE_SDP1_GPIEN; 3923 gpie |= IXGBE_SDP1_GPIEN;
3396 gpie |= IXGBE_SDP2_GPIEN; 3924 gpie |= IXGBE_SDP2_GPIEN;
3397 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3398 }
3399
3400#ifdef IXGBE_FCOE
3401 /* adjust max frame to be able to do baby jumbo for FCoE */
3402 if ((netdev->features & NETIF_F_FCOE_MTU) &&
3403 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3404 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3405
3406#endif /* IXGBE_FCOE */
3407 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3408 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3409 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3410 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3411
3412 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3413 } 3925 }
3414 3926
3415 for (i = 0; i < adapter->num_tx_queues; i++) { 3927 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3416 j = adapter->tx_ring[i]->reg_idx; 3928}
3417 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3418 if (adapter->rx_itr_setting == 0) {
3419 /* cannot set wthresh when itr==0 */
3420 txdctl &= ~0x007F0000;
3421 } else {
3422 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
3423 txdctl |= (8 << 16);
3424 }
3425 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
3426 }
3427 3929
3428 if (hw->mac.type == ixgbe_mac_82599EB) { 3930static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3429 /* DMATXCTL.EN must be set after all Tx queue config is done */ 3931{
3430 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 3932 struct ixgbe_hw *hw = &adapter->hw;
3431 dmatxctl |= IXGBE_DMATXCTL_TE; 3933 int err;
3432 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 3934 u32 ctrl_ext;
3433 }
3434 for (i = 0; i < adapter->num_tx_queues; i++) {
3435 j = adapter->tx_ring[i]->reg_idx;
3436 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3437 txdctl |= IXGBE_TXDCTL_ENABLE;
3438 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
3439 if (hw->mac.type == ixgbe_mac_82599EB) {
3440 int wait_loop = 10;
3441 /* poll for Tx Enable ready */
3442 do {
3443 msleep(1);
3444 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3445 } while (--wait_loop &&
3446 !(txdctl & IXGBE_TXDCTL_ENABLE));
3447 if (!wait_loop)
3448 e_err(drv, "Could not enable Tx Queue %d\n", j);
3449 }
3450 }
3451 3935
3452 for (i = 0; i < num_rx_rings; i++) { 3936 ixgbe_get_hw_control(adapter);
3453 j = adapter->rx_ring[i]->reg_idx; 3937 ixgbe_setup_gpie(adapter);
3454 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3455 /* enable PTHRESH=32 descriptors (half the internal cache)
3456 * and HTHRESH=0 descriptors (to minimize latency on fetch),
3457 * this also removes a pesky rx_no_buffer_count increment */
3458 rxdctl |= 0x0020;
3459 rxdctl |= IXGBE_RXDCTL_ENABLE;
3460 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
3461 if (hw->mac.type == ixgbe_mac_82599EB)
3462 ixgbe_rx_desc_queue_enable(adapter, i);
3463 }
3464 /* enable all receives */
3465 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3466 if (hw->mac.type == ixgbe_mac_82598EB)
3467 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
3468 else
3469 rxdctl |= IXGBE_RXCTRL_RXEN;
3470 hw->mac.ops.enable_rx_dma(hw, rxdctl);
3471 3938
3472 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 3939 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3473 ixgbe_configure_msix(adapter); 3940 ixgbe_configure_msix(adapter);
3474 else 3941 else
3475 ixgbe_configure_msi_and_legacy(adapter); 3942 ixgbe_configure_msi_and_legacy(adapter);
3476 3943
3477 /* enable the optics */ 3944 /* enable the optics for both multispeed fiber and 82599 SFP+ fiber */
3478 if (hw->phy.multispeed_fiber) 3945 if (hw->mac.ops.enable_tx_laser &&
3946 ((hw->phy.multispeed_fiber) ||
3947 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
3948 (hw->mac.type == ixgbe_mac_82599EB))))
3479 hw->mac.ops.enable_tx_laser(hw); 3949 hw->mac.ops.enable_tx_laser(hw);
3480 3950
3481 clear_bit(__IXGBE_DOWN, &adapter->state); 3951 clear_bit(__IXGBE_DOWN, &adapter->state);
3482 ixgbe_napi_enable_all(adapter); 3952 ixgbe_napi_enable_all(adapter);
3483 3953
3954 if (ixgbe_is_sfp(hw)) {
3955 ixgbe_sfp_link_config(adapter);
3956 } else {
3957 err = ixgbe_non_sfp_link_config(hw);
3958 if (err)
3959 e_err(probe, "link_config FAILED %d\n", err);
3960 }
3961
3484 /* clear any pending interrupts, may auto mask */ 3962 /* clear any pending interrupts, may auto mask */
3485 IXGBE_READ_REG(hw, IXGBE_EICR); 3963 IXGBE_READ_REG(hw, IXGBE_EICR);
3486 3964 ixgbe_irq_enable(adapter, true, true);
3487 ixgbe_irq_enable(adapter);
3488 3965
3489 /* 3966 /*
3490 * If this adapter has a fan, check to see if we had a failure 3967 * If this adapter has a fan, check to see if we had a failure
@@ -3496,47 +3973,14 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3496 e_crit(drv, "Fan has stopped, replace the adapter\n"); 3973 e_crit(drv, "Fan has stopped, replace the adapter\n");
3497 } 3974 }
3498 3975
3499 /*
3500 * For hot-pluggable SFP+ devices, a new SFP+ module may have
3501 * arrived before interrupts were enabled but after probe. Such
3502 * devices wouldn't have their type identified yet. We need to
3503 * kick off the SFP+ module setup first, then try to bring up link.
3504 * If we're not hot-pluggable SFP+, we just need to configure link
3505 * and bring it up.
3506 */
3507 if (hw->phy.type == ixgbe_phy_unknown) {
3508 err = hw->phy.ops.identify(hw);
3509 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3510 /*
3511 * Take the device down and schedule the sfp tasklet
3512 * which will unregister_netdev and log it.
3513 */
3514 ixgbe_down(adapter);
3515 schedule_work(&adapter->sfp_config_module_task);
3516 return err;
3517 }
3518 }
3519
3520 if (ixgbe_is_sfp(hw)) {
3521 ixgbe_sfp_link_config(adapter);
3522 } else {
3523 err = ixgbe_non_sfp_link_config(hw);
3524 if (err)
3525 e_err(probe, "link_config FAILED %d\n", err);
3526 }
3527
3528 for (i = 0; i < adapter->num_tx_queues; i++)
3529 set_bit(__IXGBE_FDIR_INIT_DONE,
3530 &(adapter->tx_ring[i]->reinit_state));
3531
3532 /* enable transmits */ 3976 /* enable transmits */
3533 netif_tx_start_all_queues(netdev); 3977 netif_tx_start_all_queues(adapter->netdev);
3534 3978
3535 /* bring the link up in the watchdog, this could race with our first 3979 /* bring the link up in the watchdog, this could race with our first
3536 * link up interrupt but shouldn't be a problem */ 3980 * link up interrupt but shouldn't be a problem */
3537 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 3981 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3538 adapter->link_check_timeout = jiffies; 3982 adapter->link_check_timeout = jiffies;
3539 mod_timer(&adapter->watchdog_timer, jiffies); 3983 mod_timer(&adapter->service_timer, jiffies);
3540 3984
3541 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 3985 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
3542 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3986 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
@@ -3549,8 +3993,11 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3549void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) 3993void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
3550{ 3994{
3551 WARN_ON(in_interrupt()); 3995 WARN_ON(in_interrupt());
3996 /* put off any impending NetWatchDogTimeout */
3997 adapter->netdev->trans_start = jiffies;
3998
3552 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 3999 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
3553 msleep(1); 4000 usleep_range(1000, 2000);
3554 ixgbe_down(adapter); 4001 ixgbe_down(adapter);
3555 /* 4002 /*
3556 * If SR-IOV enabled then wait a bit before bringing the adapter 4003 * If SR-IOV enabled then wait a bit before bringing the adapter
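
The msleep(1) above becomes usleep_range(1000, 2000), the preferred API for short waits since it uses hrtimers instead of rounding up to jiffies. The surrounding test_and_set_bit() loop is a simple sleeping lock on the RESETTING bit; a userspace analogue using C11 atomics (illustrative only, not driver code):

#include <stdatomic.h>
#include <unistd.h>

static atomic_flag resetting = ATOMIC_FLAG_INIT;

static void reinit_locked(void)
{
	/* sleep until we own the RESETTING bit */
	while (atomic_flag_test_and_set(&resetting))
		usleep(1000);	/* kernel side: usleep_range(1000, 2000) */

	/* ... the down/up sequence would run here ... */

	atomic_flag_clear(&resetting);
}

int main(void)
{
	reinit_locked();
	return 0;
}
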
@@ -3577,10 +4024,20 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
3577 struct ixgbe_hw *hw = &adapter->hw; 4024 struct ixgbe_hw *hw = &adapter->hw;
3578 int err; 4025 int err;
3579 4026
4027 /* lock SFP init bit to prevent race conditions with the watchdog */
4028 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
4029 usleep_range(1000, 2000);
4030
4031 /* clear all SFP and link config related flags while holding SFP_INIT */
4032 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
4033 IXGBE_FLAG2_SFP_NEEDS_RESET);
4034 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4035
3580 err = hw->mac.ops.init_hw(hw); 4036 err = hw->mac.ops.init_hw(hw);
3581 switch (err) { 4037 switch (err) {
3582 case 0: 4038 case 0:
3583 case IXGBE_ERR_SFP_NOT_PRESENT: 4039 case IXGBE_ERR_SFP_NOT_PRESENT:
4040 case IXGBE_ERR_SFP_NOT_SUPPORTED:
3584 break; 4041 break;
3585 case IXGBE_ERR_MASTER_REQUESTS_PENDING: 4042 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
3586 e_dev_err("master disable timed out\n"); 4043 e_dev_err("master disable timed out\n");
@@ -3598,6 +4055,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
3598 e_dev_err("Hardware Error: %d\n", err); 4055 e_dev_err("Hardware Error: %d\n", err);
3599 } 4056 }
3600 4057
4058 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
4059
3601 /* reprogram the RAR[0] in case user changed it. */ 4060 /* reprogram the RAR[0] in case user changed it. */
3602 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, 4061 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
3603 IXGBE_RAH_AV); 4062 IXGBE_RAH_AV);
@@ -3605,25 +4064,26 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
3605 4064
3606/** 4065/**
3607 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue 4066 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
3608 * @adapter: board private structure
3609 * @rx_ring: ring to free buffers from 4067 * @rx_ring: ring to free buffers from
3610 **/ 4068 **/
3611static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, 4069static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
3612 struct ixgbe_ring *rx_ring)
3613{ 4070{
3614 struct pci_dev *pdev = adapter->pdev; 4071 struct device *dev = rx_ring->dev;
3615 unsigned long size; 4072 unsigned long size;
3616 unsigned int i; 4073 u16 i;
3617 4074
3618 /* Free all the Rx ring sk_buffs */ 4075 /* ring already cleared, nothing to do */
4076 if (!rx_ring->rx_buffer_info)
4077 return;
3619 4078
4079 /* Free all the Rx ring sk_buffs */
3620 for (i = 0; i < rx_ring->count; i++) { 4080 for (i = 0; i < rx_ring->count; i++) {
3621 struct ixgbe_rx_buffer *rx_buffer_info; 4081 struct ixgbe_rx_buffer *rx_buffer_info;
3622 4082
3623 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 4083 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3624 if (rx_buffer_info->dma) { 4084 if (rx_buffer_info->dma) {
3625 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 4085 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
3626 rx_ring->rx_buf_len, 4086 rx_ring->rx_buf_len,
3627 DMA_FROM_DEVICE); 4087 DMA_FROM_DEVICE);
3628 rx_buffer_info->dma = 0; 4088 rx_buffer_info->dma = 0;
3629 } 4089 }
@@ -3633,9 +4093,9 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3633 do { 4093 do {
3634 struct sk_buff *this = skb; 4094 struct sk_buff *this = skb;
3635 if (IXGBE_RSC_CB(this)->delay_unmap) { 4095 if (IXGBE_RSC_CB(this)->delay_unmap) {
3636 dma_unmap_single(&pdev->dev, 4096 dma_unmap_single(dev,
3637 IXGBE_RSC_CB(this)->dma, 4097 IXGBE_RSC_CB(this)->dma,
3638 rx_ring->rx_buf_len, 4098 rx_ring->rx_buf_len,
3639 DMA_FROM_DEVICE); 4099 DMA_FROM_DEVICE);
3640 IXGBE_RSC_CB(this)->dma = 0; 4100 IXGBE_RSC_CB(this)->dma = 0;
3641 IXGBE_RSC_CB(skb)->delay_unmap = false; 4101 IXGBE_RSC_CB(skb)->delay_unmap = false;
@@ -3647,7 +4107,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3647 if (!rx_buffer_info->page) 4107 if (!rx_buffer_info->page)
3648 continue; 4108 continue;
3649 if (rx_buffer_info->page_dma) { 4109 if (rx_buffer_info->page_dma) {
3650 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, 4110 dma_unmap_page(dev, rx_buffer_info->page_dma,
3651 PAGE_SIZE / 2, DMA_FROM_DEVICE); 4111 PAGE_SIZE / 2, DMA_FROM_DEVICE);
3652 rx_buffer_info->page_dma = 0; 4112 rx_buffer_info->page_dma = 0;
3653 } 4113 }
@@ -3664,30 +4124,26 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3664 4124
3665 rx_ring->next_to_clean = 0; 4125 rx_ring->next_to_clean = 0;
3666 rx_ring->next_to_use = 0; 4126 rx_ring->next_to_use = 0;
3667
3668 if (rx_ring->head)
3669 writel(0, adapter->hw.hw_addr + rx_ring->head);
3670 if (rx_ring->tail)
3671 writel(0, adapter->hw.hw_addr + rx_ring->tail);
3672} 4127}
3673 4128
3674/** 4129/**
3675 * ixgbe_clean_tx_ring - Free Tx Buffers 4130 * ixgbe_clean_tx_ring - Free Tx Buffers
3676 * @adapter: board private structure
3677 * @tx_ring: ring to be cleaned 4131 * @tx_ring: ring to be cleaned
3678 **/ 4132 **/
3679static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, 4133static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
3680 struct ixgbe_ring *tx_ring)
3681{ 4134{
3682 struct ixgbe_tx_buffer *tx_buffer_info; 4135 struct ixgbe_tx_buffer *tx_buffer_info;
3683 unsigned long size; 4136 unsigned long size;
3684 unsigned int i; 4137 u16 i;
3685 4138
3686 /* Free all the Tx ring sk_buffs */ 4139 /* ring already cleared, nothing to do */
4140 if (!tx_ring->tx_buffer_info)
4141 return;
3687 4142
4143 /* Free all the Tx ring sk_buffs */
3688 for (i = 0; i < tx_ring->count; i++) { 4144 for (i = 0; i < tx_ring->count; i++) {
3689 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 4145 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3690 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 4146 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
3691 } 4147 }
3692 4148
3693 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 4149 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -3698,11 +4154,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
3698 4154
3699 tx_ring->next_to_use = 0; 4155 tx_ring->next_to_use = 0;
3700 tx_ring->next_to_clean = 0; 4156 tx_ring->next_to_clean = 0;
3701
3702 if (tx_ring->head)
3703 writel(0, adapter->hw.hw_addr + tx_ring->head);
3704 if (tx_ring->tail)
3705 writel(0, adapter->hw.hw_addr + tx_ring->tail);
3706} 4157}
3707 4158
3708/** 4159/**
@@ -3714,7 +4165,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
3714 int i; 4165 int i;
3715 4166
3716 for (i = 0; i < adapter->num_rx_queues; i++) 4167 for (i = 0; i < adapter->num_rx_queues; i++)
3717 ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]); 4168 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
3718} 4169}
3719 4170
3720/** 4171/**
@@ -3726,7 +4177,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
3726 int i; 4177 int i;
3727 4178
3728 for (i = 0; i < adapter->num_tx_queues; i++) 4179 for (i = 0; i < adapter->num_tx_queues; i++)
3729 ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]); 4180 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
3730} 4181}
3731 4182
3732void ixgbe_down(struct ixgbe_adapter *adapter) 4183void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3734,39 +4185,26 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3734 struct net_device *netdev = adapter->netdev; 4185 struct net_device *netdev = adapter->netdev;
3735 struct ixgbe_hw *hw = &adapter->hw; 4186 struct ixgbe_hw *hw = &adapter->hw;
3736 u32 rxctrl; 4187 u32 rxctrl;
3737 u32 txdctl; 4188 int i;
3738 int i, j; 4189 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3739 4190
3740 /* signal that we are down to the interrupt handler */ 4191 /* signal that we are down to the interrupt handler */
3741 set_bit(__IXGBE_DOWN, &adapter->state); 4192 set_bit(__IXGBE_DOWN, &adapter->state);
3742 4193
3743 /* disable receive for all VFs and wait one second */
3744 if (adapter->num_vfs) {
3745 /* ping all the active vfs to let them know we are going down */
3746 ixgbe_ping_all_vfs(adapter);
3747
3748 /* Disable all VFTE/VFRE TX/RX */
3749 ixgbe_disable_tx_rx(adapter);
3750
3751 /* Mark all the VFs as inactive */
3752 for (i = 0 ; i < adapter->num_vfs; i++)
3753 adapter->vfinfo[i].clear_to_send = 0;
3754 }
3755
3756 /* disable receives */ 4194 /* disable receives */
3757 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4195 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3758 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 4196 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3759 4197
3760 IXGBE_WRITE_FLUSH(hw); 4198 /* disable all enabled rx queues */
3761 msleep(10); 4199 for (i = 0; i < adapter->num_rx_queues; i++)
4200 /* this call also flushes the previous write */
4201 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
3762 4202
3763 netif_tx_stop_all_queues(netdev); 4203 usleep_range(10000, 20000);
3764 4204
3765 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 4205 netif_tx_stop_all_queues(netdev);
3766 del_timer_sync(&adapter->sfp_timer);
3767 del_timer_sync(&adapter->watchdog_timer);
3768 cancel_work_sync(&adapter->watchdog_task);
3769 4206
4207 /* call carrier off first to avoid false dev_watchdog timeouts */
3770 netif_carrier_off(netdev); 4208 netif_carrier_off(netdev);
3771 netif_tx_disable(netdev); 4209 netif_tx_disable(netdev);
3772 4210
@@ -3774,35 +4212,62 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3774 4212
3775 ixgbe_napi_disable_all(adapter); 4213 ixgbe_napi_disable_all(adapter);
3776 4214
3777 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 4215 adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
3778 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 4216 IXGBE_FLAG2_RESET_REQUESTED);
3779 cancel_work_sync(&adapter->fdir_reinit_task); 4217 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
3780 4218
3781 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) 4219 del_timer_sync(&adapter->service_timer);
3782 cancel_work_sync(&adapter->check_overtemp_task); 4220
4221 /* disable receive for all VFs and wait one second */
4222 if (adapter->num_vfs) {
4223 /* ping all the active vfs to let them know we are going down */
4224 ixgbe_ping_all_vfs(adapter);
4225
4226 /* Disable all VFTE/VFRE TX/RX */
4227 ixgbe_disable_tx_rx(adapter);
4228
4229 /* Mark all the VFs as inactive */
4230 for (i = 0 ; i < adapter->num_vfs; i++)
4231 adapter->vfinfo[i].clear_to_send = 0;
4232 }
4233
4234 /* Cleanup the affinity_hint CPU mask memory and callback */
4235 for (i = 0; i < num_q_vectors; i++) {
4236 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
4237 /* clear the affinity_mask in the IRQ descriptor */
 4238 irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL);
4239 /* release the CPU mask memory */
4240 free_cpumask_var(q_vector->affinity_mask);
4241 }
3783 4242
3784 /* disable transmits in the hardware now that interrupts are off */ 4243 /* disable transmits in the hardware now that interrupts are off */
3785 for (i = 0; i < adapter->num_tx_queues; i++) { 4244 for (i = 0; i < adapter->num_tx_queues; i++) {
3786 j = adapter->tx_ring[i]->reg_idx; 4245 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
3787 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 4246 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
3788 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
3789 (txdctl & ~IXGBE_TXDCTL_ENABLE));
3790 } 4247 }
3791 /* Disable the Tx DMA engine on 82599 */
3792 if (hw->mac.type == ixgbe_mac_82599EB)
3793 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
3794 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3795 ~IXGBE_DMATXCTL_TE));
3796
3797 /* power down the optics */
3798 if (hw->phy.multispeed_fiber)
3799 hw->mac.ops.disable_tx_laser(hw);
3800 4248
3801 /* clear n-tuple filters that are cached */ 4249 /* Disable the Tx DMA engine on 82599 and X540 */
3802 ethtool_ntuple_flush(netdev); 4250 switch (hw->mac.type) {
4251 case ixgbe_mac_82599EB:
4252 case ixgbe_mac_X540:
4253 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
4254 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
4255 ~IXGBE_DMATXCTL_TE));
4256 break;
4257 default:
4258 break;
4259 }
3803 4260
3804 if (!pci_channel_offline(adapter->pdev)) 4261 if (!pci_channel_offline(adapter->pdev))
3805 ixgbe_reset(adapter); 4262 ixgbe_reset(adapter);
4263
4264 /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
4265 if (hw->mac.ops.disable_tx_laser &&
4266 ((hw->phy.multispeed_fiber) ||
4267 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
4268 (hw->mac.type == ixgbe_mac_82599EB))))
4269 hw->mac.ops.disable_tx_laser(hw);
4270
3806 ixgbe_clean_all_tx_rings(adapter); 4271 ixgbe_clean_all_tx_rings(adapter);
3807 ixgbe_clean_all_rx_rings(adapter); 4272 ixgbe_clean_all_rx_rings(adapter);
3808 4273
@@ -3822,15 +4287,13 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3822static int ixgbe_poll(struct napi_struct *napi, int budget) 4287static int ixgbe_poll(struct napi_struct *napi, int budget)
3823{ 4288{
3824 struct ixgbe_q_vector *q_vector = 4289 struct ixgbe_q_vector *q_vector =
3825 container_of(napi, struct ixgbe_q_vector, napi); 4290 container_of(napi, struct ixgbe_q_vector, napi);
3826 struct ixgbe_adapter *adapter = q_vector->adapter; 4291 struct ixgbe_adapter *adapter = q_vector->adapter;
3827 int tx_clean_complete, work_done = 0; 4292 int tx_clean_complete, work_done = 0;
3828 4293
3829#ifdef CONFIG_IXGBE_DCA 4294#ifdef CONFIG_IXGBE_DCA
3830 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 4295 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3831 ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]); 4296 ixgbe_update_dca(q_vector);
3832 ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
3833 }
3834#endif 4297#endif
3835 4298
3836 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); 4299 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
@@ -3859,44 +4322,9 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
3859 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4322 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3860 4323
3861 /* Do the reset outside of interrupt context */ 4324 /* Do the reset outside of interrupt context */
3862 schedule_work(&adapter->reset_task); 4325 ixgbe_tx_timeout_reset(adapter);
3863}
3864
3865static void ixgbe_reset_task(struct work_struct *work)
3866{
3867 struct ixgbe_adapter *adapter;
3868 adapter = container_of(work, struct ixgbe_adapter, reset_task);
3869
3870 /* If we're already down or resetting, just bail */
3871 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
3872 test_bit(__IXGBE_RESETTING, &adapter->state))
3873 return;
3874
3875 adapter->tx_timeout_count++;
3876
3877 ixgbe_dump(adapter);
3878 netdev_err(adapter->netdev, "Reset adapter\n");
3879 ixgbe_reinit_locked(adapter);
3880} 4326}
3881 4327
3882#ifdef CONFIG_IXGBE_DCB
3883static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
3884{
3885 bool ret = false;
3886 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
3887
3888 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
3889 return ret;
3890
3891 f->mask = 0x7 << 3;
3892 adapter->num_rx_queues = f->indices;
3893 adapter->num_tx_queues = f->indices;
3894 ret = true;
3895
3896 return ret;
3897}
3898#endif
3899
3900/** 4328/**
3901 * ixgbe_set_rss_queues: Allocate queues for RSS 4329 * ixgbe_set_rss_queues: Allocate queues for RSS
3902 * @adapter: board private structure to initialize 4330 * @adapter: board private structure to initialize
@@ -3932,7 +4360,7 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
3932 * Rx load across CPUs using RSS. 4360 * Rx load across CPUs using RSS.
3933 * 4361 *
3934 **/ 4362 **/
3935static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) 4363static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
3936{ 4364{
3937 bool ret = false; 4365 bool ret = false;
3938 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; 4366 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
@@ -3967,19 +4395,26 @@ static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
3967 **/ 4395 **/
3968static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) 4396static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3969{ 4397{
3970 bool ret = false;
3971 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; 4398 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
3972 4399
3973 f->indices = min((int)num_online_cpus(), f->indices); 4400 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
3974 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 4401 return false;
3975 adapter->num_rx_queues = 1; 4402
3976 adapter->num_tx_queues = 1; 4403 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3977#ifdef CONFIG_IXGBE_DCB 4404#ifdef CONFIG_IXGBE_DCB
3978 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 4405 int tc;
3979 e_info(probe, "FCoE enabled with DCB\n"); 4406 struct net_device *dev = adapter->netdev;
3980 ixgbe_set_dcb_queues(adapter); 4407
3981 } 4408 tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
4409 f->indices = dev->tc_to_txq[tc].count;
4410 f->mask = dev->tc_to_txq[tc].offset;
3982#endif 4411#endif
4412 } else {
4413 f->indices = min((int)num_online_cpus(), f->indices);
4414
4415 adapter->num_rx_queues = 1;
4416 adapter->num_tx_queues = 1;
4417
3983 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 4418 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3984 e_info(probe, "FCoE enabled with RSS\n"); 4419 e_info(probe, "FCoE enabled with RSS\n");
3985 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 4420 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
@@ -3992,14 +4427,45 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3992 f->mask = adapter->num_rx_queues; 4427 f->mask = adapter->num_rx_queues;
3993 adapter->num_rx_queues += f->indices; 4428 adapter->num_rx_queues += f->indices;
3994 adapter->num_tx_queues += f->indices; 4429 adapter->num_tx_queues += f->indices;
4430 }
3995 4431
3996 ret = true; 4432 return true;
4433}
4434#endif /* IXGBE_FCOE */
4435
4436#ifdef CONFIG_IXGBE_DCB
4437static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
4438{
4439 bool ret = false;
4440 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
4441 int i, q;
4442
4443 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4444 return ret;
4445
4446 f->indices = 0;
4447 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
4448 q = min((int)num_online_cpus(), MAX_TRAFFIC_CLASS);
4449 f->indices += q;
3997 } 4450 }
3998 4451
4452 f->mask = 0x7 << 3;
4453 adapter->num_rx_queues = f->indices;
4454 adapter->num_tx_queues = f->indices;
4455 ret = true;
4456
4457#ifdef IXGBE_FCOE
4458 /* FCoE enabled queues require special configuration done through
4459 * configure_fcoe() and others. Here we map FCoE indices onto the
4460 * DCB queue pairs allowing FCoE to own configuration later.
4461 */
4462 ixgbe_set_fcoe_queues(adapter);
4463#endif
4464
3999 return ret; 4465 return ret;
4000} 4466}
4467#endif
4001 4468
4002#endif /* IXGBE_FCOE */
4003/** 4469/**
4004 * ixgbe_set_sriov_queues: Allocate queues for IOV use 4470 * ixgbe_set_sriov_queues: Allocate queues for IOV use
4005 * @adapter: board private structure to initialize 4471 * @adapter: board private structure to initialize
@@ -4014,7 +4480,7 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
4014} 4480}
4015 4481
4016/* 4482/*
4017 * ixgbe_set_num_queues: Allocate queues for device, feature dependant 4483 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
4018 * @adapter: board private structure to initialize 4484 * @adapter: board private structure to initialize
4019 * 4485 *
4020 * This is the top level queue allocation routine. The order here is very 4486 * This is the top level queue allocation routine. The order here is very
@@ -4024,7 +4490,7 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
4024 * fallthrough conditions. 4490 * fallthrough conditions.
4025 * 4491 *
4026 **/ 4492 **/
4027static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) 4493static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
4028{ 4494{
4029 /* Start with base case */ 4495 /* Start with base case */
4030 adapter->num_rx_queues = 1; 4496 adapter->num_rx_queues = 1;
@@ -4033,18 +4499,18 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
4033 adapter->num_rx_queues_per_pool = 1; 4499 adapter->num_rx_queues_per_pool = 1;
4034 4500
4035 if (ixgbe_set_sriov_queues(adapter)) 4501 if (ixgbe_set_sriov_queues(adapter))
4036 return;
4037
4038#ifdef IXGBE_FCOE
4039 if (ixgbe_set_fcoe_queues(adapter))
4040 goto done; 4502 goto done;
4041 4503
4042#endif /* IXGBE_FCOE */
4043#ifdef CONFIG_IXGBE_DCB 4504#ifdef CONFIG_IXGBE_DCB
4044 if (ixgbe_set_dcb_queues(adapter)) 4505 if (ixgbe_set_dcb_queues(adapter))
4045 goto done; 4506 goto done;
4046 4507
4047#endif 4508#endif
4509#ifdef IXGBE_FCOE
4510 if (ixgbe_set_fcoe_queues(adapter))
4511 goto done;
4512
4513#endif /* IXGBE_FCOE */
4048 if (ixgbe_set_fdir_queues(adapter)) 4514 if (ixgbe_set_fdir_queues(adapter))
4049 goto done; 4515 goto done;
4050 4516
@@ -4056,12 +4522,14 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
4056 adapter->num_tx_queues = 1; 4522 adapter->num_tx_queues = 1;
4057 4523
4058done: 4524done:
4059 /* Notify the stack of the (possibly) reduced Tx Queue count. */ 4525 /* Notify the stack of the (possibly) reduced queue counts. */
4060 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); 4526 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
4527 return netif_set_real_num_rx_queues(adapter->netdev,
4528 adapter->num_rx_queues);
4061} 4529}
4062 4530
4063static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, 4531static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4064 int vectors) 4532 int vectors)
4065{ 4533{
4066 int err, vector_threshold; 4534 int err, vector_threshold;
4067 4535
@@ -4080,7 +4548,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4080 */ 4548 */
4081 while (vectors >= vector_threshold) { 4549 while (vectors >= vector_threshold) {
4082 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 4550 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
4083 vectors); 4551 vectors);
4084 if (!err) /* Success in acquiring all requested vectors. */ 4552 if (!err) /* Success in acquiring all requested vectors. */
4085 break; 4553 break;
4086 else if (err < 0) 4554 else if (err < 0)
@@ -4107,7 +4575,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4107 * vectors we were allocated. 4575 * vectors we were allocated.
4108 */ 4576 */
4109 adapter->num_msix_vectors = min(vectors, 4577 adapter->num_msix_vectors = min(vectors,
4110 adapter->max_msix_q_vectors + NON_Q_VECTORS); 4578 adapter->max_msix_q_vectors + NON_Q_VECTORS);
4111 } 4579 }
4112} 4580}
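
For context: at this point in the kernel, pci_enable_msix() returned 0 on success, a negative errno on hard failure, and a positive count of the vectors actually available when the request was too large, which is what the shrink-and-retry loop above exploits. A standalone sketch of that contract (try_enable() is a made-up stand-in):

#include <stdio.h>

/* stand-in for pci_enable_msix(): pretend only 6 vectors exist */
static int try_enable(int requested)
{
	return requested <= 6 ? 0 : 6;
}

static int acquire_vectors(int vectors, int vector_threshold)
{
	while (vectors >= vector_threshold) {
		int err = try_enable(vectors);

		if (!err)
			return vectors;	/* got everything we asked for */
		if (err < 0)
			return err;	/* hard failure: give up on MSI-X */
		vectors = err;		/* > 0: retry with what is available */
	}
	return -1;			/* could not meet the minimum */
}

int main(void)
{
	printf("%d\n", acquire_vectors(10, 2));	/* prints 6 */
	return 0;
}
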
4113 4581
@@ -4121,22 +4589,123 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4121static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) 4589static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
4122{ 4590{
4123 int i; 4591 int i;
4124 bool ret = false;
4125 4592
4126 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 4593 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
4127 for (i = 0; i < adapter->num_rx_queues; i++) 4594 return false;
4128 adapter->rx_ring[i]->reg_idx = i;
4129 for (i = 0; i < adapter->num_tx_queues; i++)
4130 adapter->tx_ring[i]->reg_idx = i;
4131 ret = true;
4132 } else {
4133 ret = false;
4134 }
4135 4595
4136 return ret; 4596 for (i = 0; i < adapter->num_rx_queues; i++)
4597 adapter->rx_ring[i]->reg_idx = i;
4598 for (i = 0; i < adapter->num_tx_queues; i++)
4599 adapter->tx_ring[i]->reg_idx = i;
4600
4601 return true;
4137} 4602}
4138 4603
4139#ifdef CONFIG_IXGBE_DCB 4604#ifdef CONFIG_IXGBE_DCB
4605
4606/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
4607static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
4608 unsigned int *tx, unsigned int *rx)
4609{
4610 struct net_device *dev = adapter->netdev;
4611 struct ixgbe_hw *hw = &adapter->hw;
4612 u8 num_tcs = netdev_get_num_tc(dev);
4613
4614 *tx = 0;
4615 *rx = 0;
4616
4617 switch (hw->mac.type) {
4618 case ixgbe_mac_82598EB:
4619 *tx = tc << 3;
4620 *rx = tc << 2;
4621 break;
4622 case ixgbe_mac_82599EB:
4623 case ixgbe_mac_X540:
4624 if (num_tcs == 8) {
4625 if (tc < 3) {
4626 *tx = tc << 5;
4627 *rx = tc << 4;
4628 } else if (tc < 5) {
4629 *tx = ((tc + 2) << 4);
4630 *rx = tc << 4;
4631 } else if (tc < num_tcs) {
4632 *tx = ((tc + 8) << 3);
4633 *rx = tc << 4;
4634 }
4635 } else if (num_tcs == 4) {
4636 *rx = tc << 5;
4637 switch (tc) {
4638 case 0:
4639 *tx = 0;
4640 break;
4641 case 1:
4642 *tx = 64;
4643 break;
4644 case 2:
4645 *tx = 96;
4646 break;
4647 case 3:
4648 *tx = 112;
4649 break;
4650 default:
4651 break;
4652 }
4653 }
4654 break;
4655 default:
4656 break;
4657 }
4658}
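
The helper above computes the same fixed 82599 layout that the comment table removed from ixgbe_cache_ring_dcb() further down used to document. A standalone sketch of the 8-TC case, so the resulting layout stays visible:

#include <stdio.h>

int main(void)
{
	unsigned int tc;

	for (tc = 0; tc < 8; tc++) {
		unsigned int tx = (tc < 3) ? tc << 5 :
				  (tc < 5) ? (tc + 2) << 4 : (tc + 8) << 3;
		unsigned int rx = tc << 4;

		printf("TC%u: first Tx queue %3u, first Rx queue %3u\n",
		       tc, tx, rx);
	}
	/* Tx starts: 0, 32, 64, 80, 96, 104, 112, 120; Rx: steps of 16 */
	return 0;
}
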
4659
4660#define IXGBE_MAX_Q_PER_TC (IXGBE_MAX_DCB_INDICES / MAX_TRAFFIC_CLASS)
4661
4662/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
4663 * classes.
4664 *
 4665 * @dev: net device to configure
4666 * @tc: number of traffic classes to enable
4667 */
4668int ixgbe_setup_tc(struct net_device *dev, u8 tc)
4669{
4670 int i;
4671 unsigned int q, offset = 0;
4672
4673 if (!tc) {
4674 netdev_reset_tc(dev);
4675 } else {
4676 struct ixgbe_adapter *adapter = netdev_priv(dev);
4677
4678 /* Hardware supports up to 8 traffic classes */
4679 if (tc > MAX_TRAFFIC_CLASS || netdev_set_num_tc(dev, tc))
4680 return -EINVAL;
4681
4682 /* Partition Tx queues evenly amongst traffic classes */
4683 for (i = 0; i < tc; i++) {
4684 q = min((int)num_online_cpus(), IXGBE_MAX_Q_PER_TC);
4685 netdev_set_prio_tc_map(dev, i, i);
4686 netdev_set_tc_queue(dev, i, q, offset);
4687 offset += q;
4688 }
4689
4690 /* This enables multiple traffic class support in the hardware
 4691 * which defaults to strict priority transmission.
 4692 * If traffic classes are already enabled, perhaps through the DCB
 4693 * code path, then the existing configuration will be used.
4694 */
4695 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
4696 dev->dcbnl_ops && dev->dcbnl_ops->setdcbx) {
4697 struct ieee_ets ets = {
4698 .prio_tc = {0, 1, 2, 3, 4, 5, 6, 7},
4699 };
4700 u8 mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
4701
4702 dev->dcbnl_ops->setdcbx(dev, mode);
4703 dev->dcbnl_ops->ieee_setets(dev, &ets);
4704 }
4705 }
4706 return 0;
4707}
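
A worked example of the partitioning loop above, assuming 8 online CPUs and IXGBE_MAX_Q_PER_TC == 8 (the macro's value depends on IXGBE_MAX_DCB_INDICES, so both figures are assumptions):

#include <stdio.h>

int main(void)
{
	unsigned int i, q, offset = 0;

	for (i = 0; i < 4; i++) {	/* as in ixgbe_setup_tc(dev, 4) */
		q = 8;	/* min(num_online_cpus(), IXGBE_MAX_Q_PER_TC) */
		printf("TC%u -> %u queues starting at %u\n", i, q, offset);
		offset += q;	/* TC0: 0-7, TC1: 8-15, TC2: 16-23, TC3: 24-31 */
	}
	return 0;
}
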
4708
4140/** 4709/**
4141 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB 4710 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
4142 * @adapter: board private structure to initialize 4711 * @adapter: board private structure to initialize
@@ -4146,76 +4715,27 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
4146 **/ 4715 **/
4147static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) 4716static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4148{ 4717{
4149 int i; 4718 struct net_device *dev = adapter->netdev;
4150 bool ret = false; 4719 int i, j, k;
4151 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 4720 u8 num_tcs = netdev_get_num_tc(dev);
4152 4721
4153 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 4722 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4154 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 4723 return false;
4155 /* the number of queues is assumed to be symmetric */
4156 for (i = 0; i < dcb_i; i++) {
4157 adapter->rx_ring[i]->reg_idx = i << 3;
4158 adapter->tx_ring[i]->reg_idx = i << 2;
4159 }
4160 ret = true;
4161 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
4162 if (dcb_i == 8) {
4163 /*
4164 * Tx TC0 starts at: descriptor queue 0
4165 * Tx TC1 starts at: descriptor queue 32
4166 * Tx TC2 starts at: descriptor queue 64
4167 * Tx TC3 starts at: descriptor queue 80
4168 * Tx TC4 starts at: descriptor queue 96
4169 * Tx TC5 starts at: descriptor queue 104
4170 * Tx TC6 starts at: descriptor queue 112
4171 * Tx TC7 starts at: descriptor queue 120
4172 *
4173 * Rx TC0-TC7 are offset by 16 queues each
4174 */
4175 for (i = 0; i < 3; i++) {
4176 adapter->tx_ring[i]->reg_idx = i << 5;
4177 adapter->rx_ring[i]->reg_idx = i << 4;
4178 }
4179 for ( ; i < 5; i++) {
4180 adapter->tx_ring[i]->reg_idx =
4181 ((i + 2) << 4);
4182 adapter->rx_ring[i]->reg_idx = i << 4;
4183 }
4184 for ( ; i < dcb_i; i++) {
4185 adapter->tx_ring[i]->reg_idx =
4186 ((i + 8) << 3);
4187 adapter->rx_ring[i]->reg_idx = i << 4;
4188 }
4189 4724
4190 ret = true; 4725 for (i = 0, k = 0; i < num_tcs; i++) {
4191 } else if (dcb_i == 4) { 4726 unsigned int tx_s, rx_s;
4192 /* 4727 u16 count = dev->tc_to_txq[i].count;
4193 * Tx TC0 starts at: descriptor queue 0 4728
4194 * Tx TC1 starts at: descriptor queue 64 4729 ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
4195 * Tx TC2 starts at: descriptor queue 96 4730 for (j = 0; j < count; j++, k++) {
4196 * Tx TC3 starts at: descriptor queue 112 4731 adapter->tx_ring[k]->reg_idx = tx_s + j;
4197 * 4732 adapter->rx_ring[k]->reg_idx = rx_s + j;
4198 * Rx TC0-TC3 are offset by 32 queues each 4733 adapter->tx_ring[k]->dcb_tc = i;
4199 */ 4734 adapter->rx_ring[k]->dcb_tc = i;
4200 adapter->tx_ring[0]->reg_idx = 0;
4201 adapter->tx_ring[1]->reg_idx = 64;
4202 adapter->tx_ring[2]->reg_idx = 96;
4203 adapter->tx_ring[3]->reg_idx = 112;
4204 for (i = 0 ; i < dcb_i; i++)
4205 adapter->rx_ring[i]->reg_idx = i << 5;
4206
4207 ret = true;
4208 } else {
4209 ret = false;
4210 }
4211 } else {
4212 ret = false;
4213 } 4735 }
4214 } else {
4215 ret = false;
4216 } 4736 }
4217 4737
4218 return ret; 4738 return true;
4219} 4739}
4220#endif 4740#endif
4221 4741
@@ -4226,7 +4746,7 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4226 * Cache the descriptor ring offsets for Flow Director to the assigned rings. 4746 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
4227 * 4747 *
4228 **/ 4748 **/
4229static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) 4749static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
4230{ 4750{
4231 int i; 4751 int i;
4232 bool ret = false; 4752 bool ret = false;
@@ -4254,55 +4774,28 @@ static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
4254 */ 4774 */
4255static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) 4775static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
4256{ 4776{
4257 int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
4258 bool ret = false;
4259 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; 4777 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4778 int i;
4779 u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
4260 4780
4261 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 4781 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
4262#ifdef CONFIG_IXGBE_DCB 4782 return false;
4263 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4264 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
4265 4783
4266 ixgbe_cache_ring_dcb(adapter); 4784 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4267 /* find out queues in TC for FCoE */ 4785 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4268 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; 4786 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4269 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; 4787 ixgbe_cache_ring_fdir(adapter);
4270 /* 4788 else
4271 * In 82599, the number of Tx queues for each traffic 4789 ixgbe_cache_ring_rss(adapter);
4272 * class for both 8-TC and 4-TC modes are:
4273 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
4274 * 8 TCs: 32 32 16 16 8 8 8 8
4275 * 4 TCs: 64 64 32 32
4276 * We have max 8 queues for FCoE, where 8 the is
4277 * FCoE redirection table size. If TC for FCoE is
4278 * less than or equal to TC3, we have enough queues
4279 * to add max of 8 queues for FCoE, so we start FCoE
4280 * tx descriptor from the next one, i.e., reg_idx + 1.
4281 * If TC for FCoE is above TC3, implying 8 TC mode,
4282 * and we need 8 for FCoE, we have to take all queues
4283 * in that traffic class for FCoE.
4284 */
4285 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
4286 fcoe_tx_i--;
4287 }
4288#endif /* CONFIG_IXGBE_DCB */
4289 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4290 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4291 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4292 ixgbe_cache_ring_fdir(adapter);
4293 else
4294 ixgbe_cache_ring_rss(adapter);
4295 4790
4296 fcoe_rx_i = f->mask; 4791 fcoe_rx_i = f->mask;
4297 fcoe_tx_i = f->mask; 4792 fcoe_tx_i = f->mask;
4298 }
4299 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
4300 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4301 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
4302 }
4303 ret = true;
4304 } 4793 }
4305 return ret; 4794 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
4795 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4796 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
4797 }
4798 return true;
4306} 4799}
4307 4800
4308#endif /* IXGBE_FCOE */ 4801#endif /* IXGBE_FCOE */
@@ -4344,16 +4837,16 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
4344 if (ixgbe_cache_ring_sriov(adapter)) 4837 if (ixgbe_cache_ring_sriov(adapter))
4345 return; 4838 return;
4346 4839
4840#ifdef CONFIG_IXGBE_DCB
4841 if (ixgbe_cache_ring_dcb(adapter))
4842 return;
4843#endif
4844
4347#ifdef IXGBE_FCOE 4845#ifdef IXGBE_FCOE
4348 if (ixgbe_cache_ring_fcoe(adapter)) 4846 if (ixgbe_cache_ring_fcoe(adapter))
4349 return; 4847 return;
4350
4351#endif /* IXGBE_FCOE */ 4848#endif /* IXGBE_FCOE */
4352#ifdef CONFIG_IXGBE_DCB
4353 if (ixgbe_cache_ring_dcb(adapter))
4354 return;
4355 4849
4356#endif
4357 if (ixgbe_cache_ring_fdir(adapter)) 4850 if (ixgbe_cache_ring_fdir(adapter))
4358 return; 4851 return;
4359 4852
@@ -4371,65 +4864,55 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
4371 **/ 4864 **/
4372static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) 4865static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
4373{ 4866{
4374 int i; 4867 int rx = 0, tx = 0, nid = adapter->node;
4375 int orig_node = adapter->node;
4376 4868
4377 for (i = 0; i < adapter->num_tx_queues; i++) { 4869 if (nid < 0 || !node_online(nid))
4378 struct ixgbe_ring *ring = adapter->tx_ring[i]; 4870 nid = first_online_node;
4379 if (orig_node == -1) { 4871
4380 int cur_node = next_online_node(adapter->node); 4872 for (; tx < adapter->num_tx_queues; tx++) {
4381 if (cur_node == MAX_NUMNODES) 4873 struct ixgbe_ring *ring;
4382 cur_node = first_online_node; 4874
4383 adapter->node = cur_node; 4875 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
4384 }
4385 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4386 adapter->node);
4387 if (!ring) 4876 if (!ring)
4388 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); 4877 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
4389 if (!ring) 4878 if (!ring)
4390 goto err_tx_ring_allocation; 4879 goto err_allocation;
4391 ring->count = adapter->tx_ring_count; 4880 ring->count = adapter->tx_ring_count;
4392 ring->queue_index = i; 4881 ring->queue_index = tx;
4393 ring->numa_node = adapter->node; 4882 ring->numa_node = nid;
4883 ring->dev = &adapter->pdev->dev;
4884 ring->netdev = adapter->netdev;
4394 4885
4395 adapter->tx_ring[i] = ring; 4886 adapter->tx_ring[tx] = ring;
4396 } 4887 }
4397 4888
4398 /* Restore the adapter's original node */ 4889 for (; rx < adapter->num_rx_queues; rx++) {
4399 adapter->node = orig_node; 4890 struct ixgbe_ring *ring;
4400 4891
4401 for (i = 0; i < adapter->num_rx_queues; i++) { 4892 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
4402 struct ixgbe_ring *ring = adapter->rx_ring[i];
4403 if (orig_node == -1) {
4404 int cur_node = next_online_node(adapter->node);
4405 if (cur_node == MAX_NUMNODES)
4406 cur_node = first_online_node;
4407 adapter->node = cur_node;
4408 }
4409 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4410 adapter->node);
4411 if (!ring) 4893 if (!ring)
4412 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); 4894 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
4413 if (!ring) 4895 if (!ring)
4414 goto err_rx_ring_allocation; 4896 goto err_allocation;
4415 ring->count = adapter->rx_ring_count; 4897 ring->count = adapter->rx_ring_count;
4416 ring->queue_index = i; 4898 ring->queue_index = rx;
4417 ring->numa_node = adapter->node; 4899 ring->numa_node = nid;
4900 ring->dev = &adapter->pdev->dev;
4901 ring->netdev = adapter->netdev;
4418 4902
4419 adapter->rx_ring[i] = ring; 4903 adapter->rx_ring[rx] = ring;
4420 } 4904 }
4421 4905
4422 /* Restore the adapter's original node */
4423 adapter->node = orig_node;
4424
4425 ixgbe_cache_ring_register(adapter); 4906 ixgbe_cache_ring_register(adapter);
4426 4907
4427 return 0; 4908 return 0;
4428 4909
4429err_rx_ring_allocation: 4910err_allocation:
4430 for (i = 0; i < adapter->num_tx_queues; i++) 4911 while (tx)
4431 kfree(adapter->tx_ring[i]); 4912 kfree(adapter->tx_ring[--tx]);
4432err_tx_ring_allocation: 4913
4914 while (rx)
4915 kfree(adapter->rx_ring[--rx]);
4433 return -ENOMEM; 4916 return -ENOMEM;
4434} 4917}
4435 4918
@@ -4453,7 +4936,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
4453 * (roughly) the same number of vectors as there are CPU's. 4936 * (roughly) the same number of vectors as there are CPU's.
4454 */ 4937 */
4455 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, 4938 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
4456 (int)num_online_cpus()) + NON_Q_VECTORS; 4939 (int)num_online_cpus()) + NON_Q_VECTORS;
4457 4940
4458 /* 4941 /*
4459 * At the same time, hardware can only support a maximum of 4942 * At the same time, hardware can only support a maximum of
@@ -4467,7 +4950,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
4467 /* A failure in MSI-X entry allocation isn't fatal, but it does 4950 /* A failure in MSI-X entry allocation isn't fatal, but it does
4468 * mean we disable MSI-X capabilities of the adapter. */ 4951 * mean we disable MSI-X capabilities of the adapter. */
4469 adapter->msix_entries = kcalloc(v_budget, 4952 adapter->msix_entries = kcalloc(v_budget,
4470 sizeof(struct msix_entry), GFP_KERNEL); 4953 sizeof(struct msix_entry), GFP_KERNEL);
4471 if (adapter->msix_entries) { 4954 if (adapter->msix_entries) {
4472 for (vector = 0; vector < v_budget; vector++) 4955 for (vector = 0; vector < v_budget; vector++)
4473 adapter->msix_entries[vector].entry = vector; 4956 adapter->msix_entries[vector].entry = vector;
@@ -4480,13 +4963,21 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
4480 4963
4481 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 4964 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
4482 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 4965 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
4966 if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
4967 IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
4968 e_err(probe,
4969 "Flow Director is not supported while multiple "
4970 "queues are disabled. Disabling Flow Director\n");
4971 }
4483 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 4972 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4484 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 4973 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4485 adapter->atr_sample_rate = 0; 4974 adapter->atr_sample_rate = 0;
4486 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 4975 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4487 ixgbe_disable_sriov(adapter); 4976 ixgbe_disable_sriov(adapter);
4488 4977
4489 ixgbe_set_num_queues(adapter); 4978 err = ixgbe_set_num_queues(adapter);
4979 if (err)
4980 return err;
4490 4981
4491 err = pci_enable_msi(adapter->pdev); 4982 err = pci_enable_msi(adapter->pdev);
4492 if (!err) { 4983 if (!err) {
@@ -4514,25 +5005,22 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4514{ 5005{
4515 int q_idx, num_q_vectors; 5006 int q_idx, num_q_vectors;
4516 struct ixgbe_q_vector *q_vector; 5007 struct ixgbe_q_vector *q_vector;
4517 int napi_vectors;
4518 int (*poll)(struct napi_struct *, int); 5008 int (*poll)(struct napi_struct *, int);
4519 5009
4520 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 5010 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4521 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 5011 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4522 napi_vectors = adapter->num_rx_queues;
4523 poll = &ixgbe_clean_rxtx_many; 5012 poll = &ixgbe_clean_rxtx_many;
4524 } else { 5013 } else {
4525 num_q_vectors = 1; 5014 num_q_vectors = 1;
4526 napi_vectors = 1;
4527 poll = &ixgbe_poll; 5015 poll = &ixgbe_poll;
4528 } 5016 }
4529 5017
4530 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 5018 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
4531 q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), 5019 q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
4532 GFP_KERNEL, adapter->node); 5020 GFP_KERNEL, adapter->node);
4533 if (!q_vector) 5021 if (!q_vector)
4534 q_vector = kzalloc(sizeof(struct ixgbe_q_vector), 5022 q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
4535 GFP_KERNEL); 5023 GFP_KERNEL);
4536 if (!q_vector) 5024 if (!q_vector)
4537 goto err_out; 5025 goto err_out;
4538 q_vector->adapter = adapter; 5026 q_vector->adapter = adapter;
@@ -4611,7 +5099,9 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
4611 int err; 5099 int err;
4612 5100
4613 /* Number of supported queues */ 5101 /* Number of supported queues */
4614 ixgbe_set_num_queues(adapter); 5102 err = ixgbe_set_num_queues(adapter);
5103 if (err)
5104 return err;
4615 5105
4616 err = ixgbe_set_interrupt_capability(adapter); 5106 err = ixgbe_set_interrupt_capability(adapter);
4617 if (err) { 5107 if (err) {
@@ -4663,66 +5153,23 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
4663 adapter->tx_ring[i] = NULL; 5153 adapter->tx_ring[i] = NULL;
4664 } 5154 }
4665 for (i = 0; i < adapter->num_rx_queues; i++) { 5155 for (i = 0; i < adapter->num_rx_queues; i++) {
4666 kfree(adapter->rx_ring[i]); 5156 struct ixgbe_ring *ring = adapter->rx_ring[i];
5157
5158 /* ixgbe_get_stats64() might access this ring, we must wait
5159 * a grace period before freeing it.
5160 */
5161 kfree_rcu(ring, rcu);
4667 adapter->rx_ring[i] = NULL; 5162 adapter->rx_ring[i] = NULL;
4668 } 5163 }
4669 5164
5165 adapter->num_tx_queues = 0;
5166 adapter->num_rx_queues = 0;
5167
4670 ixgbe_free_q_vectors(adapter); 5168 ixgbe_free_q_vectors(adapter);
4671 ixgbe_reset_interrupt_capability(adapter); 5169 ixgbe_reset_interrupt_capability(adapter);
4672} 5170}
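
kfree() becomes kfree_rcu() above because ixgbe_get_stats64() may still be walking an Rx ring under rcu_read_lock() when the ring is torn down; the memory must outlive every such reader. A generic sketch of the pattern, assuming the object embeds a struct rcu_head named rcu (as struct ixgbe_ring does here) - illustrative names, not the driver's exact code:

    struct ring {
            struct rcu_head rcu;
            /* ... descriptor state, counters ... */
    };

    /* Reader side (e.g. a stats path): the object is guaranteed to
     * stay valid for the whole RCU read-side critical section. */
    rcu_read_lock();
    r = rcu_dereference(table[i]);
    if (r)
            read_stats(r);
    rcu_read_unlock();

    /* Writer side: unpublish the pointer first, then defer the free
     * until a grace period has elapsed and existing readers are gone. */
    RCU_INIT_POINTER(table[i], NULL);
    kfree_rcu(r, rcu);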
4673 5171
4674/** 5172/**
4675 * ixgbe_sfp_timer - worker thread to find a missing module
4676 * @data: pointer to our adapter struct
4677 **/
4678static void ixgbe_sfp_timer(unsigned long data)
4679{
4680 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
4681
4682 /*
4683 * Do the sfp_timer outside of interrupt context due to the
4684 * delays that sfp+ detection requires
4685 */
4686 schedule_work(&adapter->sfp_task);
4687}
4688
4689/**
4690 * ixgbe_sfp_task - worker thread to find a missing module
4691 * @work: pointer to work_struct containing our data
4692 **/
4693static void ixgbe_sfp_task(struct work_struct *work)
4694{
4695 struct ixgbe_adapter *adapter = container_of(work,
4696 struct ixgbe_adapter,
4697 sfp_task);
4698 struct ixgbe_hw *hw = &adapter->hw;
4699
4700 if ((hw->phy.type == ixgbe_phy_nl) &&
4701 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4702 s32 ret = hw->phy.ops.identify_sfp(hw);
4703 if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
4704 goto reschedule;
4705 ret = hw->phy.ops.reset(hw);
4706 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4707 e_dev_err("failed to initialize because an unsupported "
4708 "SFP+ module type was detected.\n");
4709 e_dev_err("Reload the driver after installing a "
4710 "supported module.\n");
4711 unregister_netdev(adapter->netdev);
4712 } else {
4713 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
4714 }
4715 /* don't need this routine any more */
4716 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
4717 }
4718 return;
4719reschedule:
4720 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
4721 mod_timer(&adapter->sfp_timer,
4722 round_jiffies(jiffies + (2 * HZ)));
4723}
4724
4725/**
4726 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) 5173 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
4727 * @adapter: board private structure to initialize 5174 * @adapter: board private structure to initialize
4728 * 5175 *
@@ -4740,6 +5187,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4740 int j; 5187 int j;
4741 struct tc_configuration *tc; 5188 struct tc_configuration *tc;
4742#endif 5189#endif
5190 int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
4743 5191
4744 /* PCI config space info */ 5192 /* PCI config space info */
4745 5193
@@ -4754,28 +5202,26 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4754 adapter->ring_feature[RING_F_RSS].indices = rss; 5202 adapter->ring_feature[RING_F_RSS].indices = rss;
4755 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 5203 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
4756 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; 5204 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
4757 if (hw->mac.type == ixgbe_mac_82598EB) { 5205 switch (hw->mac.type) {
5206 case ixgbe_mac_82598EB:
4758 if (hw->device_id == IXGBE_DEV_ID_82598AT) 5207 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4759 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 5208 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
4760 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 5209 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
4761 } else if (hw->mac.type == ixgbe_mac_82599EB) { 5210 break;
5211 case ixgbe_mac_82599EB:
5212 case ixgbe_mac_X540:
4762 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 5213 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
4763 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 5214 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4764 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 5215 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
4765 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) 5216 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
4766 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 5217 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
4767 if (dev->features & NETIF_F_NTUPLE) { 5218 /* n-tuple support exists, always init our spinlock */
4768 /* Flow Director perfect filter enabled */ 5219 spin_lock_init(&adapter->fdir_perfect_lock);
4769 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 5220 /* Flow Director hash filters enabled */
4770 adapter->atr_sample_rate = 0; 5221 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
4771 spin_lock_init(&adapter->fdir_perfect_lock); 5222 adapter->atr_sample_rate = 20;
4772 } else {
4773 /* Flow Director hash filters enabled */
4774 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
4775 adapter->atr_sample_rate = 20;
4776 }
4777 adapter->ring_feature[RING_F_FDIR].indices = 5223 adapter->ring_feature[RING_F_FDIR].indices =
4778 IXGBE_MAX_FDIR_INDICES; 5224 IXGBE_MAX_FDIR_INDICES;
4779 adapter->fdir_pballoc = 0; 5225 adapter->fdir_pballoc = 0;
4780#ifdef IXGBE_FCOE 5226#ifdef IXGBE_FCOE
4781 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; 5227 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
@@ -4787,6 +5233,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4787 adapter->fcoe.up = IXGBE_FCOE_DEFTC; 5233 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
4788#endif 5234#endif
4789#endif /* IXGBE_FCOE */ 5235#endif /* IXGBE_FCOE */
5236 break;
5237 default:
5238 break;
4790 } 5239 }
4791 5240
4792#ifdef CONFIG_IXGBE_DCB 5241#ifdef CONFIG_IXGBE_DCB
@@ -4803,10 +5252,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4803 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; 5252 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
4804 adapter->dcb_cfg.rx_pba_cfg = pba_equal; 5253 adapter->dcb_cfg.rx_pba_cfg = pba_equal;
4805 adapter->dcb_cfg.pfc_mode_enable = false; 5254 adapter->dcb_cfg.pfc_mode_enable = false;
4806 adapter->dcb_cfg.round_robin_enable = false;
4807 adapter->dcb_set_bitmap = 0x00; 5255 adapter->dcb_set_bitmap = 0x00;
5256 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
4808 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, 5257 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
4809 adapter->ring_feature[RING_F_DCB].indices); 5258 MAX_TRAFFIC_CLASS);
4810 5259
4811#endif 5260#endif
4812 5261
@@ -4816,8 +5265,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4816#ifdef CONFIG_DCB 5265#ifdef CONFIG_DCB
4817 adapter->last_lfc_mode = hw->fc.current_mode; 5266 adapter->last_lfc_mode = hw->fc.current_mode;
4818#endif 5267#endif
4819 hw->fc.high_water = IXGBE_DEFAULT_FCRTH; 5268 hw->fc.high_water = FC_HIGH_WATER(max_frame);
4820 hw->fc.low_water = IXGBE_DEFAULT_FCRTL; 5269 hw->fc.low_water = FC_LOW_WATER(max_frame);
4821 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; 5270 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4822 hw->fc.send_xon = true; 5271 hw->fc.send_xon = true;
4823 hw->fc.disable_fc_autoneg = false; 5272 hw->fc.disable_fc_autoneg = false;
@@ -4855,30 +5304,27 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4855 5304
4856/** 5305/**
4857 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) 5306 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
4858 * @adapter: board private structure
4859 * @tx_ring: tx descriptor ring (for a specific queue) to setup 5307 * @tx_ring: tx descriptor ring (for a specific queue) to setup
4860 * 5308 *
4861 * Return 0 on success, negative on failure 5309 * Return 0 on success, negative on failure
4862 **/ 5310 **/
4863int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, 5311int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
4864 struct ixgbe_ring *tx_ring)
4865{ 5312{
4866 struct pci_dev *pdev = adapter->pdev; 5313 struct device *dev = tx_ring->dev;
4867 int size; 5314 int size;
4868 5315
4869 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 5316 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4870 tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node); 5317 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
4871 if (!tx_ring->tx_buffer_info) 5318 if (!tx_ring->tx_buffer_info)
4872 tx_ring->tx_buffer_info = vmalloc(size); 5319 tx_ring->tx_buffer_info = vzalloc(size);
4873 if (!tx_ring->tx_buffer_info) 5320 if (!tx_ring->tx_buffer_info)
4874 goto err; 5321 goto err;
4875 memset(tx_ring->tx_buffer_info, 0, size);
4876 5322
4877 /* round up to nearest 4K */ 5323 /* round up to nearest 4K */
4878 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 5324 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
4879 tx_ring->size = ALIGN(tx_ring->size, 4096); 5325 tx_ring->size = ALIGN(tx_ring->size, 4096);
4880 5326
4881 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 5327 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4882 &tx_ring->dma, GFP_KERNEL); 5328 &tx_ring->dma, GFP_KERNEL);
4883 if (!tx_ring->desc) 5329 if (!tx_ring->desc)
4884 goto err; 5330 goto err;
@@ -4891,7 +5337,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
4891err: 5337err:
4892 vfree(tx_ring->tx_buffer_info); 5338 vfree(tx_ring->tx_buffer_info);
4893 tx_ring->tx_buffer_info = NULL; 5339 tx_ring->tx_buffer_info = NULL;
4894 e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n"); 5340 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4895 return -ENOMEM; 5341 return -ENOMEM;
4896} 5342}
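
The explicit memset() disappears above because vzalloc() and vzalloc_node() hand back already-zeroed memory, and the _node variant first tries the ring's home NUMA node before the plain vzalloc() fallback accepts any node. The equivalence the change relies on, roughly (a sketch, not the kernel's implementation):

    void *vzalloc_equiv(unsigned long size)
    {
            void *p = vmalloc(size);

            if (p)
                    memset(p, 0, size);
            return p;
    }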
4897 5343
@@ -4910,7 +5356,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4910 int i, err = 0; 5356 int i, err = 0;
4911 5357
4912 for (i = 0; i < adapter->num_tx_queues; i++) { 5358 for (i = 0; i < adapter->num_tx_queues; i++) {
4913 err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]); 5359 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
4914 if (!err) 5360 if (!err)
4915 continue; 5361 continue;
4916 e_err(probe, "Allocation for Tx Queue %u failed\n", i); 5362 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
@@ -4922,48 +5368,40 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4922 5368
4923/** 5369/**
4924 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) 5370 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
4925 * @adapter: board private structure
4926 * @rx_ring: rx descriptor ring (for a specific queue) to setup 5371 * @rx_ring: rx descriptor ring (for a specific queue) to setup
4927 * 5372 *
4928 * Returns 0 on success, negative on failure 5373 * Returns 0 on success, negative on failure
4929 **/ 5374 **/
4930int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, 5375int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
4931 struct ixgbe_ring *rx_ring)
4932{ 5376{
4933 struct pci_dev *pdev = adapter->pdev; 5377 struct device *dev = rx_ring->dev;
4934 int size; 5378 int size;
4935 5379
4936 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 5380 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4937 rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node); 5381 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
4938 if (!rx_ring->rx_buffer_info) 5382 if (!rx_ring->rx_buffer_info)
4939 rx_ring->rx_buffer_info = vmalloc(size); 5383 rx_ring->rx_buffer_info = vzalloc(size);
4940 if (!rx_ring->rx_buffer_info) { 5384 if (!rx_ring->rx_buffer_info)
4941 e_err(probe, "vmalloc allocation failed for the Rx " 5385 goto err;
4942 "descriptor ring\n");
4943 goto alloc_failed;
4944 }
4945 memset(rx_ring->rx_buffer_info, 0, size);
4946 5386
4947 /* Round up to nearest 4K */ 5387 /* Round up to nearest 4K */
4948 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 5388 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
4949 rx_ring->size = ALIGN(rx_ring->size, 4096); 5389 rx_ring->size = ALIGN(rx_ring->size, 4096);
4950 5390
4951 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 5391 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4952 &rx_ring->dma, GFP_KERNEL); 5392 &rx_ring->dma, GFP_KERNEL);
4953 5393
4954 if (!rx_ring->desc) { 5394 if (!rx_ring->desc)
4955 e_err(probe, "Memory allocation failed for the Rx " 5395 goto err;
4956 "descriptor ring\n");
4957 vfree(rx_ring->rx_buffer_info);
4958 goto alloc_failed;
4959 }
4960 5396
4961 rx_ring->next_to_clean = 0; 5397 rx_ring->next_to_clean = 0;
4962 rx_ring->next_to_use = 0; 5398 rx_ring->next_to_use = 0;
4963 5399
4964 return 0; 5400 return 0;
4965 5401err:
4966alloc_failed: 5402 vfree(rx_ring->rx_buffer_info);
5403 rx_ring->rx_buffer_info = NULL;
5404 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4967 return -ENOMEM; 5405 return -ENOMEM;
4968} 5406}
4969 5407
@@ -4977,13 +5415,12 @@ alloc_failed:
4977 * 5415 *
4978 * Return 0 on success, negative on failure 5416 * Return 0 on success, negative on failure
4979 **/ 5417 **/
4980
4981static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) 5418static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
4982{ 5419{
4983 int i, err = 0; 5420 int i, err = 0;
4984 5421
4985 for (i = 0; i < adapter->num_rx_queues; i++) { 5422 for (i = 0; i < adapter->num_rx_queues; i++) {
4986 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); 5423 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
4987 if (!err) 5424 if (!err)
4988 continue; 5425 continue;
4989 e_err(probe, "Allocation for Rx Queue %u failed\n", i); 5426 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
@@ -4995,23 +5432,23 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
4995 5432
4996/** 5433/**
4997 * ixgbe_free_tx_resources - Free Tx Resources per Queue 5434 * ixgbe_free_tx_resources - Free Tx Resources per Queue
4998 * @adapter: board private structure
4999 * @tx_ring: Tx descriptor ring for a specific queue 5435 * @tx_ring: Tx descriptor ring for a specific queue
5000 * 5436 *
5001 * Free all transmit software resources 5437 * Free all transmit software resources
5002 **/ 5438 **/
5003void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, 5439void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
5004 struct ixgbe_ring *tx_ring)
5005{ 5440{
5006 struct pci_dev *pdev = adapter->pdev; 5441 ixgbe_clean_tx_ring(tx_ring);
5007
5008 ixgbe_clean_tx_ring(adapter, tx_ring);
5009 5442
5010 vfree(tx_ring->tx_buffer_info); 5443 vfree(tx_ring->tx_buffer_info);
5011 tx_ring->tx_buffer_info = NULL; 5444 tx_ring->tx_buffer_info = NULL;
5012 5445
5013 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 5446 /* if not set, then don't free */
5014 tx_ring->dma); 5447 if (!tx_ring->desc)
5448 return;
5449
5450 dma_free_coherent(tx_ring->dev, tx_ring->size,
5451 tx_ring->desc, tx_ring->dma);
5015 5452
5016 tx_ring->desc = NULL; 5453 tx_ring->desc = NULL;
5017} 5454}
@@ -5028,28 +5465,28 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5028 5465
5029 for (i = 0; i < adapter->num_tx_queues; i++) 5466 for (i = 0; i < adapter->num_tx_queues; i++)
5030 if (adapter->tx_ring[i]->desc) 5467 if (adapter->tx_ring[i]->desc)
5031 ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]); 5468 ixgbe_free_tx_resources(adapter->tx_ring[i]);
5032} 5469}
5033 5470
5034/** 5471/**
5035 * ixgbe_free_rx_resources - Free Rx Resources 5472 * ixgbe_free_rx_resources - Free Rx Resources
5036 * @adapter: board private structure
5037 * @rx_ring: ring to clean the resources from 5473 * @rx_ring: ring to clean the resources from
5038 * 5474 *
5039 * Free all receive software resources 5475 * Free all receive software resources
5040 **/ 5476 **/
5041void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, 5477void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
5042 struct ixgbe_ring *rx_ring)
5043{ 5478{
5044 struct pci_dev *pdev = adapter->pdev; 5479 ixgbe_clean_rx_ring(rx_ring);
5045
5046 ixgbe_clean_rx_ring(adapter, rx_ring);
5047 5480
5048 vfree(rx_ring->rx_buffer_info); 5481 vfree(rx_ring->rx_buffer_info);
5049 rx_ring->rx_buffer_info = NULL; 5482 rx_ring->rx_buffer_info = NULL;
5050 5483
5051 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 5484 /* if not set, then don't free */
5052 rx_ring->dma); 5485 if (!rx_ring->desc)
5486 return;
5487
5488 dma_free_coherent(rx_ring->dev, rx_ring->size,
5489 rx_ring->desc, rx_ring->dma);
5053 5490
5054 rx_ring->desc = NULL; 5491 rx_ring->desc = NULL;
5055} 5492}
@@ -5066,7 +5503,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5066 5503
5067 for (i = 0; i < adapter->num_rx_queues; i++) 5504 for (i = 0; i < adapter->num_rx_queues; i++)
5068 if (adapter->rx_ring[i]->desc) 5505 if (adapter->rx_ring[i]->desc)
5069 ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]); 5506 ixgbe_free_rx_resources(adapter->rx_ring[i]);
5070} 5507}
5071 5508
5072/** 5509/**
@@ -5079,16 +5516,26 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5079static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) 5516static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5080{ 5517{
5081 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5518 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5519 struct ixgbe_hw *hw = &adapter->hw;
5082 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5520 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5083 5521
5084 /* MTU < 68 is an error and causes problems on some kernels */ 5522 /* MTU < 68 is an error and causes problems on some kernels */
5085 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) 5523 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED &&
5086 return -EINVAL; 5524 hw->mac.type != ixgbe_mac_X540) {
5525 if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
5526 return -EINVAL;
5527 } else {
5528 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5529 return -EINVAL;
5530 }
5087 5531
5088 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); 5532 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5089 /* must set new MTU before calling down or up */ 5533 /* must set new MTU before calling down or up */
5090 netdev->mtu = new_mtu; 5534 netdev->mtu = new_mtu;
5091 5535
5536 hw->fc.high_water = FC_HIGH_WATER(max_frame);
5537 hw->fc.low_water = FC_LOW_WATER(max_frame);
5538
5092 if (netif_running(netdev)) 5539 if (netif_running(netdev))
5093 ixgbe_reinit_locked(adapter); 5540 ixgbe_reinit_locked(adapter);
5094 5541
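
For concreteness, the max_frame arithmetic used above with the standard sizes from if_ether.h (ETH_HLEN = 14, ETH_FCS_LEN = 4):

    #include <stdio.h>

    #define ETH_HLEN    14  /* dest MAC + source MAC + EtherType */
    #define ETH_FCS_LEN  4  /* trailing frame check sequence */

    int main(void)
    {
            int mtu = 1500;
            /* 1500 + 14 + 4 = 1518; a 9000-byte jumbo MTU gives 9018 */
            printf("max_frame = %d\n", mtu + ETH_HLEN + ETH_FCS_LEN);
            return 0;
    }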
@@ -5184,8 +5631,8 @@ static int ixgbe_close(struct net_device *netdev)
5184#ifdef CONFIG_PM 5631#ifdef CONFIG_PM
5185static int ixgbe_resume(struct pci_dev *pdev) 5632static int ixgbe_resume(struct pci_dev *pdev)
5186{ 5633{
5187 struct net_device *netdev = pci_get_drvdata(pdev); 5634 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5188 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5635 struct net_device *netdev = adapter->netdev;
5189 u32 err; 5636 u32 err;
5190 5637
5191 pci_set_power_state(pdev, PCI_D0); 5638 pci_set_power_state(pdev, PCI_D0);
@@ -5216,7 +5663,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
5216 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 5663 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5217 5664
5218 if (netif_running(netdev)) { 5665 if (netif_running(netdev)) {
5219 err = ixgbe_open(adapter->netdev); 5666 err = ixgbe_open(netdev);
5220 if (err) 5667 if (err)
5221 return err; 5668 return err;
5222 } 5669 }
@@ -5229,8 +5676,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
5229 5676
5230static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) 5677static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5231{ 5678{
5232 struct net_device *netdev = pci_get_drvdata(pdev); 5679 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5233 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5680 struct net_device *netdev = adapter->netdev;
5234 struct ixgbe_hw *hw = &adapter->hw; 5681 struct ixgbe_hw *hw = &adapter->hw;
5235 u32 ctrl, fctrl; 5682 u32 ctrl, fctrl;
5236 u32 wufc = adapter->wol; 5683 u32 wufc = adapter->wol;
@@ -5247,6 +5694,12 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5247 ixgbe_free_all_rx_resources(adapter); 5694 ixgbe_free_all_rx_resources(adapter);
5248 } 5695 }
5249 5696
5697 ixgbe_clear_interrupt_scheme(adapter);
5698#ifdef CONFIG_DCB
5699 kfree(adapter->ixgbe_ieee_pfc);
5700 kfree(adapter->ixgbe_ieee_ets);
5701#endif
5702
5250#ifdef CONFIG_PM 5703#ifdef CONFIG_PM
5251 retval = pci_save_state(pdev); 5704 retval = pci_save_state(pdev);
5252 if (retval) 5705 if (retval)
@@ -5273,15 +5726,20 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5273 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 5726 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5274 } 5727 }
5275 5728
5276 if (wufc && hw->mac.type == ixgbe_mac_82599EB) 5729 switch (hw->mac.type) {
5277 pci_wake_from_d3(pdev, true); 5730 case ixgbe_mac_82598EB:
5278 else
5279 pci_wake_from_d3(pdev, false); 5731 pci_wake_from_d3(pdev, false);
5732 break;
5733 case ixgbe_mac_82599EB:
5734 case ixgbe_mac_X540:
5735 pci_wake_from_d3(pdev, !!wufc);
5736 break;
5737 default:
5738 break;
5739 }
5280 5740
5281 *enable_wake = !!wufc; 5741 *enable_wake = !!wufc;
5282 5742
5283 ixgbe_clear_interrupt_scheme(adapter);
5284
5285 ixgbe_release_hw_control(adapter); 5743 ixgbe_release_hw_control(adapter);
5286 5744
5287 pci_disable_device(pdev); 5745 pci_disable_device(pdev);
@@ -5330,9 +5788,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5330{ 5788{
5331 struct net_device *netdev = adapter->netdev; 5789 struct net_device *netdev = adapter->netdev;
5332 struct ixgbe_hw *hw = &adapter->hw; 5790 struct ixgbe_hw *hw = &adapter->hw;
5791 struct ixgbe_hw_stats *hwstats = &adapter->stats;
5333 u64 total_mpc = 0; 5792 u64 total_mpc = 0;
5334 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; 5793 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
5335 u64 non_eop_descs = 0, restart_queue = 0; 5794 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
5795 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
5796 u64 bytes = 0, packets = 0;
5336 5797
5337 if (test_bit(__IXGBE_DOWN, &adapter->state) || 5798 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5338 test_bit(__IXGBE_RESETTING, &adapter->state)) 5799 test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5343,158 +5804,227 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5343 u64 rsc_flush = 0; 5804 u64 rsc_flush = 0;
5344 for (i = 0; i < 16; i++) 5805 for (i = 0; i < 16; i++)
5345 adapter->hw_rx_no_dma_resources += 5806 adapter->hw_rx_no_dma_resources +=
5346 IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 5807 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5347 for (i = 0; i < adapter->num_rx_queues; i++) { 5808 for (i = 0; i < adapter->num_rx_queues; i++) {
5348 rsc_count += adapter->rx_ring[i]->rsc_count; 5809 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
5349 rsc_flush += adapter->rx_ring[i]->rsc_flush; 5810 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
5350 } 5811 }
5351 adapter->rsc_total_count = rsc_count; 5812 adapter->rsc_total_count = rsc_count;
5352 adapter->rsc_total_flush = rsc_flush; 5813 adapter->rsc_total_flush = rsc_flush;
5353 } 5814 }
5354 5815
5816 for (i = 0; i < adapter->num_rx_queues; i++) {
5817 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
5818 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
5819 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
5820 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
5821 bytes += rx_ring->stats.bytes;
5822 packets += rx_ring->stats.packets;
5823 }
5824 adapter->non_eop_descs = non_eop_descs;
5825 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
5826 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
5827 netdev->stats.rx_bytes = bytes;
5828 netdev->stats.rx_packets = packets;
5829
5830 bytes = 0;
5831 packets = 0;
5355 /* gather some stats to the adapter struct that are per queue */ 5832 /* gather some stats to the adapter struct that are per queue */
5356 for (i = 0; i < adapter->num_tx_queues; i++) 5833 for (i = 0; i < adapter->num_tx_queues; i++) {
5357 restart_queue += adapter->tx_ring[i]->restart_queue; 5834 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5835 restart_queue += tx_ring->tx_stats.restart_queue;
5836 tx_busy += tx_ring->tx_stats.tx_busy;
5837 bytes += tx_ring->stats.bytes;
5838 packets += tx_ring->stats.packets;
5839 }
5358 adapter->restart_queue = restart_queue; 5840 adapter->restart_queue = restart_queue;
5841 adapter->tx_busy = tx_busy;
5842 netdev->stats.tx_bytes = bytes;
5843 netdev->stats.tx_packets = packets;
5359 5844
5360 for (i = 0; i < adapter->num_rx_queues; i++) 5845 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
5361 non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
5362 adapter->non_eop_descs = non_eop_descs;
5363
5364 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
5365 for (i = 0; i < 8; i++) { 5846 for (i = 0; i < 8; i++) {
5366 /* for packet buffers not used, the register should read 0 */ 5847 /* for packet buffers not used, the register should read 0 */
5367 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 5848 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
5368 missed_rx += mpc; 5849 missed_rx += mpc;
5369 adapter->stats.mpc[i] += mpc; 5850 hwstats->mpc[i] += mpc;
5370 total_mpc += adapter->stats.mpc[i]; 5851 total_mpc += hwstats->mpc[i];
5371 if (hw->mac.type == ixgbe_mac_82598EB) 5852 if (hw->mac.type == ixgbe_mac_82598EB)
5372 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 5853 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
5373 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 5854 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
5374 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 5855 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5375 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 5856 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5376 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); 5857 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
5377 if (hw->mac.type == ixgbe_mac_82599EB) { 5858 switch (hw->mac.type) {
5378 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, 5859 case ixgbe_mac_82598EB:
5379 IXGBE_PXONRXCNT(i)); 5860 hwstats->pxonrxc[i] +=
5380 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, 5861 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
5381 IXGBE_PXOFFRXCNT(i)); 5862 break;
5382 adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 5863 case ixgbe_mac_82599EB:
5383 } else { 5864 case ixgbe_mac_X540:
5384 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, 5865 hwstats->pxonrxc[i] +=
5385 IXGBE_PXONRXC(i)); 5866 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5386 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, 5867 break;
5387 IXGBE_PXOFFRXC(i)); 5868 default:
5869 break;
5388 } 5870 }
5389 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw, 5871 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
5390 IXGBE_PXONTXC(i)); 5872 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
5391 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
5392 IXGBE_PXOFFTXC(i));
5393 } 5873 }
5394 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 5874 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
5395 /* work around hardware counting issue */ 5875 /* work around hardware counting issue */
5396 adapter->stats.gprc -= missed_rx; 5876 hwstats->gprc -= missed_rx;
5877
5878 ixgbe_update_xoff_received(adapter);
5397 5879
5398 /* 82598 hardware only has a 32 bit counter in the high register */ 5880 /* 82598 hardware only has a 32 bit counter in the high register */
5399 if (hw->mac.type == ixgbe_mac_82599EB) { 5881 switch (hw->mac.type) {
5400 u64 tmp; 5882 case ixgbe_mac_82598EB:
5401 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 5883 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
5402 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */ 5884 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5403 adapter->stats.gorc += (tmp << 32); 5885 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5404 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 5886 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5405 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */ 5887 break;
5406 adapter->stats.gotc += (tmp << 32); 5888 case ixgbe_mac_X540:
5407 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL); 5889 /* OS2BMC stats are X540 only*/
5890 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
5891 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
5892 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
5893 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
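 /* fall through - X540 also accumulates the 82599 counters below */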
5894 case ixgbe_mac_82599EB:
5895 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
5896 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
5897 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
5898 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
5899 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
5408 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 5900 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
5409 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 5901 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
5410 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 5902 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
5411 adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 5903 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
5412 adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
5413#ifdef IXGBE_FCOE 5904#ifdef IXGBE_FCOE
5414 adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 5905 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
5415 adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 5906 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
5416 adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 5907 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
5417 adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 5908 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
5418 adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 5909 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
5419 adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 5910 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
5420#endif /* IXGBE_FCOE */ 5911#endif /* IXGBE_FCOE */
5421 } else { 5912 break;
5422 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 5913 default:
5423 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 5914 break;
5424 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5425 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5426 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5427 } 5915 }
5428 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 5916 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
5429 adapter->stats.bprc += bprc; 5917 hwstats->bprc += bprc;
5430 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 5918 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
5431 if (hw->mac.type == ixgbe_mac_82598EB) 5919 if (hw->mac.type == ixgbe_mac_82598EB)
5432 adapter->stats.mprc -= bprc; 5920 hwstats->mprc -= bprc;
5433 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC); 5921 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
5434 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 5922 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
5435 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 5923 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
5436 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 5924 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
5437 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 5925 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
5438 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 5926 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
5439 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 5927 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
5440 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 5928 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
5441 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 5929 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
5442 adapter->stats.lxontxc += lxon; 5930 hwstats->lxontxc += lxon;
5443 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 5931 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
5444 adapter->stats.lxofftxc += lxoff; 5932 hwstats->lxofftxc += lxoff;
5445 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 5933 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5446 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); 5934 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
5447 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 5935 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
5448 /* 5936 /*
5449 * 82598 errata - tx of flow control packets is included in tx counters 5937 * 82598 errata - tx of flow control packets is included in tx counters
5450 */ 5938 */
5451 xon_off_tot = lxon + lxoff; 5939 xon_off_tot = lxon + lxoff;
5452 adapter->stats.gptc -= xon_off_tot; 5940 hwstats->gptc -= xon_off_tot;
5453 adapter->stats.mptc -= xon_off_tot; 5941 hwstats->mptc -= xon_off_tot;
5454 adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); 5942 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
5455 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 5943 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5456 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 5944 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
5457 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 5945 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
5458 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 5946 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
5459 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 5947 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
5460 adapter->stats.ptc64 -= xon_off_tot; 5948 hwstats->ptc64 -= xon_off_tot;
5461 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 5949 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
5462 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 5950 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
5463 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 5951 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
5464 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 5952 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
5465 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 5953 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
5466 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 5954 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
5467 5955
5468 /* Fill out the OS statistics structure */ 5956 /* Fill out the OS statistics structure */
5469 netdev->stats.multicast = adapter->stats.mprc; 5957 netdev->stats.multicast = hwstats->mprc;
5470 5958
5471 /* Rx Errors */ 5959 /* Rx Errors */
5472 netdev->stats.rx_errors = adapter->stats.crcerrs + 5960 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
5473 adapter->stats.rlec;
5474 netdev->stats.rx_dropped = 0; 5961 netdev->stats.rx_dropped = 0;
5475 netdev->stats.rx_length_errors = adapter->stats.rlec; 5962 netdev->stats.rx_length_errors = hwstats->rlec;
5476 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 5963 netdev->stats.rx_crc_errors = hwstats->crcerrs;
5477 netdev->stats.rx_missed_errors = total_mpc; 5964 netdev->stats.rx_missed_errors = total_mpc;
5478} 5965}
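
Each update pass above folds the per-ring counters into single adapter- and netdev-level totals. The aggregation shape in isolation (field and function names illustrative):

    struct ring_stats {
            unsigned long long bytes;
            unsigned long long packets;
    };

    /* Sum every ring's counters into one device-wide view, as the
     * rx and tx loops above do for netdev->stats. */
    static void aggregate(const struct ring_stats *rings, int n,
                          unsigned long long *bytes,
                          unsigned long long *packets)
    {
            int i;

            *bytes = 0;
            *packets = 0;
            for (i = 0; i < n; i++) {
                    *bytes += rings[i].bytes;
                    *packets += rings[i].packets;
            }
    }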
5479 5966
5480/** 5967/**
5481 * ixgbe_watchdog - Timer Call-back 5968 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
5482 * @data: pointer to adapter cast into an unsigned long 5969 * @adapter: pointer to the device adapter structure
5483 **/ 5970 **/
5484static void ixgbe_watchdog(unsigned long data) 5971static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
5485{ 5972{
5486 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
5487 struct ixgbe_hw *hw = &adapter->hw; 5973 struct ixgbe_hw *hw = &adapter->hw;
5488 u64 eics = 0;
5489 int i; 5974 int i;
5490 5975
5491 /* 5976 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
5492 * Do the watchdog outside of interrupt context due to the lovely 5977 return;
5493 * delays that some of the newer hardware requires 5978
5494 */ 5979 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
5495 5980
5981 /* if interface is down do nothing */
5496 if (test_bit(__IXGBE_DOWN, &adapter->state)) 5982 if (test_bit(__IXGBE_DOWN, &adapter->state))
5497 goto watchdog_short_circuit; 5983 return;
5984
5985 /* do nothing if we are not using signature filters */
5986 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
5987 return;
5988
5989 adapter->fdir_overflow++;
5990
5991 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5992 for (i = 0; i < adapter->num_tx_queues; i++)
5993 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
5994 &(adapter->tx_ring[i]->state));
5995 /* re-enable flow director interrupts */
5996 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
5997 } else {
5998 e_err(probe, "failed to finish FDIR re-initialization, "
5999 "ignored adding FDIR ATR filters\n");
6000 }
6001}
6002
6003/**
6004 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 6005 * @adapter: pointer to the device adapter structure
 6006 *
 6007 * This function serves two purposes. First it strobes the interrupt lines
 6008 * in order to make certain interrupts are occurring. Secondly it sets the
 6009 * bits needed to check for TX hangs. As a result we should immediately
 6010 * determine if a hang has occurred.
6011 */
6012static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
6013{
6014 struct ixgbe_hw *hw = &adapter->hw;
6015 u64 eics = 0;
6016 int i;
6017
6018 /* If we're down or resetting, just bail */
6019 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6020 test_bit(__IXGBE_RESETTING, &adapter->state))
6021 return;
6022
6023 /* Force detection of hung controller */
6024 if (netif_carrier_ok(adapter->netdev)) {
6025 for (i = 0; i < adapter->num_tx_queues; i++)
6026 set_check_for_tx_hang(adapter->tx_ring[i]);
6027 }
5498 6028
5499 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { 6029 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
5500 /* 6030 /*
@@ -5504,200 +6034,157 @@ static void ixgbe_watchdog(unsigned long data)
5504 */ 6034 */
5505 IXGBE_WRITE_REG(hw, IXGBE_EICS, 6035 IXGBE_WRITE_REG(hw, IXGBE_EICS,
5506 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); 6036 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
5507 goto watchdog_reschedule; 6037 } else {
5508 } 6038 /* get one bit for every active tx/rx interrupt vector */
5509 6039 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
5510 /* get one bit for every active tx/rx interrupt vector */ 6040 struct ixgbe_q_vector *qv = adapter->q_vector[i];
5511 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 6041 if (qv->rxr_count || qv->txr_count)
5512 struct ixgbe_q_vector *qv = adapter->q_vector[i]; 6042 eics |= ((u64)1 << i);
5513 if (qv->rxr_count || qv->txr_count) 6043 }
5514 eics |= ((u64)1 << i);
5515 } 6044 }
5516 6045
5517 /* Cause software interrupt to ensure rx rings are cleaned */ 6046 /* Cause software interrupt to ensure rings are cleaned */
5518 ixgbe_irq_rearm_queues(adapter, eics); 6047 ixgbe_irq_rearm_queues(adapter, eics);
5519 6048
5520watchdog_reschedule:
5521 /* Reset the timer */
5522 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
5523
5524watchdog_short_circuit:
5525 schedule_work(&adapter->watchdog_task);
5526} 6049}
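
The MSI-X branch above sets one bit per active queue vector before writing EICS, using a u64 so more than 32 vectors fit. The same logic standalone (the active[] array stands in for the qv->rxr_count || qv->txr_count test):

    #include <stdint.h>

    static uint64_t build_eics(int nvec, const int *active)
    {
            uint64_t eics = 0;
            int i;

            for (i = 0; i < nvec; i++)
                    if (active[i])              /* vector owns Rx or Tx rings */
                            eics |= (uint64_t)1 << i;
            return eics;
    }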
5527 6050
5528/** 6051/**
5529 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber 6052 * ixgbe_watchdog_update_link - update the link status
5530 * @work: pointer to work_struct containing our data 6053 * @adapter: pointer to the device adapter structure
 6054 * @link_speed: pointer to a u32 to store the link speed
5531 **/ 6055 **/
5532static void ixgbe_multispeed_fiber_task(struct work_struct *work) 6056static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
5533{ 6057{
5534 struct ixgbe_adapter *adapter = container_of(work,
5535 struct ixgbe_adapter,
5536 multispeed_fiber_task);
5537 struct ixgbe_hw *hw = &adapter->hw; 6058 struct ixgbe_hw *hw = &adapter->hw;
5538 u32 autoneg; 6059 u32 link_speed = adapter->link_speed;
5539 bool negotiation; 6060 bool link_up = adapter->link_up;
6061 int i;
5540 6062
5541 adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK; 6063 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
5542 autoneg = hw->phy.autoneg_advertised; 6064 return;
5543 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 6065
5544 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); 6066 if (hw->mac.ops.check_link) {
5545 hw->mac.autotry_restart = false; 6067 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
5546 if (hw->mac.ops.setup_link) 6068 } else {
5547 hw->mac.ops.setup_link(hw, autoneg, negotiation, true); 6069 /* always assume link is up, if no check link function */
5548 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 6070 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
5549 adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK; 6071 link_up = true;
6072 }
6073 if (link_up) {
6074 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6075 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
6076 hw->mac.ops.fc_enable(hw, i);
6077 } else {
6078 hw->mac.ops.fc_enable(hw, 0);
6079 }
6080 }
6081
6082 if (link_up ||
6083 time_after(jiffies, (adapter->link_check_timeout +
6084 IXGBE_TRY_LINK_TIMEOUT))) {
6085 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6086 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
6087 IXGBE_WRITE_FLUSH(hw);
6088 }
6089
6090 adapter->link_up = link_up;
6091 adapter->link_speed = link_speed;
5550} 6092}
5551 6093
5552/** 6094/**
5553 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module 6095 * ixgbe_watchdog_link_is_up - update netif_carrier status and
5554 * @work: pointer to work_struct containing our data 6096 * print link up message
 6097 * @adapter: pointer to the device adapter structure
5555 **/ 6098 **/
5556static void ixgbe_sfp_config_module_task(struct work_struct *work) 6099static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
5557{ 6100{
5558 struct ixgbe_adapter *adapter = container_of(work, 6101 struct net_device *netdev = adapter->netdev;
5559 struct ixgbe_adapter,
5560 sfp_config_module_task);
5561 struct ixgbe_hw *hw = &adapter->hw; 6102 struct ixgbe_hw *hw = &adapter->hw;
5562 u32 err; 6103 u32 link_speed = adapter->link_speed;
6104 bool flow_rx, flow_tx;
5563 6105
5564 adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK; 6106 /* only continue if link was previously down */
6107 if (netif_carrier_ok(netdev))
6108 return;
5565 6109
5566 /* Time for electrical oscillations to settle down */ 6110 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
5567 msleep(100);
5568 err = hw->phy.ops.identify_sfp(hw);
5569 6111
5570 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 6112 switch (hw->mac.type) {
5571 e_dev_err("failed to initialize because an unsupported SFP+ " 6113 case ixgbe_mac_82598EB: {
5572 "module type was detected.\n"); 6114 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5573 e_dev_err("Reload the driver after installing a supported " 6115 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
5574 "module.\n"); 6116 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
5575 unregister_netdev(adapter->netdev); 6117 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
5576 return; 6118 }
6119 break;
6120 case ixgbe_mac_X540:
6121 case ixgbe_mac_82599EB: {
6122 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6123 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
6124 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
6125 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
5577 } 6126 }
5578 hw->mac.ops.setup_sfp(hw); 6127 break;
6128 default:
6129 flow_tx = false;
6130 flow_rx = false;
6131 break;
6132 }
6133 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
6134 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
6135 "10 Gbps" :
6136 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
6137 "1 Gbps" :
6138 (link_speed == IXGBE_LINK_SPEED_100_FULL ?
6139 "100 Mbps" :
6140 "unknown speed"))),
6141 ((flow_rx && flow_tx) ? "RX/TX" :
6142 (flow_rx ? "RX" :
6143 (flow_tx ? "TX" : "None"))));
5579 6144
5580 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) 6145 netif_carrier_on(netdev);
5581 /* This will also work for DA Twinax connections */ 6146#ifdef HAVE_IPLINK_VF_CONFIG
5582 schedule_work(&adapter->multispeed_fiber_task); 6147 ixgbe_check_vf_rate_limit(adapter);
5583 adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK; 6148#endif /* HAVE_IPLINK_VF_CONFIG */
5584} 6149}
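
The nested ternaries in the e_info() call map the link speed flag to a label. An equivalent helper, shown only as a readability comparison (the IXGBE_LINK_SPEED_* constants are the driver's own):

    static const char *speed_str(u32 link_speed)
    {
            switch (link_speed) {
            case IXGBE_LINK_SPEED_10GB_FULL: return "10 Gbps";
            case IXGBE_LINK_SPEED_1GB_FULL:  return "1 Gbps";
            case IXGBE_LINK_SPEED_100_FULL:  return "100 Mbps";
            default:                         return "unknown speed";
            }
    }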
5585 6150
5586/** 6151/**
5587 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table 6152 * ixgbe_watchdog_link_is_down - update netif_carrier status and
5588 * @work: pointer to work_struct containing our data 6153 * print link down message
 6154 * @adapter: pointer to the adapter structure
5589 **/ 6155 **/
5590static void ixgbe_fdir_reinit_task(struct work_struct *work) 6156static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
5591{ 6157{
5592 struct ixgbe_adapter *adapter = container_of(work, 6158 struct net_device *netdev = adapter->netdev;
5593 struct ixgbe_adapter,
5594 fdir_reinit_task);
5595 struct ixgbe_hw *hw = &adapter->hw; 6159 struct ixgbe_hw *hw = &adapter->hw;
5596 int i;
5597 6160
5598 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { 6161 adapter->link_up = false;
5599 for (i = 0; i < adapter->num_tx_queues; i++) 6162 adapter->link_speed = 0;
5600 set_bit(__IXGBE_FDIR_INIT_DONE,
5601 &(adapter->tx_ring[i]->reinit_state));
5602 } else {
5603 e_err(probe, "failed to finish FDIR re-initialization, "
5604 "ignored adding FDIR ATR filters\n");
5605 }
5606 /* Done FDIR Re-initialization, enable transmits */
5607 netif_tx_start_all_queues(adapter->netdev);
5608}
5609 6163
5610static DEFINE_MUTEX(ixgbe_watchdog_lock); 6164 /* only continue if link was up previously */
6165 if (!netif_carrier_ok(netdev))
6166 return;
6167
6168 /* poll for SFP+ cable when link is down */
6169 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
6170 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
6171
6172 e_info(drv, "NIC Link is Down\n");
6173 netif_carrier_off(netdev);
6174}
5611 6175
5612/** 6176/**
5613 * ixgbe_watchdog_task - worker thread to bring link up 6177 * ixgbe_watchdog_flush_tx - flush queues on link down
5614 * @work: pointer to work_struct containing our data 6178 * @adapter: pointer to the device adapter structure
5615 **/ 6179 **/
5616static void ixgbe_watchdog_task(struct work_struct *work) 6180static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
5617{ 6181{
5618 struct ixgbe_adapter *adapter = container_of(work,
5619 struct ixgbe_adapter,
5620 watchdog_task);
5621 struct net_device *netdev = adapter->netdev;
5622 struct ixgbe_hw *hw = &adapter->hw;
5623 u32 link_speed;
5624 bool link_up;
5625 int i; 6182 int i;
5626 struct ixgbe_ring *tx_ring;
5627 int some_tx_pending = 0; 6183 int some_tx_pending = 0;
5628 6184
5629 mutex_lock(&ixgbe_watchdog_lock); 6185 if (!netif_carrier_ok(adapter->netdev)) {
5630
5631 link_up = adapter->link_up;
5632 link_speed = adapter->link_speed;
5633
5634 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
5635 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
5636 if (link_up) {
5637#ifdef CONFIG_DCB
5638 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
5639 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
5640 hw->mac.ops.fc_enable(hw, i);
5641 } else {
5642 hw->mac.ops.fc_enable(hw, 0);
5643 }
5644#else
5645 hw->mac.ops.fc_enable(hw, 0);
5646#endif
5647 }
5648
5649 if (link_up ||
5650 time_after(jiffies, (adapter->link_check_timeout +
5651 IXGBE_TRY_LINK_TIMEOUT))) {
5652 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5653 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
5654 }
5655 adapter->link_up = link_up;
5656 adapter->link_speed = link_speed;
5657 }
5658
5659 if (link_up) {
5660 if (!netif_carrier_ok(netdev)) {
5661 bool flow_rx, flow_tx;
5662
5663 if (hw->mac.type == ixgbe_mac_82599EB) {
5664 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
5665 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
5666 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
5667 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
5668 } else {
5669 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5670 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
5671 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
5672 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
5673 }
5674
5675 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
5676 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
5677 "10 Gbps" :
5678 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
5679 "1 Gbps" : "unknown speed")),
5680 ((flow_rx && flow_tx) ? "RX/TX" :
5681 (flow_rx ? "RX" :
5682 (flow_tx ? "TX" : "None"))));
5683
5684 netif_carrier_on(netdev);
5685 } else {
5686 /* Force detection of hung controller */
5687 adapter->detect_tx_hung = true;
5688 }
5689 } else {
5690 adapter->link_up = false;
5691 adapter->link_speed = 0;
5692 if (netif_carrier_ok(netdev)) {
5693 e_info(drv, "NIC Link is Down\n");
5694 netif_carrier_off(netdev);
5695 }
5696 }
5697
5698 if (!netif_carrier_ok(netdev)) {
5699 for (i = 0; i < adapter->num_tx_queues; i++) { 6186 for (i = 0; i < adapter->num_tx_queues; i++) {
5700 tx_ring = adapter->tx_ring[i]; 6187 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5701 if (tx_ring->next_to_use != tx_ring->next_to_clean) { 6188 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
5702 some_tx_pending = 1; 6189 some_tx_pending = 1;
5703 break; 6190 break;
@@ -5710,17 +6197,216 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5710 * to get done, so reset controller to flush Tx. 6197 * to get done, so reset controller to flush Tx.
5711 * (Do the reset outside of interrupt context). 6198 * (Do the reset outside of interrupt context).
5712 */ 6199 */
5713 schedule_work(&adapter->reset_task); 6200 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
5714 } 6201 }
5715 } 6202 }
6203}
6204
6205static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
6206{
6207 u32 ssvpc;
6208
6209 /* Do not perform spoof check for 82598 */
6210 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
6211 return;
6212
6213 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
6214
6215 /*
6216 * ssvpc register is cleared on read, if zero then no
6217 * spoofed packets in the last interval.
6218 */
6219 if (!ssvpc)
6220 return;
6221
 6222 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
6223}
6224
6225/**
6226 * ixgbe_watchdog_subtask - check and bring link up
 6227 * @adapter: pointer to the device adapter structure
6228 **/
6229static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
6230{
6231 /* if interface is down do nothing */
6232 if (test_bit(__IXGBE_DOWN, &adapter->state))
6233 return;
6234
6235 ixgbe_watchdog_update_link(adapter);
6236
6237 if (adapter->link_up)
6238 ixgbe_watchdog_link_is_up(adapter);
6239 else
6240 ixgbe_watchdog_link_is_down(adapter);
5716 6241
6242 ixgbe_spoof_check(adapter);
5717 ixgbe_update_stats(adapter); 6243 ixgbe_update_stats(adapter);
5718 mutex_unlock(&ixgbe_watchdog_lock); 6244
6245 ixgbe_watchdog_flush_tx(adapter);
6246}
6247
6248/**
6249 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 6250 * @adapter: the ixgbe adapter structure
6251 **/
6252static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
6253{
6254 struct ixgbe_hw *hw = &adapter->hw;
6255 s32 err;
6256
6257 /* not searching for SFP so there is nothing to do here */
6258 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
6259 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6260 return;
6261
6262 /* someone else is in init, wait until next service event */
6263 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
6264 return;
6265
6266 err = hw->phy.ops.identify_sfp(hw);
6267 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6268 goto sfp_out;
6269
6270 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
6271 /* If no cable is present, then we need to reset
6272 * the next time we find a good cable. */
6273 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
6274 }
6275
6276 /* exit on error */
6277 if (err)
6278 goto sfp_out;
6279
6280 /* exit if reset not needed */
6281 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6282 goto sfp_out;
6283
6284 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
6285
6286 /*
6287 * A module may be identified correctly, but the EEPROM may not have
6288 * support for that module. setup_sfp() will fail in that case, so
6289 * we should not allow that module to load.
6290 */
6291 if (hw->mac.type == ixgbe_mac_82598EB)
6292 err = hw->phy.ops.reset(hw);
6293 else
6294 err = hw->mac.ops.setup_sfp(hw);
6295
6296 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6297 goto sfp_out;
6298
6299 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
6300 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
6301
6302sfp_out:
6303 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6304
6305 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
6306 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
6307 e_dev_err("failed to initialize because an unsupported "
6308 "SFP+ module type was detected.\n");
6309 e_dev_err("Reload the driver after installing a "
6310 "supported module.\n");
6311 unregister_netdev(adapter->netdev);
6312 }
6313}
6314
6315/**
6316 * ixgbe_sfp_link_config_subtask - set up link SFP after module install
 6317 * @adapter: the ixgbe adapter structure
6318 **/
6319static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
6320{
6321 struct ixgbe_hw *hw = &adapter->hw;
6322 u32 autoneg;
6323 bool negotiation;
6324
6325 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
6326 return;
6327
6328 /* someone else is in init, wait until next service event */
6329 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
6330 return;
6331
6332 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
6333
6334 autoneg = hw->phy.autoneg_advertised;
6335 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
6336 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
6337 hw->mac.autotry_restart = false;
6338 if (hw->mac.ops.setup_link)
6339 hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
6340
6341 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
6342 adapter->link_check_timeout = jiffies;
6343 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6344}
6345
6346/**
6347 * ixgbe_service_timer - Timer Call-back
6348 * @data: pointer to adapter cast into an unsigned long
6349 **/
6350static void ixgbe_service_timer(unsigned long data)
6351{
6352 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
6353 unsigned long next_event_offset;
6354
6355 /* poll faster when waiting for link */
6356 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
6357 next_event_offset = HZ / 10;
6358 else
6359 next_event_offset = HZ * 2;
6360
6361 /* Reset the timer */
6362 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
6363
6364 ixgbe_service_event_schedule(adapter);
6365}
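The timer rearms itself at one of two rates: a fast poll of HZ/10 jiffies (100 ms) while a link change is pending, and a relaxed HZ*2 (2 s) otherwise. A standalone sketch of that interval selection; HZ is fixed at an assumed 250 here, since the kernel value is configuration-dependent.

#include <stdio.h>

#define HZ 250	/* assumed tick rate; the kernel's HZ varies by config */

static unsigned long next_event_offset(int waiting_for_link)
{
	return waiting_for_link ? HZ / 10 : HZ * 2;	/* 100 ms vs 2 s */
}

int main(void)
{
	printf("link pending: rearm in %lu jiffies (100 ms)\n",
	       next_event_offset(1));
	printf("steady state: rearm in %lu jiffies (2 s)\n",
	       next_event_offset(0));
	return 0;
}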
6366
6367static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
6368{
6369 if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
6370 return;
6371
6372 adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
6373
6374 /* If we're already down or resetting, just bail */
6375 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6376 test_bit(__IXGBE_RESETTING, &adapter->state))
6377 return;
6378
6379 ixgbe_dump(adapter);
6380 netdev_err(adapter->netdev, "Reset adapter\n");
6381 adapter->tx_timeout_count++;
6382
6383 ixgbe_reinit_locked(adapter);
6384}
6385
6386/**
6387 * ixgbe_service_task - manages and runs subtasks
6388 * @work: pointer to work_struct containing our data
6389 **/
6390static void ixgbe_service_task(struct work_struct *work)
6391{
6392 struct ixgbe_adapter *adapter = container_of(work,
6393 struct ixgbe_adapter,
6394 service_task);
6395
6396 ixgbe_reset_subtask(adapter);
6397 ixgbe_sfp_detection_subtask(adapter);
6398 ixgbe_sfp_link_config_subtask(adapter);
6399 ixgbe_check_overtemp_subtask(adapter);
6400 ixgbe_watchdog_subtask(adapter);
6401 ixgbe_fdir_reinit_subtask(adapter);
6402 ixgbe_check_hang_subtask(adapter);
6403
6404 ixgbe_service_event_complete(adapter);
5719} 6405}
5720 6406
5721static int ixgbe_tso(struct ixgbe_adapter *adapter, 6407static int ixgbe_tso(struct ixgbe_adapter *adapter,
5722 struct ixgbe_ring *tx_ring, struct sk_buff *skb, 6408 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
5723 u32 tx_flags, u8 *hdr_len) 6409 u32 tx_flags, u8 *hdr_len, __be16 protocol)
5724{ 6410{
5725 struct ixgbe_adv_tx_context_desc *context_desc; 6411 struct ixgbe_adv_tx_context_desc *context_desc;
5726 unsigned int i; 6412 unsigned int i;
@@ -5738,33 +6424,33 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5738 l4len = tcp_hdrlen(skb); 6424 l4len = tcp_hdrlen(skb);
5739 *hdr_len += l4len; 6425 *hdr_len += l4len;
5740 6426
5741 if (skb->protocol == htons(ETH_P_IP)) { 6427 if (protocol == htons(ETH_P_IP)) {
5742 struct iphdr *iph = ip_hdr(skb); 6428 struct iphdr *iph = ip_hdr(skb);
5743 iph->tot_len = 0; 6429 iph->tot_len = 0;
5744 iph->check = 0; 6430 iph->check = 0;
5745 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 6431 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5746 iph->daddr, 0, 6432 iph->daddr, 0,
5747 IPPROTO_TCP, 6433 IPPROTO_TCP,
5748 0); 6434 0);
5749 } else if (skb_is_gso_v6(skb)) { 6435 } else if (skb_is_gso_v6(skb)) {
5750 ipv6_hdr(skb)->payload_len = 0; 6436 ipv6_hdr(skb)->payload_len = 0;
5751 tcp_hdr(skb)->check = 6437 tcp_hdr(skb)->check =
5752 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 6438 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5753 &ipv6_hdr(skb)->daddr, 6439 &ipv6_hdr(skb)->daddr,
5754 0, IPPROTO_TCP, 0); 6440 0, IPPROTO_TCP, 0);
5755 } 6441 }
5756 6442
5757 i = tx_ring->next_to_use; 6443 i = tx_ring->next_to_use;
5758 6444
5759 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 6445 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5760 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); 6446 context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
5761 6447
5762 /* VLAN MACLEN IPLEN */ 6448 /* VLAN MACLEN IPLEN */
5763 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 6449 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
5764 vlan_macip_lens |= 6450 vlan_macip_lens |=
5765 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 6451 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
5766 vlan_macip_lens |= ((skb_network_offset(skb)) << 6452 vlan_macip_lens |= ((skb_network_offset(skb)) <<
5767 IXGBE_ADVTXD_MACLEN_SHIFT); 6453 IXGBE_ADVTXD_MACLEN_SHIFT);
5768 *hdr_len += skb_network_offset(skb); 6454 *hdr_len += skb_network_offset(skb);
5769 vlan_macip_lens |= 6455 vlan_macip_lens |=
5770 (skb_transport_header(skb) - skb_network_header(skb)); 6456 (skb_transport_header(skb) - skb_network_header(skb));
@@ -5775,9 +6461,9 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5775 6461
5776 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 6462 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
5777 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | 6463 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
5778 IXGBE_ADVTXD_DTYP_CTXT); 6464 IXGBE_ADVTXD_DTYP_CTXT);
5779 6465
5780 if (skb->protocol == htons(ETH_P_IP)) 6466 if (protocol == htons(ETH_P_IP))
5781 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 6467 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
5782 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 6468 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5783 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 6469 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
@@ -5803,9 +6489,48 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5803 return false; 6489 return false;
5804} 6490}
5805 6491
6492static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6493 __be16 protocol)
6494{
6495 u32 rtn = 0;
6496
6497 switch (protocol) {
6498 case cpu_to_be16(ETH_P_IP):
6499 rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
6500 switch (ip_hdr(skb)->protocol) {
6501 case IPPROTO_TCP:
6502 rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
6503 break;
6504 case IPPROTO_SCTP:
6505 rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
6506 break;
6507 }
6508 break;
6509 case cpu_to_be16(ETH_P_IPV6):
6510 /* XXX what about other V6 headers?? */
6511 switch (ipv6_hdr(skb)->nexthdr) {
6512 case IPPROTO_TCP:
6513 rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
6514 break;
6515 case IPPROTO_SCTP:
6516 rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
6517 break;
6518 }
6519 break;
6520 default:
6521 if (unlikely(net_ratelimit()))
6522 e_warn(probe, "partial checksum but proto=%x!\n",
6523 protocol);
6524 break;
6525 }
6526
6527 return rtn;
6528}
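ixgbe_psum() collapses the old nested if/else chains into two switch statements over (ethertype, L4 protocol) and returns the TUCMD bits to OR into the context descriptor. A compact sketch of that mapping; the flag encodings shown are believed to match ixgbe_type.h for this era, but treat them as assumptions and verify against your tree.

#include <stdint.h>
#include <stdio.h>

#define ADVTXD_TUCMD_IPV4	0x00000400	/* assumed encodings */
#define ADVTXD_TUCMD_L4T_TCP	0x00000800
#define ADVTXD_TUCMD_L4T_SCTP	0x00001000

enum l3 { L3_IPV4, L3_IPV6 };
enum l4 { L4_TCP, L4_SCTP, L4_OTHER };

static uint32_t psum(enum l3 l3, enum l4 l4)
{
	uint32_t rtn = (l3 == L3_IPV4) ? ADVTXD_TUCMD_IPV4 : 0;

	if (l4 == L4_TCP)
		rtn |= ADVTXD_TUCMD_L4T_TCP;
	else if (l4 == L4_SCTP)
		rtn |= ADVTXD_TUCMD_L4T_SCTP;
	/* L4_OTHER: no L4 type bit, matching the default: case above */
	return rtn;
}

int main(void)
{
	printf("TCP over IPv4 -> 0x%08x\n", (unsigned)psum(L3_IPV4, L4_TCP));
	printf("SCTP over IPv6 -> 0x%08x\n", (unsigned)psum(L3_IPV6, L4_SCTP));
	return 0;
}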
6529
5806static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, 6530static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5807 struct ixgbe_ring *tx_ring, 6531 struct ixgbe_ring *tx_ring,
5808 struct sk_buff *skb, u32 tx_flags) 6532 struct sk_buff *skb, u32 tx_flags,
6533 __be16 protocol)
5809{ 6534{
5810 struct ixgbe_adv_tx_context_desc *context_desc; 6535 struct ixgbe_adv_tx_context_desc *context_desc;
5811 unsigned int i; 6536 unsigned int i;
@@ -5816,63 +6541,25 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5816 (tx_flags & IXGBE_TX_FLAGS_VLAN)) { 6541 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
5817 i = tx_ring->next_to_use; 6542 i = tx_ring->next_to_use;
5818 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 6543 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5819 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); 6544 context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
5820 6545
5821 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 6546 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
5822 vlan_macip_lens |= 6547 vlan_macip_lens |=
5823 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 6548 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
5824 vlan_macip_lens |= (skb_network_offset(skb) << 6549 vlan_macip_lens |= (skb_network_offset(skb) <<
5825 IXGBE_ADVTXD_MACLEN_SHIFT); 6550 IXGBE_ADVTXD_MACLEN_SHIFT);
5826 if (skb->ip_summed == CHECKSUM_PARTIAL) 6551 if (skb->ip_summed == CHECKSUM_PARTIAL)
5827 vlan_macip_lens |= (skb_transport_header(skb) - 6552 vlan_macip_lens |= (skb_transport_header(skb) -
5828 skb_network_header(skb)); 6553 skb_network_header(skb));
5829 6554
5830 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 6555 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5831 context_desc->seqnum_seed = 0; 6556 context_desc->seqnum_seed = 0;
5832 6557
5833 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 6558 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
5834 IXGBE_ADVTXD_DTYP_CTXT); 6559 IXGBE_ADVTXD_DTYP_CTXT);
5835
5836 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5837 __be16 protocol;
5838
5839 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
5840 const struct vlan_ethhdr *vhdr =
5841 (const struct vlan_ethhdr *)skb->data;
5842
5843 protocol = vhdr->h_vlan_encapsulated_proto;
5844 } else {
5845 protocol = skb->protocol;
5846 }
5847 6560
5848 switch (protocol) { 6561 if (skb->ip_summed == CHECKSUM_PARTIAL)
5849 case cpu_to_be16(ETH_P_IP): 6562 type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
5850 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
5851 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
5852 type_tucmd_mlhl |=
5853 IXGBE_ADVTXD_TUCMD_L4T_TCP;
5854 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
5855 type_tucmd_mlhl |=
5856 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5857 break;
5858 case cpu_to_be16(ETH_P_IPV6):
5859 /* XXX what about other V6 headers?? */
5860 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
5861 type_tucmd_mlhl |=
5862 IXGBE_ADVTXD_TUCMD_L4T_TCP;
5863 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
5864 type_tucmd_mlhl |=
5865 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5866 break;
5867 default:
5868 if (unlikely(net_ratelimit())) {
5869 e_warn(probe, "partial checksum "
5870 "but proto=%x!\n",
5871 skb->protocol);
5872 }
5873 break;
5874 }
5875 }
5876 6563
5877 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 6564 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
5878 /* use index zero for tx checksum offload */ 6565 /* use index zero for tx checksum offload */
@@ -5893,17 +6580,19 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5893} 6580}
5894 6581
5895static int ixgbe_tx_map(struct ixgbe_adapter *adapter, 6582static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5896 struct ixgbe_ring *tx_ring, 6583 struct ixgbe_ring *tx_ring,
5897 struct sk_buff *skb, u32 tx_flags, 6584 struct sk_buff *skb, u32 tx_flags,
5898 unsigned int first) 6585 unsigned int first, const u8 hdr_len)
5899{ 6586{
5900 struct pci_dev *pdev = adapter->pdev; 6587 struct device *dev = tx_ring->dev;
5901 struct ixgbe_tx_buffer *tx_buffer_info; 6588 struct ixgbe_tx_buffer *tx_buffer_info;
5902 unsigned int len; 6589 unsigned int len;
5903 unsigned int total = skb->len; 6590 unsigned int total = skb->len;
5904 unsigned int offset = 0, size, count = 0, i; 6591 unsigned int offset = 0, size, count = 0, i;
5905 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 6592 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
5906 unsigned int f; 6593 unsigned int f;
6594 unsigned int bytecount = skb->len;
6595 u16 gso_segs = 1;
5907 6596
5908 i = tx_ring->next_to_use; 6597 i = tx_ring->next_to_use;
5909 6598
@@ -5918,10 +6607,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5918 6607
5919 tx_buffer_info->length = size; 6608 tx_buffer_info->length = size;
5920 tx_buffer_info->mapped_as_page = false; 6609 tx_buffer_info->mapped_as_page = false;
5921 tx_buffer_info->dma = dma_map_single(&pdev->dev, 6610 tx_buffer_info->dma = dma_map_single(dev,
5922 skb->data + offset, 6611 skb->data + offset,
5923 size, DMA_TO_DEVICE); 6612 size, DMA_TO_DEVICE);
5924 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 6613 if (dma_mapping_error(dev, tx_buffer_info->dma))
5925 goto dma_error; 6614 goto dma_error;
5926 tx_buffer_info->time_stamp = jiffies; 6615 tx_buffer_info->time_stamp = jiffies;
5927 tx_buffer_info->next_to_watch = i; 6616 tx_buffer_info->next_to_watch = i;
@@ -5954,12 +6643,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5954 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 6643 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
5955 6644
5956 tx_buffer_info->length = size; 6645 tx_buffer_info->length = size;
5957 tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev, 6646 tx_buffer_info->dma = dma_map_page(dev,
5958 frag->page, 6647 frag->page,
5959 offset, size, 6648 offset, size,
5960 DMA_TO_DEVICE); 6649 DMA_TO_DEVICE);
5961 tx_buffer_info->mapped_as_page = true; 6650 tx_buffer_info->mapped_as_page = true;
5962 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 6651 if (dma_mapping_error(dev, tx_buffer_info->dma))
5963 goto dma_error; 6652 goto dma_error;
5964 tx_buffer_info->time_stamp = jiffies; 6653 tx_buffer_info->time_stamp = jiffies;
5965 tx_buffer_info->next_to_watch = i; 6654 tx_buffer_info->next_to_watch = i;
@@ -5973,6 +6662,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5973 break; 6662 break;
5974 } 6663 }
5975 6664
6665 if (tx_flags & IXGBE_TX_FLAGS_TSO)
6666 gso_segs = skb_shinfo(skb)->gso_segs;
6667#ifdef IXGBE_FCOE
6668 /* adjust for FCoE Sequence Offload */
6669 else if (tx_flags & IXGBE_TX_FLAGS_FSO)
6670 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
6671 skb_shinfo(skb)->gso_size);
6672#endif /* IXGBE_FCOE */
6673 bytecount += (gso_segs - 1) * hdr_len;
6674
6675 /* multiply data chunks by size of headers */
6676 tx_ring->tx_buffer_info[i].bytecount = bytecount;
6677 tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
5976 tx_ring->tx_buffer_info[i].skb = skb; 6678 tx_ring->tx_buffer_info[i].skb = skb;
5977 tx_ring->tx_buffer_info[first].next_to_watch = i; 6679 tx_ring->tx_buffer_info[first].next_to_watch = i;
5978 6680
@@ -5990,19 +6692,18 @@ dma_error:
5990 6692
5991 /* clear timestamp and dma mappings for remaining portion of packet */ 6693 /* clear timestamp and dma mappings for remaining portion of packet */
5992 while (count--) { 6694 while (count--) {
5993 if (i==0) 6695 if (i == 0)
5994 i += tx_ring->count; 6696 i += tx_ring->count;
5995 i--; 6697 i--;
5996 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 6698 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5997 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 6699 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
5998 } 6700 }
5999 6701
6000 return 0; 6702 return 0;
6001} 6703}
6002 6704
6003static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 6705static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
6004 struct ixgbe_ring *tx_ring, 6706 int tx_flags, int count, u32 paylen, u8 hdr_len)
6005 int tx_flags, int count, u32 paylen, u8 hdr_len)
6006{ 6707{
6007 union ixgbe_adv_tx_desc *tx_desc = NULL; 6708 union ixgbe_adv_tx_desc *tx_desc = NULL;
6008 struct ixgbe_tx_buffer *tx_buffer_info; 6709 struct ixgbe_tx_buffer *tx_buffer_info;
@@ -6021,17 +6722,17 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6021 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 6722 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
6022 6723
6023 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 6724 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
6024 IXGBE_ADVTXD_POPTS_SHIFT; 6725 IXGBE_ADVTXD_POPTS_SHIFT;
6025 6726
6026 /* use index 1 context for tso */ 6727 /* use index 1 context for tso */
6027 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 6728 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
6028 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 6729 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
6029 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 6730 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
6030 IXGBE_ADVTXD_POPTS_SHIFT; 6731 IXGBE_ADVTXD_POPTS_SHIFT;
6031 6732
6032 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) 6733 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
6033 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 6734 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
6034 IXGBE_ADVTXD_POPTS_SHIFT; 6735 IXGBE_ADVTXD_POPTS_SHIFT;
6035 6736
6036 if (tx_flags & IXGBE_TX_FLAGS_FCOE) { 6737 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
6037 olinfo_status |= IXGBE_ADVTXD_CC; 6738 olinfo_status |= IXGBE_ADVTXD_CC;
@@ -6045,10 +6746,10 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6045 i = tx_ring->next_to_use; 6746 i = tx_ring->next_to_use;
6046 while (count--) { 6747 while (count--) {
6047 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 6748 tx_buffer_info = &tx_ring->tx_buffer_info[i];
6048 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 6749 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
6049 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 6750 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
6050 tx_desc->read.cmd_type_len = 6751 tx_desc->read.cmd_type_len =
6051 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 6752 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
6052 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 6753 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6053 i++; 6754 i++;
6054 if (i == tx_ring->count) 6755 if (i == tx_ring->count)
@@ -6066,60 +6767,100 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6066 wmb(); 6767 wmb();
6067 6768
6068 tx_ring->next_to_use = i; 6769 tx_ring->next_to_use = i;
6069 writel(i, adapter->hw.hw_addr + tx_ring->tail); 6770 writel(i, tx_ring->tail);
6070} 6771}
6071 6772
6072static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, 6773static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
6073 int queue, u32 tx_flags) 6774 u32 tx_flags, __be16 protocol)
6074{ 6775{
6075 struct ixgbe_atr_input atr_input; 6776 struct ixgbe_q_vector *q_vector = ring->q_vector;
6777 union ixgbe_atr_hash_dword input = { .dword = 0 };
6778 union ixgbe_atr_hash_dword common = { .dword = 0 };
6779 union {
6780 unsigned char *network;
6781 struct iphdr *ipv4;
6782 struct ipv6hdr *ipv6;
6783 } hdr;
6076 struct tcphdr *th; 6784 struct tcphdr *th;
6077 struct iphdr *iph = ip_hdr(skb); 6785 __be16 vlan_id;
6078 struct ethhdr *eth = (struct ethhdr *)skb->data; 6786
6079 u16 vlan_id, src_port, dst_port, flex_bytes; 6787 /* if ring doesn't have an interrupt vector, cannot perform ATR */
6080 u32 src_ipv4_addr, dst_ipv4_addr; 6788 if (!q_vector)
6081 u8 l4type = 0;
6082
6083 /* Right now, we support IPv4 only */
6084 if (skb->protocol != htons(ETH_P_IP))
6085 return; 6789 return;
6086 /* check if we're UDP or TCP */ 6790
6087 if (iph->protocol == IPPROTO_TCP) { 6791 /* do nothing if sampling is disabled */
6088 th = tcp_hdr(skb); 6792 if (!ring->atr_sample_rate)
6089 src_port = th->source; 6793 return;
6090 dst_port = th->dest; 6794
6091 l4type |= IXGBE_ATR_L4TYPE_TCP; 6795 ring->atr_count++;
6092 /* l4type IPv4 type is 0, no need to assign */ 6796
6093 } else { 6797 /* snag network header to get L4 type and address */
6094 /* Unsupported L4 header, just bail here */ 6798 hdr.network = skb_network_header(skb);
6799
6800 /* Currently only IPv4/IPv6 with TCP is supported */
6801 if ((protocol != __constant_htons(ETH_P_IPV6) ||
6802 hdr.ipv6->nexthdr != IPPROTO_TCP) &&
6803 (protocol != __constant_htons(ETH_P_IP) ||
6804 hdr.ipv4->protocol != IPPROTO_TCP))
6805 return;
6806
6807 th = tcp_hdr(skb);
6808
6809 /* skip this packet since the socket is closing */
6810 if (th->fin)
6811 return;
6812
6813 /* sample on all syn packets or once every atr sample count */
6814 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
6095 return; 6815 return;
6096 }
6097 6816
6098 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); 6817 /* reset sample count */
6818 ring->atr_count = 0;
6099 6819
6100 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> 6820 vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
6101 IXGBE_TX_FLAGS_VLAN_SHIFT;
6102 src_ipv4_addr = iph->saddr;
6103 dst_ipv4_addr = iph->daddr;
6104 flex_bytes = eth->h_proto;
6105 6821
6106 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); 6822 /*
6107 ixgbe_atr_set_src_port_82599(&atr_input, dst_port); 6823 * src and dst are inverted, think how the receiver sees them
6108 ixgbe_atr_set_dst_port_82599(&atr_input, src_port); 6824 *
6109 ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes); 6825 * The input is broken into two sections, a non-compressed section
6110 ixgbe_atr_set_l4type_82599(&atr_input, l4type); 6826 * containing vm_pool, vlan_id, and flow_type. The rest of the data
6111 /* src and dst are inverted, think how the receiver sees them */ 6827 * is XORed together and stored in the compressed dword.
6112 ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr); 6828 */
6113 ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr); 6829 input.formatted.vlan_id = vlan_id;
6830
6831 /*
6832 * since src port and flex bytes occupy the same word XOR them together
6833 * and write the value to source port portion of compressed dword
6834 */
6835 if (vlan_id)
6836 common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
6837 else
6838 common.port.src ^= th->dest ^ protocol;
6839 common.port.dst ^= th->source;
6840
6841 if (protocol == __constant_htons(ETH_P_IP)) {
6842 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
6843 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
6844 } else {
6845 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
6846 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
6847 hdr.ipv6->saddr.s6_addr32[1] ^
6848 hdr.ipv6->saddr.s6_addr32[2] ^
6849 hdr.ipv6->saddr.s6_addr32[3] ^
6850 hdr.ipv6->daddr.s6_addr32[0] ^
6851 hdr.ipv6->daddr.s6_addr32[1] ^
6852 hdr.ipv6->daddr.s6_addr32[2] ^
6853 hdr.ipv6->daddr.s6_addr32[3];
6854 }
6114 6855
6115 /* This assumes the Rx queue and Tx queue are bound to the same CPU */ 6856 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
6116 ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); 6857 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
6858 input, common, ring->queue_index);
6117} 6859}
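The rewritten ixgbe_atr() no longer fills a full ixgbe_atr_input; it builds two dwords, one uncompressed (vm_pool, vlan_id, flow_type) and one in which everything else is XOR-folded. A sketch of the folding for the TCP/IPv4 case, with plain uintN_t stand-ins for the kernel's __beN types.

#include <stdint.h>
#include <stdio.h>

struct atr_common { uint16_t src_port, dst_port; uint32_t ip; };

/* src/dst are swapped on purpose: the filter must match the packets
 * the *receiver* will see on the return path. */
static struct atr_common fold_tcpv4(uint16_t th_source, uint16_t th_dest,
				    uint16_t flex, uint32_t saddr,
				    uint32_t daddr)
{
	struct atr_common c = { 0 };

	c.src_port = th_dest ^ flex;	/* port shares its word with the
					 * flex bytes (ethertype/802.1Q) */
	c.dst_port = th_source;
	c.ip = saddr ^ daddr;		/* both addresses in one dword */
	return c;
}

int main(void)
{
	struct atr_common c = fold_tcpv4(0x1234, 0x0050, 0x0800,
					 0xc0a80001, 0xc0a80002);
	printf("signature: %04x %04x %08x\n",
	       c.src_port, c.dst_port, (unsigned)c.ip);
	return 0;
}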
6118 6860
6119static int __ixgbe_maybe_stop_tx(struct net_device *netdev, 6861static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
6120 struct ixgbe_ring *tx_ring, int size)
6121{ 6862{
6122 netif_stop_subqueue(netdev, tx_ring->queue_index); 6863 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
6123 /* Herbert's original patch had: 6864 /* Herbert's original patch had:
6124 * smp_mb__after_netif_stop_queue(); 6865 * smp_mb__after_netif_stop_queue();
6125 * but since that doesn't exist yet, just open code it. */ 6866 * but since that doesn't exist yet, just open code it. */
@@ -6131,37 +6872,33 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
6131 return -EBUSY; 6872 return -EBUSY;
6132 6873
6133 /* A reprieve! - use start_queue because it doesn't call schedule */ 6874 /* A reprieve! - use start_queue because it doesn't call schedule */
6134 netif_start_subqueue(netdev, tx_ring->queue_index); 6875 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
6135 ++tx_ring->restart_queue; 6876 ++tx_ring->tx_stats.restart_queue;
6136 return 0; 6877 return 0;
6137} 6878}
6138 6879
6139static int ixgbe_maybe_stop_tx(struct net_device *netdev, 6880static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
6140 struct ixgbe_ring *tx_ring, int size)
6141{ 6881{
6142 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 6882 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
6143 return 0; 6883 return 0;
6144 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size); 6884 return __ixgbe_maybe_stop_tx(tx_ring, size);
6145} 6885}
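__ixgbe_maybe_stop_tx() is the classic stop-then-recheck idiom: stop the queue, issue a full barrier so the stop is visible before the free-descriptor count is re-read, then restart if a completion slipped in meanwhile. The same shape in a self-contained sketch, with C11 atomics standing in for smp_mb() and the netif helpers.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int unused_desc;
static atomic_bool queue_stopped;

/* Returns 0 if transmit may proceed, -1 (EBUSY-like) to back off. */
static int maybe_stop_tx(int needed)
{
	if (atomic_load(&unused_desc) >= needed)
		return 0;

	atomic_store(&queue_stopped, true);
	/* Full fence: publish the stop before re-reading the count, so a
	 * concurrent completion that freed descriptors is never missed. */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&unused_desc) < needed)
		return -1;

	/* A reprieve: space appeared while we were stopping. */
	atomic_store(&queue_stopped, false);
	return 0;
}

int main(void)
{
	atomic_store(&unused_desc, 4);
	return maybe_stop_tx(8) ? 1 : 0;	/* 1: queue would stay stopped */
}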
6146 6886
6147static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) 6887static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6148{ 6888{
6149 struct ixgbe_adapter *adapter = netdev_priv(dev); 6889 struct ixgbe_adapter *adapter = netdev_priv(dev);
6150 int txq = smp_processor_id(); 6890 int txq = smp_processor_id();
6151
6152#ifdef IXGBE_FCOE 6891#ifdef IXGBE_FCOE
6153 if ((skb->protocol == htons(ETH_P_FCOE)) || 6892 __be16 protocol;
6154 (skb->protocol == htons(ETH_P_FIP))) { 6893
6155 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 6894 protocol = vlan_get_protocol(skb);
6156 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); 6895
6157 txq += adapter->ring_feature[RING_F_FCOE].mask; 6896 if (((protocol == htons(ETH_P_FCOE)) ||
6158 return txq; 6897 (protocol == htons(ETH_P_FIP))) &&
6159#ifdef CONFIG_IXGBE_DCB 6898 (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
6160 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6899 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
6161 txq = adapter->fcoe.up; 6900 txq += adapter->ring_feature[RING_F_FCOE].mask;
6162 return txq; 6901 return txq;
6163#endif
6164 }
6165 } 6902 }
6166#endif 6903#endif
6167 6904
@@ -6171,66 +6908,44 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6171 return txq; 6908 return txq;
6172 } 6909 }
6173 6910
6174 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6175 if (skb->priority == TC_PRIO_CONTROL)
6176 txq = adapter->ring_feature[RING_F_DCB].indices-1;
6177 else
6178 txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
6179 >> 13;
6180 return txq;
6181 }
6182
6183 return skb_tx_hash(dev, skb); 6911 return skb_tx_hash(dev, skb);
6184} 6912}
6185 6913
6186static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, 6914netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6187 struct net_device *netdev) 6915 struct ixgbe_adapter *adapter,
6916 struct ixgbe_ring *tx_ring)
6188{ 6917{
6189 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6190 struct ixgbe_ring *tx_ring;
6191 struct netdev_queue *txq;
6192 unsigned int first; 6918 unsigned int first;
6193 unsigned int tx_flags = 0; 6919 unsigned int tx_flags = 0;
6194 u8 hdr_len = 0; 6920 u8 hdr_len = 0;
6195 int tso; 6921 int tso;
6196 int count = 0; 6922 int count = 0;
6197 unsigned int f; 6923 unsigned int f;
6924 __be16 protocol;
6198 6925
6199 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 6926 protocol = vlan_get_protocol(skb);
6927
6928 if (vlan_tx_tag_present(skb)) {
6200 tx_flags |= vlan_tx_tag_get(skb); 6929 tx_flags |= vlan_tx_tag_get(skb);
6201 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6930 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6202 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; 6931 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
6203 tx_flags |= ((skb->queue_mapping & 0x7) << 13); 6932 tx_flags |= tx_ring->dcb_tc << 13;
6204 } 6933 }
6205 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 6934 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
6206 tx_flags |= IXGBE_TX_FLAGS_VLAN; 6935 tx_flags |= IXGBE_TX_FLAGS_VLAN;
6207 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED && 6936 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
6208 skb->priority != TC_PRIO_CONTROL) { 6937 skb->priority != TC_PRIO_CONTROL) {
6209 tx_flags |= ((skb->queue_mapping & 0x7) << 13); 6938 tx_flags |= tx_ring->dcb_tc << 13;
6210 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 6939 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
6211 tx_flags |= IXGBE_TX_FLAGS_VLAN; 6940 tx_flags |= IXGBE_TX_FLAGS_VLAN;
6212 } 6941 }
6213 6942
6214 tx_ring = adapter->tx_ring[skb->queue_mapping];
6215
6216#ifdef IXGBE_FCOE 6943#ifdef IXGBE_FCOE
6217 /* for FCoE with DCB, we force the priority to what 6944 /* for FCoE with DCB, we force the priority to what
6218 * was specified by the switch */ 6945 * was specified by the switch */
6219 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && 6946 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
6220 (skb->protocol == htons(ETH_P_FCOE) || 6947 (protocol == htons(ETH_P_FCOE)))
6221 skb->protocol == htons(ETH_P_FIP))) { 6948 tx_flags |= IXGBE_TX_FLAGS_FCOE;
6222#ifdef CONFIG_IXGBE_DCB
6223 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6224 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
6225 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6226 tx_flags |= ((adapter->fcoe.up << 13)
6227 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6228 }
6229#endif
6230 /* flag for FCoE offloads */
6231 if (skb->protocol == htons(ETH_P_FCOE))
6232 tx_flags |= IXGBE_TX_FLAGS_FCOE;
6233 }
6234#endif 6949#endif
6235 6950
6236 /* four things can cause us to need a context descriptor */ 6951 /* four things can cause us to need a context descriptor */
@@ -6244,8 +6959,8 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6244 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 6959 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6245 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 6960 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6246 6961
6247 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { 6962 if (ixgbe_maybe_stop_tx(tx_ring, count)) {
6248 adapter->tx_busy++; 6963 tx_ring->tx_stats.tx_busy++;
6249 return NETDEV_TX_BUSY; 6964 return NETDEV_TX_BUSY;
6250 } 6965 }
6251 6966
@@ -6262,9 +6977,10 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6262 tx_flags |= IXGBE_TX_FLAGS_FSO; 6977 tx_flags |= IXGBE_TX_FLAGS_FSO;
6263#endif /* IXGBE_FCOE */ 6978#endif /* IXGBE_FCOE */
6264 } else { 6979 } else {
6265 if (skb->protocol == htons(ETH_P_IP)) 6980 if (protocol == htons(ETH_P_IP))
6266 tx_flags |= IXGBE_TX_FLAGS_IPV4; 6981 tx_flags |= IXGBE_TX_FLAGS_IPV4;
6267 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); 6982 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
6983 protocol);
6268 if (tso < 0) { 6984 if (tso < 0) {
6269 dev_kfree_skb_any(skb); 6985 dev_kfree_skb_any(skb);
6270 return NETDEV_TX_OK; 6986 return NETDEV_TX_OK;
@@ -6272,30 +6988,19 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6272 6988
6273 if (tso) 6989 if (tso)
6274 tx_flags |= IXGBE_TX_FLAGS_TSO; 6990 tx_flags |= IXGBE_TX_FLAGS_TSO;
6275 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && 6991 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
6992 protocol) &&
6276 (skb->ip_summed == CHECKSUM_PARTIAL)) 6993 (skb->ip_summed == CHECKSUM_PARTIAL))
6277 tx_flags |= IXGBE_TX_FLAGS_CSUM; 6994 tx_flags |= IXGBE_TX_FLAGS_CSUM;
6278 } 6995 }
6279 6996
6280 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first); 6997 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
6281 if (count) { 6998 if (count) {
6282 /* add the ATR filter if ATR is on */ 6999 /* add the ATR filter if ATR is on */
6283 if (tx_ring->atr_sample_rate) { 7000 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
6284 ++tx_ring->atr_count; 7001 ixgbe_atr(tx_ring, skb, tx_flags, protocol);
6285 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && 7002 ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
6286 test_bit(__IXGBE_FDIR_INIT_DONE, 7003 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
6287 &tx_ring->reinit_state)) {
6288 ixgbe_atr(adapter, skb, tx_ring->queue_index,
6289 tx_flags);
6290 tx_ring->atr_count = 0;
6291 }
6292 }
6293 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
6294 txq->tx_bytes += skb->len;
6295 txq->tx_packets++;
6296 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
6297 hdr_len);
6298 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
6299 7004
6300 } else { 7005 } else {
6301 dev_kfree_skb_any(skb); 7006 dev_kfree_skb_any(skb);
@@ -6306,6 +7011,15 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6306 return NETDEV_TX_OK; 7011 return NETDEV_TX_OK;
6307} 7012}
6308 7013
7014static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
7015{
7016 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7017 struct ixgbe_ring *tx_ring;
7018
7019 tx_ring = adapter->tx_ring[skb->queue_mapping];
7020 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
7021}
7022
6309/** 7023/**
6310 * ixgbe_set_mac - Change the Ethernet Address of the NIC 7024 * ixgbe_set_mac - Change the Ethernet Address of the NIC
6311 * @netdev: network interface device structure 7025 * @netdev: network interface device structure
@@ -6436,8 +7150,57 @@ static void ixgbe_netpoll(struct net_device *netdev)
6436} 7150}
6437#endif 7151#endif
6438 7152
7153static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
7154 struct rtnl_link_stats64 *stats)
7155{
7156 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7157 int i;
7158
7159 rcu_read_lock();
7160 for (i = 0; i < adapter->num_rx_queues; i++) {
7161 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
7162 u64 bytes, packets;
7163 unsigned int start;
7164
7165 if (ring) {
7166 do {
7167 start = u64_stats_fetch_begin_bh(&ring->syncp);
7168 packets = ring->stats.packets;
7169 bytes = ring->stats.bytes;
7170 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
7171 stats->rx_packets += packets;
7172 stats->rx_bytes += bytes;
7173 }
7174 }
7175
7176 for (i = 0; i < adapter->num_tx_queues; i++) {
7177 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
7178 u64 bytes, packets;
7179 unsigned int start;
7180
7181 if (ring) {
7182 do {
7183 start = u64_stats_fetch_begin_bh(&ring->syncp);
7184 packets = ring->stats.packets;
7185 bytes = ring->stats.bytes;
7186 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
7187 stats->tx_packets += packets;
7188 stats->tx_bytes += bytes;
7189 }
7190 }
7191 rcu_read_unlock();
7192 /* following stats updated by ixgbe_watchdog_task() */
7193 stats->multicast = netdev->stats.multicast;
7194 stats->rx_errors = netdev->stats.rx_errors;
7195 stats->rx_length_errors = netdev->stats.rx_length_errors;
7196 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
7197 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
7198 return stats;
7199}
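ixgbe_get_stats64() reads the 64-bit ring counters locklessly: the u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() pair is a seqcount read loop that retries whenever a writer updated the stats mid-read. The shape of that loop modeled in user space; on 64-bit kernels the helpers compile down to plain loads.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ring_stats {
	atomic_uint seq;		/* odd while a writer is mid-update */
	uint64_t packets, bytes;
};

static void fetch_stats(struct ring_stats *s, uint64_t *p, uint64_t *b)
{
	unsigned int start;

	do {
		start = atomic_load(&s->seq);
		*p = s->packets;
		*b = s->bytes;
		/* retry if a write was in flight or landed during the reads */
	} while ((start & 1) || atomic_load(&s->seq) != start);
}

int main(void)
{
	struct ring_stats s = { .packets = 42, .bytes = 65000 };
	uint64_t p, b;

	fetch_stats(&s, &p, &b);
	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}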
7200
7201
6439static const struct net_device_ops ixgbe_netdev_ops = { 7202static const struct net_device_ops ixgbe_netdev_ops = {
6440 .ndo_open = ixgbe_open, 7203 .ndo_open = ixgbe_open,
6441 .ndo_stop = ixgbe_close, 7204 .ndo_stop = ixgbe_close,
6442 .ndo_start_xmit = ixgbe_xmit_frame, 7205 .ndo_start_xmit = ixgbe_xmit_frame,
6443 .ndo_select_queue = ixgbe_select_queue, 7206 .ndo_select_queue = ixgbe_select_queue,
@@ -6447,7 +7210,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
6447 .ndo_set_mac_address = ixgbe_set_mac, 7210 .ndo_set_mac_address = ixgbe_set_mac,
6448 .ndo_change_mtu = ixgbe_change_mtu, 7211 .ndo_change_mtu = ixgbe_change_mtu,
6449 .ndo_tx_timeout = ixgbe_tx_timeout, 7212 .ndo_tx_timeout = ixgbe_tx_timeout,
6450 .ndo_vlan_rx_register = ixgbe_vlan_rx_register,
6451 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, 7213 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
6452 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, 7214 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
6453 .ndo_do_ioctl = ixgbe_ioctl, 7215 .ndo_do_ioctl = ixgbe_ioctl,
@@ -6455,11 +7217,16 @@ static const struct net_device_ops ixgbe_netdev_ops = {
6455 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, 7217 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
6456 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, 7218 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
6457 .ndo_get_vf_config = ixgbe_ndo_get_vf_config, 7219 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
7220 .ndo_get_stats64 = ixgbe_get_stats64,
7221#ifdef CONFIG_IXGBE_DCB
7222 .ndo_setup_tc = ixgbe_setup_tc,
7223#endif
6458#ifdef CONFIG_NET_POLL_CONTROLLER 7224#ifdef CONFIG_NET_POLL_CONTROLLER
6459 .ndo_poll_controller = ixgbe_netpoll, 7225 .ndo_poll_controller = ixgbe_netpoll,
6460#endif 7226#endif
6461#ifdef IXGBE_FCOE 7227#ifdef IXGBE_FCOE
6462 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, 7228 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
7229 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
6463 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, 7230 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
6464 .ndo_fcoe_enable = ixgbe_fcoe_enable, 7231 .ndo_fcoe_enable = ixgbe_fcoe_enable,
6465 .ndo_fcoe_disable = ixgbe_fcoe_disable, 7232 .ndo_fcoe_disable = ixgbe_fcoe_disable,
@@ -6473,8 +7240,10 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
6473#ifdef CONFIG_PCI_IOV 7240#ifdef CONFIG_PCI_IOV
6474 struct ixgbe_hw *hw = &adapter->hw; 7241 struct ixgbe_hw *hw = &adapter->hw;
6475 int err; 7242 int err;
7243 int num_vf_macvlans, i;
7244 struct vf_macvlans *mv_list;
6476 7245
6477 if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs) 7246 if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs)
6478 return; 7247 return;
6479 7248
6480 /* The 82599 supports up to 64 VFs per physical function 7249 /* The 82599 supports up to 64 VFs per physical function
@@ -6489,6 +7258,26 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
6489 e_err(probe, "Failed to enable PCI sriov: %d\n", err); 7258 e_err(probe, "Failed to enable PCI sriov: %d\n", err);
6490 goto err_novfs; 7259 goto err_novfs;
6491 } 7260 }
7261
7262 num_vf_macvlans = hw->mac.num_rar_entries -
7263 (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
7264
7265 adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
7266 sizeof(struct vf_macvlans),
7267 GFP_KERNEL);
7268 if (mv_list) {
7269 /* Initialize list of VF macvlans */
7270 INIT_LIST_HEAD(&adapter->vf_mvs.l);
7271 for (i = 0; i < num_vf_macvlans; i++) {
7272 mv_list->vf = -1;
7273 mv_list->free = true;
7274 mv_list->rar_entry = hw->mac.num_rar_entries -
7275 (i + adapter->num_vfs + 1);
7276 list_add(&mv_list->l, &adapter->vf_mvs.l);
7277 mv_list++;
7278 }
7279 }
7280
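The block above carves the tail of the RAR (receive address register) table into per-VF macvlan slots: everything past the PF's own macvlans, the primary address, and one entry per VF is handed out from the top of the table down. A worked example with assumed sizes: 128 RAR entries as on an 82599-class part, and IXGBE_MAX_PF_MACVLANS taken to be 15.

#include <stdio.h>

int main(void)
{
	int num_rar_entries = 128;	/* assumed 82599-class table size */
	int max_pf_macvlans = 15;	/* assumed IXGBE_MAX_PF_MACVLANS */
	int num_vfs = 8;
	int num_vf_macvlans =
		num_rar_entries - (max_pf_macvlans + 1 + num_vfs);
	int i;

	printf("VF macvlan slots: %d\n", num_vf_macvlans);
	for (i = 0; i < 3; i++)		/* first few slots, top-down */
		printf("slot %d -> RAR[%d]\n", i,
		       num_rar_entries - (i + num_vfs + 1));
	return 0;
}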
6492 /* If call to enable VFs succeeded then allocate memory 7281 /* If call to enable VFs succeeded then allocate memory
6493 * for per VF control structures. 7282 * for per VF control structures.
6494 */ 7283 */
@@ -6532,7 +7321,7 @@ err_novfs:
6532 * and a hardware reset occur. 7321 * and a hardware reset occur.
6533 **/ 7322 **/
6534static int __devinit ixgbe_probe(struct pci_dev *pdev, 7323static int __devinit ixgbe_probe(struct pci_dev *pdev,
6535 const struct pci_device_id *ent) 7324 const struct pci_device_id *ent)
6536{ 7325{
6537 struct net_device *netdev; 7326 struct net_device *netdev;
6538 struct ixgbe_adapter *adapter = NULL; 7327 struct ixgbe_adapter *adapter = NULL;
@@ -6540,11 +7329,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6540 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 7329 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
6541 static int cards_found; 7330 static int cards_found;
6542 int i, err, pci_using_dac; 7331 int i, err, pci_using_dac;
7332 u8 part_str[IXGBE_PBANUM_LENGTH];
6543 unsigned int indices = num_possible_cpus(); 7333 unsigned int indices = num_possible_cpus();
6544#ifdef IXGBE_FCOE 7334#ifdef IXGBE_FCOE
6545 u16 device_caps; 7335 u16 device_caps;
6546#endif 7336#endif
6547 u32 part_num, eec; 7337 u32 eec;
6548 7338
6549 /* Catch broken hardware that put the wrong VF device ID in 7339 /* Catch broken hardware that put the wrong VF device ID in
6550 * the PCIe SR-IOV capability. 7340 * the PCIe SR-IOV capability.
@@ -6577,7 +7367,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6577 } 7367 }
6578 7368
6579 err = pci_request_selected_regions(pdev, pci_select_bars(pdev, 7369 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
6580 IORESOURCE_MEM), ixgbe_driver_name); 7370 IORESOURCE_MEM), ixgbe_driver_name);
6581 if (err) { 7371 if (err) {
6582 dev_err(&pdev->dev, 7372 dev_err(&pdev->dev,
6583 "pci_request_selected_regions failed 0x%x\n", err); 7373 "pci_request_selected_regions failed 0x%x\n", err);
@@ -6594,8 +7384,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6594 else 7384 else
6595 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); 7385 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
6596 7386
7387#if defined(CONFIG_DCB)
6597 indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES); 7388 indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
6598#ifdef IXGBE_FCOE 7389#elif defined(IXGBE_FCOE)
6599 indices += min_t(unsigned int, num_possible_cpus(), 7390 indices += min_t(unsigned int, num_possible_cpus(),
6600 IXGBE_MAX_FCOE_INDICES); 7391 IXGBE_MAX_FCOE_INDICES);
6601#endif 7392#endif
@@ -6607,8 +7398,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6607 7398
6608 SET_NETDEV_DEV(netdev, &pdev->dev); 7399 SET_NETDEV_DEV(netdev, &pdev->dev);
6609 7400
6610 pci_set_drvdata(pdev, netdev);
6611 adapter = netdev_priv(netdev); 7401 adapter = netdev_priv(netdev);
7402 pci_set_drvdata(pdev, adapter);
6612 7403
6613 adapter->netdev = netdev; 7404 adapter->netdev = netdev;
6614 adapter->pdev = pdev; 7405 adapter->pdev = pdev;
@@ -6617,7 +7408,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6617 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 7408 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
6618 7409
6619 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 7410 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
6620 pci_resource_len(pdev, 0)); 7411 pci_resource_len(pdev, 0));
6621 if (!hw->hw_addr) { 7412 if (!hw->hw_addr) {
6622 err = -EIO; 7413 err = -EIO;
6623 goto err_ioremap; 7414 goto err_ioremap;
@@ -6631,7 +7422,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6631 netdev->netdev_ops = &ixgbe_netdev_ops; 7422 netdev->netdev_ops = &ixgbe_netdev_ops;
6632 ixgbe_set_ethtool_ops(netdev); 7423 ixgbe_set_ethtool_ops(netdev);
6633 netdev->watchdog_timeo = 5 * HZ; 7424 netdev->watchdog_timeo = 5 * HZ;
6634 strcpy(netdev->name, pci_name(pdev)); 7425 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
6635 7426
6636 adapter->bd_number = cards_found; 7427 adapter->bd_number = cards_found;
6637 7428
@@ -6657,22 +7448,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6657 hw->phy.mdio.mdio_read = ixgbe_mdio_read; 7448 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
6658 hw->phy.mdio.mdio_write = ixgbe_mdio_write; 7449 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
6659 7450
6660 /* set up this timer and work struct before calling get_invariants
6661 * which might start the timer
6662 */
6663 init_timer(&adapter->sfp_timer);
6664 adapter->sfp_timer.function = &ixgbe_sfp_timer;
6665 adapter->sfp_timer.data = (unsigned long) adapter;
6666
6667 INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
6668
6669 /* multispeed fiber has its own tasklet, called from GPI SDP1 context */
6670 INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);
6671
6672 /* a new SFP+ module arrival, called from GPI SDP2 context */
6673 INIT_WORK(&adapter->sfp_config_module_task,
6674 ixgbe_sfp_config_module_task);
6675
6676 ii->get_invariants(hw); 7451 ii->get_invariants(hw);
6677 7452
6678 /* setup the private structure */ 7453 /* setup the private structure */
@@ -6681,8 +7456,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6681 goto err_sw_init; 7456 goto err_sw_init;
6682 7457
6683 /* Make it possible the adapter to be woken up via WOL */ 7458 /* Make it possible the adapter to be woken up via WOL */
6684 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 7459 switch (adapter->hw.mac.type) {
7460 case ixgbe_mac_82599EB:
7461 case ixgbe_mac_X540:
6685 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 7462 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
7463 break;
7464 default:
7465 break;
7466 }
6686 7467
6687 /* 7468 /*
6688 * If there is a fan on this device and it has failed log the 7469 * If there is a fan on this device and it has failed log the
@@ -6700,17 +7481,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6700 hw->phy.reset_if_overtemp = false; 7481 hw->phy.reset_if_overtemp = false;
6701 if (err == IXGBE_ERR_SFP_NOT_PRESENT && 7482 if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
6702 hw->mac.type == ixgbe_mac_82598EB) { 7483 hw->mac.type == ixgbe_mac_82598EB) {
6703 /*
6704 * Start a kernel thread to watch for a module to arrive.
6705 * Only do this for 82598, since 82599 will generate
6706 * interrupts on module arrival.
6707 */
6708 set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
6709 mod_timer(&adapter->sfp_timer,
6710 round_jiffies(jiffies + (2 * HZ)));
6711 err = 0; 7484 err = 0;
6712 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 7485 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
6713 e_dev_err("failed to initialize because an unsupported SFP+ " 7486 e_dev_err("failed to load because an unsupported SFP+ "
6714 "module type was detected.\n"); 7487 "module type was detected.\n");
6715 e_dev_err("Reload the driver after installing a supported " 7488 e_dev_err("Reload the driver after installing a supported "
6716 "module.\n"); 7489 "module.\n");
@@ -6723,18 +7496,25 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6723 ixgbe_probe_vf(adapter, ii); 7496 ixgbe_probe_vf(adapter, ii);
6724 7497
6725 netdev->features = NETIF_F_SG | 7498 netdev->features = NETIF_F_SG |
6726 NETIF_F_IP_CSUM | 7499 NETIF_F_IP_CSUM |
6727 NETIF_F_HW_VLAN_TX | 7500 NETIF_F_HW_VLAN_TX |
6728 NETIF_F_HW_VLAN_RX | 7501 NETIF_F_HW_VLAN_RX |
6729 NETIF_F_HW_VLAN_FILTER; 7502 NETIF_F_HW_VLAN_FILTER;
6730 7503
6731 netdev->features |= NETIF_F_IPV6_CSUM; 7504 netdev->features |= NETIF_F_IPV6_CSUM;
6732 netdev->features |= NETIF_F_TSO; 7505 netdev->features |= NETIF_F_TSO;
6733 netdev->features |= NETIF_F_TSO6; 7506 netdev->features |= NETIF_F_TSO6;
6734 netdev->features |= NETIF_F_GRO; 7507 netdev->features |= NETIF_F_GRO;
7508 netdev->features |= NETIF_F_RXHASH;
6735 7509
6736 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 7510 switch (adapter->hw.mac.type) {
7511 case ixgbe_mac_82599EB:
7512 case ixgbe_mac_X540:
6737 netdev->features |= NETIF_F_SCTP_CSUM; 7513 netdev->features |= NETIF_F_SCTP_CSUM;
7514 break;
7515 default:
7516 break;
7517 }
6738 7518
6739 netdev->vlan_features |= NETIF_F_TSO; 7519 netdev->vlan_features |= NETIF_F_TSO;
6740 netdev->vlan_features |= NETIF_F_TSO6; 7520 netdev->vlan_features |= NETIF_F_TSO6;
@@ -6745,8 +7525,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6745 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 7525 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6746 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | 7526 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
6747 IXGBE_FLAG_DCB_ENABLED); 7527 IXGBE_FLAG_DCB_ENABLED);
6748 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
6749 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
6750 7528
6751#ifdef CONFIG_IXGBE_DCB 7529#ifdef CONFIG_IXGBE_DCB
6752 netdev->dcbnl_ops = &dcbnl_ops; 7530 netdev->dcbnl_ops = &dcbnl_ops;
@@ -6766,8 +7544,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6766 netdev->vlan_features |= NETIF_F_FCOE_MTU; 7544 netdev->vlan_features |= NETIF_F_FCOE_MTU;
6767 } 7545 }
6768#endif /* IXGBE_FCOE */ 7546#endif /* IXGBE_FCOE */
6769 if (pci_using_dac) 7547 if (pci_using_dac) {
6770 netdev->features |= NETIF_F_HIGHDMA; 7548 netdev->features |= NETIF_F_HIGHDMA;
7549 netdev->vlan_features |= NETIF_F_HIGHDMA;
7550 }
6771 7551
6772 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 7552 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
6773 netdev->features |= NETIF_F_LRO; 7553 netdev->features |= NETIF_F_LRO;
@@ -6788,25 +7568,42 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6788 goto err_eeprom; 7568 goto err_eeprom;
6789 } 7569 }
6790 7570
6791 /* power down the optics */ 7571 /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
6792 if (hw->phy.multispeed_fiber) 7572 if (hw->mac.ops.disable_tx_laser &&
7573 ((hw->phy.multispeed_fiber) ||
7574 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
7575 (hw->mac.type == ixgbe_mac_82599EB))))
6793 hw->mac.ops.disable_tx_laser(hw); 7576 hw->mac.ops.disable_tx_laser(hw);
6794 7577
6795 init_timer(&adapter->watchdog_timer); 7578 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
6796 adapter->watchdog_timer.function = &ixgbe_watchdog; 7579 (unsigned long) adapter);
6797 adapter->watchdog_timer.data = (unsigned long)adapter;
6798 7580
6799 INIT_WORK(&adapter->reset_task, ixgbe_reset_task); 7581 INIT_WORK(&adapter->service_task, ixgbe_service_task);
6800 INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task); 7582 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
6801 7583
6802 err = ixgbe_init_interrupt_scheme(adapter); 7584 err = ixgbe_init_interrupt_scheme(adapter);
6803 if (err) 7585 if (err)
6804 goto err_sw_init; 7586 goto err_sw_init;
6805 7587
7588 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
7589 netdev->features &= ~NETIF_F_RXHASH;
7590
6806 switch (pdev->device) { 7591 switch (pdev->device) {
7592 case IXGBE_DEV_ID_82599_SFP:
7593 /* Only this subdevice supports WOL */
7594 if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
7595 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
7596 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
7597 break;
7598 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
7599 /* All except this subdevice support WOL */
7600 if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
7601 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
7602 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
7603 break;
6807 case IXGBE_DEV_ID_82599_KX4: 7604 case IXGBE_DEV_ID_82599_KX4:
6808 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | 7605 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
6809 IXGBE_WUFC_MC | IXGBE_WUFC_BC); 7606 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
6810 break; 7607 break;
6811 default: 7608 default:
6812 adapter->wol = 0; 7609 adapter->wol = 0;
@@ -6819,23 +7616,25 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6819 7616
6820 /* print bus type/speed/width info */ 7617 /* print bus type/speed/width info */
6821 e_dev_info("(PCI Express:%s:%s) %pM\n", 7618 e_dev_info("(PCI Express:%s:%s) %pM\n",
6822 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": 7619 (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
6823 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"), 7620 hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
6824 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 7621 "Unknown"),
6825 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 7622 (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
6826 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : 7623 hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
6827 "Unknown"), 7624 hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
6828 netdev->dev_addr); 7625 "Unknown"),
6829 ixgbe_read_pba_num_generic(hw, &part_num); 7626 netdev->dev_addr);
7627
7628 err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
7629 if (err)
7630 strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
6830 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 7631 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
6831 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, " 7632 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
6832 "PBA No: %06x-%03x\n",
6833 hw->mac.type, hw->phy.type, hw->phy.sfp_type, 7633 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
6834 (part_num >> 8), (part_num & 0xff)); 7634 part_str);
6835 else 7635 else
6836 e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 7636 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
6837 hw->mac.type, hw->phy.type, 7637 hw->mac.type, hw->phy.type, part_str);
6838 (part_num >> 8), (part_num & 0xff));
6839 7638
6840 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { 7639 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
6841 e_dev_warn("PCI-Express bandwidth available for this card is " 7640 e_dev_warn("PCI-Express bandwidth available for this card is "
@@ -6867,12 +7666,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6867 /* carrier off reporting is important to ethtool even BEFORE open */ 7666 /* carrier off reporting is important to ethtool even BEFORE open */
6868 netif_carrier_off(netdev); 7667 netif_carrier_off(netdev);
6869 7668
6870 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
6871 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
6872 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
6873
6874 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
6875 INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task);
6876#ifdef CONFIG_IXGBE_DCA 7669#ifdef CONFIG_IXGBE_DCA
6877 if (dca_add_requester(&pdev->dev) == 0) { 7670 if (dca_add_requester(&pdev->dev) == 0) {
6878 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 7671 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
@@ -6899,17 +7692,13 @@ err_sw_init:
6899err_eeprom: 7692err_eeprom:
6900 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 7693 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6901 ixgbe_disable_sriov(adapter); 7694 ixgbe_disable_sriov(adapter);
6902 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 7695 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
6903 del_timer_sync(&adapter->sfp_timer);
6904 cancel_work_sync(&adapter->sfp_task);
6905 cancel_work_sync(&adapter->multispeed_fiber_task);
6906 cancel_work_sync(&adapter->sfp_config_module_task);
6907 iounmap(hw->hw_addr); 7696 iounmap(hw->hw_addr);
6908err_ioremap: 7697err_ioremap:
6909 free_netdev(netdev); 7698 free_netdev(netdev);
6910err_alloc_etherdev: 7699err_alloc_etherdev:
6911 pci_release_selected_regions(pdev, pci_select_bars(pdev, 7700 pci_release_selected_regions(pdev,
6912 IORESOURCE_MEM)); 7701 pci_select_bars(pdev, IORESOURCE_MEM));
6913err_pci_reg: 7702err_pci_reg:
6914err_dma: 7703err_dma:
6915 pci_disable_device(pdev); 7704 pci_disable_device(pdev);
@@ -6927,25 +7716,11 @@ err_dma:
6927 **/ 7716 **/
6928static void __devexit ixgbe_remove(struct pci_dev *pdev) 7717static void __devexit ixgbe_remove(struct pci_dev *pdev)
6929{ 7718{
6930 struct net_device *netdev = pci_get_drvdata(pdev); 7719 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6931 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7720 struct net_device *netdev = adapter->netdev;
6932 7721
6933 set_bit(__IXGBE_DOWN, &adapter->state); 7722 set_bit(__IXGBE_DOWN, &adapter->state);
6934 /* clear the module not found bit to make sure the worker won't 7723 cancel_work_sync(&adapter->service_task);
6935 * reschedule
6936 */
6937 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
6938 del_timer_sync(&adapter->watchdog_timer);
6939
6940 del_timer_sync(&adapter->sfp_timer);
6941 cancel_work_sync(&adapter->watchdog_task);
6942 cancel_work_sync(&adapter->sfp_task);
6943 cancel_work_sync(&adapter->multispeed_fiber_task);
6944 cancel_work_sync(&adapter->sfp_config_module_task);
6945 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
6946 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
6947 cancel_work_sync(&adapter->fdir_reinit_task);
6948 flush_scheduled_work();
6949 7724
6950#ifdef CONFIG_IXGBE_DCA 7725#ifdef CONFIG_IXGBE_DCA
6951 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 7726 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
@@ -6976,7 +7751,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
6976 7751
6977 iounmap(adapter->hw.hw_addr); 7752 iounmap(adapter->hw.hw_addr);
6978 pci_release_selected_regions(pdev, pci_select_bars(pdev, 7753 pci_release_selected_regions(pdev, pci_select_bars(pdev,
6979 IORESOURCE_MEM)); 7754 IORESOURCE_MEM));
6980 7755
6981 e_dev_info("complete\n"); 7756 e_dev_info("complete\n");
6982 7757
@@ -6996,10 +7771,10 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
6996 * this device has been detected. 7771 * this device has been detected.
6997 */ 7772 */
6998static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, 7773static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
6999 pci_channel_state_t state) 7774 pci_channel_state_t state)
7000{ 7775{
7001 struct net_device *netdev = pci_get_drvdata(pdev); 7776 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7002 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7777 struct net_device *netdev = adapter->netdev;
7003 7778
7004 netif_device_detach(netdev); 7779 netif_device_detach(netdev);
7005 7780
@@ -7022,8 +7797,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
7022 */ 7797 */
7023static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) 7798static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
7024{ 7799{
7025 struct net_device *netdev = pci_get_drvdata(pdev); 7800 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7026 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7027 pci_ers_result_t result; 7801 pci_ers_result_t result;
7028 int err; 7802 int err;
7029 7803
@@ -7061,8 +7835,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
7061 */ 7835 */
7062static void ixgbe_io_resume(struct pci_dev *pdev) 7836static void ixgbe_io_resume(struct pci_dev *pdev)
7063{ 7837{
7064 struct net_device *netdev = pci_get_drvdata(pdev); 7838 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7065 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7839 struct net_device *netdev = adapter->netdev;
7066 7840
7067 if (netif_running(netdev)) { 7841 if (netif_running(netdev)) {
7068 if (ixgbe_up(adapter)) { 7842 if (ixgbe_up(adapter)) {
@@ -7102,8 +7876,7 @@ static struct pci_driver ixgbe_driver = {
7102static int __init ixgbe_init_module(void) 7876static int __init ixgbe_init_module(void)
7103{ 7877{
7104 int ret; 7878 int ret;
7105 pr_info("%s - version %s\n", ixgbe_driver_string, 7879 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
7106 ixgbe_driver_version);
7107 pr_info("%s\n", ixgbe_copyright); 7880 pr_info("%s\n", ixgbe_copyright);
7108 7881
7109#ifdef CONFIG_IXGBE_DCA 7882#ifdef CONFIG_IXGBE_DCA
@@ -7128,32 +7901,23 @@ static void __exit ixgbe_exit_module(void)
7128 dca_unregister_notify(&dca_notifier); 7901 dca_unregister_notify(&dca_notifier);
7129#endif 7902#endif
7130 pci_unregister_driver(&ixgbe_driver); 7903 pci_unregister_driver(&ixgbe_driver);
7904 rcu_barrier(); /* Wait for completion of call_rcu()'s */
7131} 7905}
7132 7906
7133#ifdef CONFIG_IXGBE_DCA 7907#ifdef CONFIG_IXGBE_DCA
7134static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, 7908static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
7135 void *p) 7909 void *p)
7136{ 7910{
7137 int ret_val; 7911 int ret_val;
7138 7912
7139 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, 7913 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
7140 __ixgbe_notify_dca); 7914 __ixgbe_notify_dca);
7141 7915
7142 return ret_val ? NOTIFY_BAD : NOTIFY_DONE; 7916 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
7143} 7917}
7144 7918
7145#endif /* CONFIG_IXGBE_DCA */ 7919#endif /* CONFIG_IXGBE_DCA */
7146 7920
7147/**
7148 * ixgbe_get_hw_dev return device
7149 * used by hardware layer to print debugging information
7150 **/
7151struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
7152{
7153 struct ixgbe_adapter *adapter = hw->back;
7154 return adapter->netdev;
7155}
7156
7157module_exit(ixgbe_exit_module); 7921module_exit(ixgbe_exit_module);
7158 7922
7159/* ixgbe_main.c */ 7923/* ixgbe_main.c */
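The module-exit hunk above adds rcu_barrier() after pci_unregister_driver(). Any callback still queued through call_rcu() executes module code, so the exit function must not return until those callbacks have drained. A minimal sketch of the hazard, with a hypothetical foo object:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct foo {
	struct rcu_head rcu;
};

static void foo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

static void foo_release(struct foo *f)
{
	call_rcu(&f->rcu, foo_free_rcu);	/* deferred free */
}

static void __exit foo_exit(void)
{
	/* driver already unregistered, so nothing new calls call_rcu();
	 * wait here for every pending foo_free_rcu() before the module
	 * text that contains it can be freed */
	rcu_barrier();
}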
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index d75f9148eb1f..1ff0eefcfd0a 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -154,9 +154,6 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
154 udelay(mbx->usec_delay); 154 udelay(mbx->usec_delay);
155 } 155 }
156 156
157 /* if we failed, all future posted messages fail until reset */
158 if (!countdown)
159 mbx->timeout = 0;
160out: 157out:
161 return countdown ? 0 : IXGBE_ERR_MBX; 158 return countdown ? 0 : IXGBE_ERR_MBX;
162} 159}
@@ -183,9 +180,6 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
183 udelay(mbx->usec_delay); 180 udelay(mbx->usec_delay);
184 } 181 }
185 182
186 /* if we failed, all future posted messages fail until reset */
187 if (!countdown)
188 mbx->timeout = 0;
189out: 183out:
190 return countdown ? 0 : IXGBE_ERR_MBX; 184 return countdown ? 0 : IXGBE_ERR_MBX;
191} 185}
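Both hunks delete the `mbx->timeout = 0` poisoning, so one timed-out poll now fails only that operation instead of disabling all posted mailbox traffic until the next reset. The surviving countdown loop is roughly this shape (a sketch with hypothetical names, not the driver's exact code):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>

struct foo_mbx {
	u32 timeout;		/* poll iterations to attempt */
	u32 usec_delay;		/* spacing between iterations */
};

static int foo_poll_for(struct foo_mbx *mbx, bool (*done)(struct foo_mbx *))
{
	u32 countdown = mbx->timeout;

	while (countdown && !done(mbx)) {
		countdown--;
		udelay(mbx->usec_delay);
	}

	/* failure is reported per call; mbx->timeout stays intact */
	return countdown ? 0 : -ETIMEDOUT;
}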
@@ -200,7 +194,8 @@ out:
200 * returns SUCCESS if it successfully received a message notification and 194 * returns SUCCESS if it successfully received a message notification and
201 * copied it into the receive buffer. 195 * copied it into the receive buffer.
202 **/ 196 **/
203s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) 197static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
198 u16 mbx_id)
204{ 199{
205 struct ixgbe_mbx_info *mbx = &hw->mbx; 200 struct ixgbe_mbx_info *mbx = &hw->mbx;
206 s32 ret_val = IXGBE_ERR_MBX; 201 s32 ret_val = IXGBE_ERR_MBX;
@@ -227,7 +222,7 @@ out:
227 * returns SUCCESS if it successfully copied message into the buffer and 222 * returns SUCCESS if it successfully copied message into the buffer and
228 * received an ack to that message within delay * timeout period 223 * received an ack to that message within delay * timeout period
229 **/ 224 **/
230s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, 225static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
231 u16 mbx_id) 226 u16 mbx_id)
232{ 227{
233 struct ixgbe_mbx_info *mbx = &hw->mbx; 228 struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -247,20 +242,6 @@ out:
247 return ret_val; 242 return ret_val;
248} 243}
249 244
250/**
251 * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
252 * @hw: pointer to the HW structure
253 *
254 * Setup the mailbox read and write message function pointers
255 **/
256void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
257{
258 struct ixgbe_mbx_info *mbx = &hw->mbx;
259
260 mbx->ops.read_posted = ixgbe_read_posted_mbx;
261 mbx->ops.write_posted = ixgbe_write_posted_mbx;
262}
263
264static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) 245static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
265{ 246{
266 u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); 247 u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
@@ -332,8 +313,16 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
332 u32 vflre = 0; 313 u32 vflre = 0;
333 s32 ret_val = IXGBE_ERR_MBX; 314 s32 ret_val = IXGBE_ERR_MBX;
334 315
335 if (hw->mac.type == ixgbe_mac_82599EB) 316 switch (hw->mac.type) {
317 case ixgbe_mac_82599EB:
336 vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); 318 vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
319 break;
320 case ixgbe_mac_X540:
321 vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
322 break;
323 default:
324 break;
325 }
337 326
338 if (vflre & (1 << vf_shift)) { 327 if (vflre & (1 << vf_shift)) {
339 ret_val = 0; 328 ret_val = 0;
@@ -442,6 +431,7 @@ out_no_read:
442 return ret_val; 431 return ret_val;
443} 432}
444 433
434#ifdef CONFIG_PCI_IOV
445/** 435/**
446 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox 436 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
447 * @hw: pointer to the HW structure 437 * @hw: pointer to the HW structure
@@ -452,22 +442,24 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
452{ 442{
453 struct ixgbe_mbx_info *mbx = &hw->mbx; 443 struct ixgbe_mbx_info *mbx = &hw->mbx;
454 444
455 if (hw->mac.type != ixgbe_mac_82599EB) 445 if (hw->mac.type != ixgbe_mac_82599EB &&
446 hw->mac.type != ixgbe_mac_X540)
456 return; 447 return;
457 448
458 mbx->timeout = 0; 449 mbx->timeout = 0;
459 mbx->usec_delay = 0; 450 mbx->usec_delay = 0;
460 451
461 mbx->size = IXGBE_VFMAILBOX_SIZE;
462
463 mbx->stats.msgs_tx = 0; 452 mbx->stats.msgs_tx = 0;
464 mbx->stats.msgs_rx = 0; 453 mbx->stats.msgs_rx = 0;
465 mbx->stats.reqs = 0; 454 mbx->stats.reqs = 0;
466 mbx->stats.acks = 0; 455 mbx->stats.acks = 0;
467 mbx->stats.rsts = 0; 456 mbx->stats.rsts = 0;
457
458 mbx->size = IXGBE_VFMAILBOX_SIZE;
468} 459}
460#endif /* CONFIG_PCI_IOV */
469 461
470struct ixgbe_mbx_operations mbx_ops_82599 = { 462struct ixgbe_mbx_operations mbx_ops_generic = {
471 .read = ixgbe_read_mbx_pf, 463 .read = ixgbe_read_mbx_pf,
472 .write = ixgbe_write_mbx_pf, 464 .write = ixgbe_write_mbx_pf,
473 .read_posted = ixgbe_read_posted_mbx, 465 .read_posted = ixgbe_read_posted_mbx,
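With ixgbe_read_posted_mbx() and ixgbe_write_posted_mbx() made static, and mbx_ops_82599 renamed to the MAC-neutral mbx_ops_generic, outside callers reach the posted operations only through the ops table. A sketch of that indirection with illustrative foo_* types:

#include <linux/types.h>

struct foo_hw;			/* forward declaration for the signatures */

struct foo_mbx_operations {
	s32 (*read_posted)(struct foo_hw *hw, u32 *msg, u16 size, u16 id);
	s32 (*write_posted)(struct foo_hw *hw, u32 *msg, u16 size, u16 id);
};

struct foo_mbx {
	struct foo_mbx_operations ops;
};

struct foo_hw {
	struct foo_mbx mbx;
};

/* callers dispatch through the table, so the implementations can stay
 * file-local and be swapped per MAC generation */
static s32 foo_send(struct foo_hw *hw, u32 *msg, u16 len, u16 vf)
{
	return hw->mbx.ops.write_posted(hw, msg, len, vf);
}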
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index be7ab3309ab7..b239bdac38da 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -36,9 +36,6 @@
36#define IXGBE_VFMAILBOX 0x002FC 36#define IXGBE_VFMAILBOX 0x002FC
37#define IXGBE_VFMBMEM 0x00200 37#define IXGBE_VFMBMEM 0x00200
38 38
39#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
40#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
41
42#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ 39#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
43#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ 40#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
44#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ 41#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
@@ -70,6 +67,7 @@
70#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ 67#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
71#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ 68#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
72#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ 69#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
70#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
73 71
74/* length of permanent address message returned from PF */ 72/* length of permanent address message returned from PF */
75#define IXGBE_VF_PERMADDR_MSG_LEN 4 73#define IXGBE_VF_PERMADDR_MSG_LEN 4
@@ -83,14 +81,13 @@
83 81
84s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); 82s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
85s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); 83s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
86s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
87s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
88s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); 84s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
89s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); 85s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
90s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); 86s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
91void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw); 87#ifdef CONFIG_PCI_IOV
92void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); 88void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
89#endif /* CONFIG_PCI_IOV */
93 90
94extern struct ixgbe_mbx_operations mbx_ops_82599; 91extern struct ixgbe_mbx_operations mbx_ops_generic;
95 92
96#endif /* _IXGBE_MBX_H_ */ 93#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 6c0d42e33f21..735f686c3b36 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -57,6 +57,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
57{ 57{
58 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 58 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
59 u32 phy_addr; 59 u32 phy_addr;
60 u16 ext_ability = 0;
60 61
61 if (hw->phy.type == ixgbe_phy_unknown) { 62 if (hw->phy.type == ixgbe_phy_unknown) {
62 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { 63 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
@@ -65,12 +66,29 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
65 ixgbe_get_phy_id(hw); 66 ixgbe_get_phy_id(hw);
66 hw->phy.type = 67 hw->phy.type =
67 ixgbe_get_phy_type_from_id(hw->phy.id); 68 ixgbe_get_phy_type_from_id(hw->phy.id);
69
70 if (hw->phy.type == ixgbe_phy_unknown) {
71 hw->phy.ops.read_reg(hw,
72 MDIO_PMA_EXTABLE,
73 MDIO_MMD_PMAPMD,
74 &ext_ability);
75 if (ext_ability &
76 (MDIO_PMA_EXTABLE_10GBT |
77 MDIO_PMA_EXTABLE_1000BT))
78 hw->phy.type =
79 ixgbe_phy_cu_unknown;
80 else
81 hw->phy.type =
82 ixgbe_phy_generic;
83 }
84
68 status = 0; 85 status = 0;
69 break; 86 break;
70 } 87 }
71 } 88 }
72 /* clear value if nothing found */ 89 /* clear value if nothing found */
73 hw->phy.mdio.prtad = 0; 90 if (status != 0)
91 hw->phy.mdio.prtad = 0;
74 } else { 92 } else {
75 status = 0; 93 status = 0;
76 } 94 }
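The fallback added above classifies a PHY whose ID is not in the known list by its PMA/PMD extended abilities: 10GBASE-T or 1000BASE-T capability marks an unknown copper PHY, anything else a generic one. Condensed into one helper (driver headers assumed; the error handling of the surrounding loop is elided):

#include <linux/mdio.h>
#include "ixgbe_phy.h"	/* driver headers assumed */

static void foo_classify_unknown_phy(struct ixgbe_hw *hw)
{
	u16 ext_ability = 0;

	hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
			     &ext_ability);
	hw->phy.type = (ext_ability & (MDIO_PMA_EXTABLE_10GBT |
				       MDIO_PMA_EXTABLE_1000BT)) ?
		       ixgbe_phy_cu_unknown : ixgbe_phy_generic;
}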
@@ -115,6 +133,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
115 case TN1010_PHY_ID: 133 case TN1010_PHY_ID:
116 phy_type = ixgbe_phy_tn; 134 phy_type = ixgbe_phy_tn;
117 break; 135 break;
136 case X540_PHY_ID:
137 phy_type = ixgbe_phy_aq;
138 break;
118 case QT2022_PHY_ID: 139 case QT2022_PHY_ID:
119 phy_type = ixgbe_phy_qt; 140 phy_type = ixgbe_phy_qt;
120 break; 141 break;
@@ -135,17 +156,51 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
135 **/ 156 **/
136s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) 157s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
137{ 158{
159 u32 i;
160 u16 ctrl = 0;
161 s32 status = 0;
162
163 if (hw->phy.type == ixgbe_phy_unknown)
164 status = ixgbe_identify_phy_generic(hw);
165
166 if (status != 0 || hw->phy.type == ixgbe_phy_none)
167 goto out;
168
138 /* Don't reset PHY if it's shut down due to overtemp. */ 169 /* Don't reset PHY if it's shut down due to overtemp. */
139 if (!hw->phy.reset_if_overtemp && 170 if (!hw->phy.reset_if_overtemp &&
140 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) 171 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
141 return 0; 172 goto out;
142 173
143 /* 174 /*
144 * Perform soft PHY reset to the PHY_XS. 175 * Perform soft PHY reset to the PHY_XS.
145 * This will cause a soft reset to the PHY 176 * This will cause a soft reset to the PHY
146 */ 177 */
147 return hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, 178 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
148 MDIO_CTRL1_RESET); 179 MDIO_MMD_PHYXS,
180 MDIO_CTRL1_RESET);
181
182 /*
183 * Poll for reset bit to self-clear indicating reset is complete.
184 * Some PHYs could take up to 3 seconds to complete and need about
185 * 1.7 usec delay after the reset is complete.
186 */
187 for (i = 0; i < 30; i++) {
188 msleep(100);
189 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
190 MDIO_MMD_PHYXS, &ctrl);
191 if (!(ctrl & MDIO_CTRL1_RESET)) {
192 udelay(2);
193 break;
194 }
195 }
196
197 if (ctrl & MDIO_CTRL1_RESET) {
198 status = IXGBE_ERR_RESET_FAILED;
199 hw_dbg(hw, "PHY reset polling failed to complete.\n");
200 }
201
202out:
203 return status;
149} 204}
150 205
151/** 206/**
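ixgbe_reset_phy_generic() now waits for the PHY to acknowledge the soft reset rather than returning right after the write: per the new comment, some PHYs need up to ~3 seconds, hence 30 polls at 100 ms plus a ~2 us settle. The poll-until-self-clear pattern in isolation (a sketch; the driver returns IXGBE_ERR_RESET_FAILED rather than an errno):

#include <linux/mdio.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include "ixgbe_phy.h"	/* driver headers assumed */

static int foo_wait_phy_reset(struct ixgbe_hw *hw)
{
	u16 ctrl = 0;
	u32 i;

	for (i = 0; i < 30; i++) {
		msleep(100);
		hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &ctrl);
		if (!(ctrl & MDIO_CTRL1_RESET)) {
			udelay(2);	/* ~1.7 us needed after completion */
			return 0;
		}
	}
	return -ETIMEDOUT;	/* reset bit never self-cleared */
}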
@@ -168,7 +223,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
168 else 223 else
169 gssr = IXGBE_GSSR_PHY0_SM; 224 gssr = IXGBE_GSSR_PHY0_SM;
170 225
171 if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) 226 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
172 status = IXGBE_ERR_SWFW_SYNC; 227 status = IXGBE_ERR_SWFW_SYNC;
173 228
174 if (status == 0) { 229 if (status == 0) {
@@ -240,7 +295,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
240 } 295 }
241 } 296 }
242 297
243 ixgbe_release_swfw_sync(hw, gssr); 298 hw->mac.ops.release_swfw_sync(hw, gssr);
244 } 299 }
245 300
246 return status; 301 return status;
@@ -266,7 +321,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
266 else 321 else
267 gssr = IXGBE_GSSR_PHY0_SM; 322 gssr = IXGBE_GSSR_PHY0_SM;
268 323
269 if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) 324 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
270 status = IXGBE_ERR_SWFW_SYNC; 325 status = IXGBE_ERR_SWFW_SYNC;
271 326
272 if (status == 0) { 327 if (status == 0) {
@@ -333,7 +388,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
333 } 388 }
334 } 389 }
335 390
336 ixgbe_release_swfw_sync(hw, gssr); 391 hw->mac.ops.release_swfw_sync(hw, gssr);
337 } 392 }
338 393
339 return status; 394 return status;
@@ -347,49 +402,90 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
347 **/ 402 **/
348s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) 403s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
349{ 404{
350 s32 status = IXGBE_NOT_IMPLEMENTED; 405 s32 status = 0;
351 u32 time_out; 406 u32 time_out;
352 u32 max_time_out = 10; 407 u32 max_time_out = 10;
353 u16 autoneg_reg; 408 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
409 bool autoneg = false;
410 ixgbe_link_speed speed;
354 411
355 /* 412 ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
356 * Set advertisement settings in PHY based on autoneg_advertised 413
357 * settings. If autoneg_advertised = 0, then advertise default values 414 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
358 * tnx devices cannot be "forced" to a autoneg 10G and fail. But can 415 /* Set or unset auto-negotiation 10G advertisement */
359 * for a 1G. 416 hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
360 */ 417 MDIO_MMD_AN,
361 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg); 418 &autoneg_reg);
362 419
363 if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
364 autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; 420 autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
365 else 421 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
366 autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; 422 autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
423
424 hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
425 MDIO_MMD_AN,
426 autoneg_reg);
427 }
428
429 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
430 /* Set or unset auto-negotiation 1G advertisement */
431 hw->phy.ops.read_reg(hw,
432 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
433 MDIO_MMD_AN,
434 &autoneg_reg);
435
436 autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
437 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
438 autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
439
440 hw->phy.ops.write_reg(hw,
441 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
442 MDIO_MMD_AN,
443 autoneg_reg);
444 }
367 445
368 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); 446 if (speed & IXGBE_LINK_SPEED_100_FULL) {
447 /* Set or unset auto-negotiation 100M advertisement */
448 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
449 MDIO_MMD_AN,
450 &autoneg_reg);
451
452 autoneg_reg &= ~(ADVERTISE_100FULL |
453 ADVERTISE_100HALF);
454 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
455 autoneg_reg |= ADVERTISE_100FULL;
456
457 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
458 MDIO_MMD_AN,
459 autoneg_reg);
460 }
369 461
370 /* Restart PHY autonegotiation and wait for completion */ 462 /* Restart PHY autonegotiation and wait for completion */
371 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, &autoneg_reg); 463 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
464 MDIO_MMD_AN, &autoneg_reg);
372 465
373 autoneg_reg |= MDIO_AN_CTRL1_RESTART; 466 autoneg_reg |= MDIO_AN_CTRL1_RESTART;
374 467
375 hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg); 468 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
469 MDIO_MMD_AN, autoneg_reg);
376 470
377 /* Wait for autonegotiation to finish */ 471 /* Wait for autonegotiation to finish */
378 for (time_out = 0; time_out < max_time_out; time_out++) { 472 for (time_out = 0; time_out < max_time_out; time_out++) {
379 udelay(10); 473 udelay(10);
380 /* Restart PHY autonegotiation and wait for completion */ 474 /* Restart PHY autonegotiation and wait for completion */
381 status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, 475 status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
382 &autoneg_reg); 476 MDIO_MMD_AN,
477 &autoneg_reg);
383 478
384 autoneg_reg &= MDIO_AN_STAT1_COMPLETE; 479 autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
385 if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) { 480 if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) {
386 status = 0;
387 break; 481 break;
388 } 482 }
389 } 483 }
390 484
391 if (time_out == max_time_out) 485 if (time_out == max_time_out) {
392 status = IXGBE_ERR_LINK_SETUP; 486 status = IXGBE_ERR_LINK_SETUP;
487 hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
488 }
393 489
394 return status; 490 return status;
395} 491}
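The rewritten ixgbe_setup_phy_link_generic() repeats one read-modify-write shape per speed, gated by what the PHY reports it can do at all. The 10G leg reduced to its skeleton (the 1G and 100M legs differ mainly in register and bit; driver headers assumed):

#include <linux/mdio.h>
#include "ixgbe_phy.h"	/* driver headers assumed */

static void foo_advertise_10g(struct ixgbe_hw *hw, ixgbe_link_speed capable)
{
	u16 reg;

	if (!(capable & IXGBE_LINK_SPEED_10GB_FULL))
		return;		/* never touch a register the PHY lacks */

	hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &reg);
	reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;		/* clear, then... */
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
		reg |= MDIO_AN_10GBT_CTRL_ADV10G;	/* ...opt back in */
	hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, reg);
}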
@@ -418,6 +514,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
418 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 514 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
419 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 515 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
420 516
517 if (speed & IXGBE_LINK_SPEED_100_FULL)
518 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
519
421 /* Setup link based on the new speed settings */ 520 /* Setup link based on the new speed settings */
422 hw->phy.ops.setup_link(hw); 521 hw->phy.ops.setup_link(hw);
423 522
@@ -425,6 +524,214 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
425} 524}
426 525
427/** 526/**
527 * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
528 * @hw: pointer to hardware structure
529 * @speed: pointer to link speed
530 * @autoneg: boolean auto-negotiation value
531 *
532 * Determines the link capabilities by reading the AUTOC register.
533 */
534s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
535 ixgbe_link_speed *speed,
536 bool *autoneg)
537{
538 s32 status = IXGBE_ERR_LINK_SETUP;
539 u16 speed_ability;
540
541 *speed = 0;
542 *autoneg = true;
543
544 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
545 &speed_ability);
546
547 if (status == 0) {
548 if (speed_ability & MDIO_SPEED_10G)
549 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
550 if (speed_ability & MDIO_PMA_SPEED_1000)
551 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
552 if (speed_ability & MDIO_PMA_SPEED_100)
553 *speed |= IXGBE_LINK_SPEED_100_FULL;
554 }
555
556 return status;
557}
558
559/**
560 * ixgbe_check_phy_link_tnx - Determine link and speed status
561 * @hw: pointer to hardware structure
562 *
563 * Reads the VS1 register to determine if link is up and the current speed for
564 * the PHY.
565 **/
566s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
567 bool *link_up)
568{
569 s32 status = 0;
570 u32 time_out;
571 u32 max_time_out = 10;
572 u16 phy_link = 0;
573 u16 phy_speed = 0;
574 u16 phy_data = 0;
575
576 /* Initialize speed and link to default case */
577 *link_up = false;
578 *speed = IXGBE_LINK_SPEED_10GB_FULL;
579
580 /*
581 * Check current speed and link status of the PHY register.
582 * This is a vendor specific register and may have to
583 * be changed for other copper PHYs.
584 */
585 for (time_out = 0; time_out < max_time_out; time_out++) {
586 udelay(10);
587 status = hw->phy.ops.read_reg(hw,
588 MDIO_STAT1,
589 MDIO_MMD_VEND1,
590 &phy_data);
591 phy_link = phy_data &
592 IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
593 phy_speed = phy_data &
594 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
595 if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
596 *link_up = true;
597 if (phy_speed ==
598 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
599 *speed = IXGBE_LINK_SPEED_1GB_FULL;
600 break;
601 }
602 }
603
604 return status;
605}
606
607/**
608 * ixgbe_setup_phy_link_tnx - Set and restart autoneg
609 * @hw: pointer to hardware structure
610 *
611 * Restart autonegotiation and PHY and waits for completion.
612 **/
613s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
614{
615 s32 status = 0;
616 u32 time_out;
617 u32 max_time_out = 10;
618 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
619 bool autoneg = false;
620 ixgbe_link_speed speed;
621
622 ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
623
624 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
625 /* Set or unset auto-negotiation 10G advertisement */
626 hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
627 MDIO_MMD_AN,
628 &autoneg_reg);
629
630 autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
631 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
632 autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
633
634 hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
635 MDIO_MMD_AN,
636 autoneg_reg);
637 }
638
639 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
640 /* Set or unset auto-negotiation 1G advertisement */
641 hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
642 MDIO_MMD_AN,
643 &autoneg_reg);
644
645 autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
646 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
647 autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
648
649 hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
650 MDIO_MMD_AN,
651 autoneg_reg);
652 }
653
654 if (speed & IXGBE_LINK_SPEED_100_FULL) {
655 /* Set or unset auto-negotiation 100M advertisement */
656 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
657 MDIO_MMD_AN,
658 &autoneg_reg);
659
660 autoneg_reg &= ~(ADVERTISE_100FULL |
661 ADVERTISE_100HALF);
662 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
663 autoneg_reg |= ADVERTISE_100FULL;
664
665 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
666 MDIO_MMD_AN,
667 autoneg_reg);
668 }
669
670 /* Restart PHY autonegotiation and wait for completion */
671 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
672 MDIO_MMD_AN, &autoneg_reg);
673
674 autoneg_reg |= MDIO_AN_CTRL1_RESTART;
675
676 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
677 MDIO_MMD_AN, autoneg_reg);
678
679 /* Wait for autonegotiation to finish */
680 for (time_out = 0; time_out < max_time_out; time_out++) {
681 udelay(10);
682 /* Restart PHY autonegotiation and wait for completion */
683 status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
684 MDIO_MMD_AN,
685 &autoneg_reg);
686
687 autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
688 if (autoneg_reg == MDIO_AN_STAT1_COMPLETE)
689 break;
690 }
691
692 if (time_out == max_time_out) {
693 status = IXGBE_ERR_LINK_SETUP;
694 hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
695 }
696
697 return status;
698}
699
700/**
701 * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
702 * @hw: pointer to hardware structure
703 * @firmware_version: pointer to the PHY Firmware Version
704 **/
705s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
706 u16 *firmware_version)
707{
708 s32 status = 0;
709
710 status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
711 MDIO_MMD_VEND1,
712 firmware_version);
713
714 return status;
715}
716
717/**
718 * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
719 * @hw: pointer to hardware structure
720 * @firmware_version: pointer to the PHY Firmware Version
721 **/
722s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
723 u16 *firmware_version)
724{
725 s32 status = 0;
726
727 status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
728 MDIO_MMD_VEND1,
729 firmware_version);
730
731 return status;
732}
733
734/**
428 * ixgbe_reset_phy_nl - Performs a PHY reset 735 * ixgbe_reset_phy_nl - Performs a PHY reset
429 * @hw: pointer to hardware structure 736 * @hw: pointer to hardware structure
430 **/ 737 **/
@@ -448,7 +755,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
448 &phy_data); 755 &phy_data);
449 if ((phy_data & MDIO_CTRL1_RESET) == 0) 756 if ((phy_data & MDIO_CTRL1_RESET) == 0)
450 break; 757 break;
451 msleep(10); 758 usleep_range(10000, 20000);
452 } 759 }
453 760
454 if ((phy_data & MDIO_CTRL1_RESET) != 0) { 761 if ((phy_data & MDIO_CTRL1_RESET) != 0) {
@@ -477,7 +784,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
477 case IXGBE_DELAY_NL: 784 case IXGBE_DELAY_NL:
478 data_offset++; 785 data_offset++;
479 hw_dbg(hw, "DELAY: %d MS\n", edata); 786 hw_dbg(hw, "DELAY: %d MS\n", edata);
480 msleep(edata); 787 usleep_range(edata * 1000, edata * 2000);
481 break; 788 break;
482 case IXGBE_DATA_NL: 789 case IXGBE_DATA_NL:
483 hw_dbg(hw, "DATA:\n"); 790 hw_dbg(hw, "DATA:\n");
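The two msleep() to usleep_range() conversions above follow the usual kernel guidance: for small values msleep() can overshoot badly because it rounds up to jiffies, while usleep_range() gives hrtimers an explicit window the scheduler may coalesce. The rule of thumb in code:

#include <linux/delay.h>

static void foo_settle_10ms(void)
{
	/* was msleep(10): could sleep ~20 ms on HZ=100 */
	usleep_range(10000, 20000);	/* bounded 10-20 ms window */
}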
@@ -520,11 +827,10 @@ out:
520} 827}
521 828
522/** 829/**
523 * ixgbe_identify_sfp_module_generic - Identifies SFP module and assigns 830 * ixgbe_identify_sfp_module_generic - Identifies SFP modules
524 * the PHY type.
525 * @hw: pointer to hardware structure 831 * @hw: pointer to hardware structure
526 * 832 *
527 * Searches for and indentifies the SFP module. Assings appropriate PHY type. 833 * Searches for and identifies the SFP module and assigns appropriate PHY type.
528 **/ 834 **/
529s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) 835s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
530{ 836{
@@ -545,41 +851,62 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
545 goto out; 851 goto out;
546 } 852 }
547 853
548 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, 854 status = hw->phy.ops.read_i2c_eeprom(hw,
855 IXGBE_SFF_IDENTIFIER,
549 &identifier); 856 &identifier);
550 857
551 if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) { 858 if (status == IXGBE_ERR_SWFW_SYNC ||
552 status = IXGBE_ERR_SFP_NOT_PRESENT; 859 status == IXGBE_ERR_I2C ||
553 hw->phy.sfp_type = ixgbe_sfp_type_not_present; 860 status == IXGBE_ERR_SFP_NOT_PRESENT)
554 if (hw->phy.type != ixgbe_phy_nl) { 861 goto err_read_i2c_eeprom;
555 hw->phy.id = 0;
556 hw->phy.type = ixgbe_phy_unknown;
557 }
558 goto out;
559 }
560 862
561 if (identifier == IXGBE_SFF_IDENTIFIER_SFP) { 863 /* LAN ID is needed for sfp_type determination */
562 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES, 864 hw->mac.ops.set_lan_id(hw);
563 &comp_codes_1g); 865
564 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES, 866 if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
565 &comp_codes_10g); 867 hw->phy.type = ixgbe_phy_sfp_unsupported;
566 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY, 868 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
567 &cable_tech); 869 } else {
568 870 status = hw->phy.ops.read_i2c_eeprom(hw,
569 /* ID Module 871 IXGBE_SFF_1GBE_COMP_CODES,
570 * ========= 872 &comp_codes_1g);
571 * 0 SFP_DA_CU 873
572 * 1 SFP_SR 874 if (status == IXGBE_ERR_SWFW_SYNC ||
573 * 2 SFP_LR 875 status == IXGBE_ERR_I2C ||
574 * 3 SFP_DA_CORE0 - 82599-specific 876 status == IXGBE_ERR_SFP_NOT_PRESENT)
575 * 4 SFP_DA_CORE1 - 82599-specific 877 goto err_read_i2c_eeprom;
576 * 5 SFP_SR/LR_CORE0 - 82599-specific 878
577 * 6 SFP_SR/LR_CORE1 - 82599-specific 879 status = hw->phy.ops.read_i2c_eeprom(hw,
578 * 7 SFP_act_lmt_DA_CORE0 - 82599-specific 880 IXGBE_SFF_10GBE_COMP_CODES,
579 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific 881 &comp_codes_10g);
580 * 9 SFP_1g_cu_CORE0 - 82599-specific 882
581 * 10 SFP_1g_cu_CORE1 - 82599-specific 883 if (status == IXGBE_ERR_SWFW_SYNC ||
582 */ 884 status == IXGBE_ERR_I2C ||
885 status == IXGBE_ERR_SFP_NOT_PRESENT)
886 goto err_read_i2c_eeprom;
887 status = hw->phy.ops.read_i2c_eeprom(hw,
888 IXGBE_SFF_CABLE_TECHNOLOGY,
889 &cable_tech);
890
891 if (status == IXGBE_ERR_SWFW_SYNC ||
892 status == IXGBE_ERR_I2C ||
893 status == IXGBE_ERR_SFP_NOT_PRESENT)
894 goto err_read_i2c_eeprom;
895
896 /* ID Module
897 * =========
898 * 0 SFP_DA_CU
899 * 1 SFP_SR
900 * 2 SFP_LR
901 * 3 SFP_DA_CORE0 - 82599-specific
902 * 4 SFP_DA_CORE1 - 82599-specific
903 * 5 SFP_SR/LR_CORE0 - 82599-specific
904 * 6 SFP_SR/LR_CORE1 - 82599-specific
905 * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
906 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
907 * 9 SFP_1g_cu_CORE0 - 82599-specific
908 * 10 SFP_1g_cu_CORE1 - 82599-specific
909 */
583 if (hw->mac.type == ixgbe_mac_82598EB) { 910 if (hw->mac.type == ixgbe_mac_82598EB) {
584 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 911 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
585 hw->phy.sfp_type = ixgbe_sfp_type_da_cu; 912 hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
@@ -611,31 +938,27 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
611 ixgbe_sfp_type_da_act_lmt_core1; 938 ixgbe_sfp_type_da_act_lmt_core1;
612 } else { 939 } else {
613 hw->phy.sfp_type = 940 hw->phy.sfp_type =
614 ixgbe_sfp_type_unknown; 941 ixgbe_sfp_type_unknown;
615 } 942 }
616 } else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) 943 } else if (comp_codes_10g &
617 if (hw->bus.lan_id == 0) 944 (IXGBE_SFF_10GBASESR_CAPABLE |
618 hw->phy.sfp_type = 945 IXGBE_SFF_10GBASELR_CAPABLE)) {
619 ixgbe_sfp_type_srlr_core0;
620 else
621 hw->phy.sfp_type =
622 ixgbe_sfp_type_srlr_core1;
623 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
624 if (hw->bus.lan_id == 0) 946 if (hw->bus.lan_id == 0)
625 hw->phy.sfp_type = 947 hw->phy.sfp_type =
626 ixgbe_sfp_type_srlr_core0; 948 ixgbe_sfp_type_srlr_core0;
627 else 949 else
628 hw->phy.sfp_type = 950 hw->phy.sfp_type =
629 ixgbe_sfp_type_srlr_core1; 951 ixgbe_sfp_type_srlr_core1;
630 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) 952 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
631 if (hw->bus.lan_id == 0) 953 if (hw->bus.lan_id == 0)
632 hw->phy.sfp_type = 954 hw->phy.sfp_type =
633 ixgbe_sfp_type_1g_cu_core0; 955 ixgbe_sfp_type_1g_cu_core0;
634 else 956 else
635 hw->phy.sfp_type = 957 hw->phy.sfp_type =
636 ixgbe_sfp_type_1g_cu_core1; 958 ixgbe_sfp_type_1g_cu_core1;
637 else 959 } else {
638 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 960 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
961 }
639 } 962 }
640 963
641 if (hw->phy.sfp_type != stored_sfp_type) 964 if (hw->phy.sfp_type != stored_sfp_type)
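The separate SR and LR branches above assigned identical srlr_core0/core1 types, so the rewrite folds them into a single test against the union of both capability bits. Condensed (a ternary stands in for the driver's if/else; driver headers assumed):

#include "ixgbe_phy.h"	/* driver headers assumed */

static void foo_set_srlr_type(struct ixgbe_hw *hw, u8 comp_codes_10g)
{
	if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
			      IXGBE_SFF_10GBASELR_CAPABLE))
		hw->phy.sfp_type = hw->bus.lan_id ?
				   ixgbe_sfp_type_srlr_core1 :
				   ixgbe_sfp_type_srlr_core0;
}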
@@ -652,16 +975,33 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
652 /* Determine PHY vendor */ 975 /* Determine PHY vendor */
653 if (hw->phy.type != ixgbe_phy_nl) { 976 if (hw->phy.type != ixgbe_phy_nl) {
654 hw->phy.id = identifier; 977 hw->phy.id = identifier;
655 hw->phy.ops.read_i2c_eeprom(hw, 978 status = hw->phy.ops.read_i2c_eeprom(hw,
656 IXGBE_SFF_VENDOR_OUI_BYTE0, 979 IXGBE_SFF_VENDOR_OUI_BYTE0,
657 &oui_bytes[0]); 980 &oui_bytes[0]);
658 hw->phy.ops.read_i2c_eeprom(hw, 981
982 if (status == IXGBE_ERR_SWFW_SYNC ||
983 status == IXGBE_ERR_I2C ||
984 status == IXGBE_ERR_SFP_NOT_PRESENT)
985 goto err_read_i2c_eeprom;
986
987 status = hw->phy.ops.read_i2c_eeprom(hw,
659 IXGBE_SFF_VENDOR_OUI_BYTE1, 988 IXGBE_SFF_VENDOR_OUI_BYTE1,
660 &oui_bytes[1]); 989 &oui_bytes[1]);
661 hw->phy.ops.read_i2c_eeprom(hw, 990
991 if (status == IXGBE_ERR_SWFW_SYNC ||
992 status == IXGBE_ERR_I2C ||
993 status == IXGBE_ERR_SFP_NOT_PRESENT)
994 goto err_read_i2c_eeprom;
995
996 status = hw->phy.ops.read_i2c_eeprom(hw,
662 IXGBE_SFF_VENDOR_OUI_BYTE2, 997 IXGBE_SFF_VENDOR_OUI_BYTE2,
663 &oui_bytes[2]); 998 &oui_bytes[2]);
664 999
1000 if (status == IXGBE_ERR_SWFW_SYNC ||
1001 status == IXGBE_ERR_I2C ||
1002 status == IXGBE_ERR_SFP_NOT_PRESENT)
1003 goto err_read_i2c_eeprom;
1004
665 vendor_oui = 1005 vendor_oui =
666 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | 1006 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
667 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | 1007 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
@@ -671,7 +1011,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
671 case IXGBE_SFF_VENDOR_OUI_TYCO: 1011 case IXGBE_SFF_VENDOR_OUI_TYCO:
672 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 1012 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
673 hw->phy.type = 1013 hw->phy.type =
674 ixgbe_phy_sfp_passive_tyco; 1014 ixgbe_phy_sfp_passive_tyco;
675 break; 1015 break;
676 case IXGBE_SFF_VENDOR_OUI_FTL: 1016 case IXGBE_SFF_VENDOR_OUI_FTL:
677 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) 1017 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
@@ -688,7 +1028,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
688 default: 1028 default:
689 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 1029 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
690 hw->phy.type = 1030 hw->phy.type =
691 ixgbe_phy_sfp_passive_unknown; 1031 ixgbe_phy_sfp_passive_unknown;
692 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) 1032 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
693 hw->phy.type = 1033 hw->phy.type =
694 ixgbe_phy_sfp_active_unknown; 1034 ixgbe_phy_sfp_active_unknown;
@@ -698,7 +1038,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
698 } 1038 }
699 } 1039 }
700 1040
701 /* All passive DA cables are supported */ 1041 /* Allow any DA cable vendor */
702 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | 1042 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
703 IXGBE_SFF_DA_ACTIVE_CABLE)) { 1043 IXGBE_SFF_DA_ACTIVE_CABLE)) {
704 status = 0; 1044 status = 0;
@@ -720,7 +1060,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
720 goto out; 1060 goto out;
721 } 1061 }
722 1062
723 /* This is guaranteed to be 82599, no need to check for NULL */
724 hw->mac.ops.get_device_caps(hw, &enforce_sfp); 1063 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
725 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && 1064 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
726 !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || 1065 !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
@@ -740,15 +1079,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
740 1079
741out: 1080out:
742 return status; 1081 return status;
1082
1083err_read_i2c_eeprom:
1084 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1085 if (hw->phy.type != ixgbe_phy_nl) {
1086 hw->phy.id = 0;
1087 hw->phy.type = ixgbe_phy_unknown;
1088 }
1089 return IXGBE_ERR_SFP_NOT_PRESENT;
743} 1090}
744 1091
745/** 1092/**
746 * ixgbe_get_sfp_init_sequence_offsets - Checks the MAC's EEPROM to see 1093 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
747 * if it supports a given SFP+ module type, if so it returns the offsets to the
748 * phy init sequence block.
749 * @hw: pointer to hardware structure 1094 * @hw: pointer to hardware structure
750 * @list_offset: offset to the SFP ID list 1095 * @list_offset: offset to the SFP ID list
751 * @data_offset: offset to the SFP data block 1096 * @data_offset: offset to the SFP data block
1097 *
1098 * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
1099 * so it returns the offsets to the phy init sequence block.
752 **/ 1100 **/
753s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 1101s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
754 u16 *list_offset, 1102 u16 *list_offset,
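Every read_i2c_eeprom() call in the rewritten identify routine checks for the same three failure codes and jumps to the single err_read_i2c_eeprom label added above, which resets the identification state in one place. The structure in miniature (simplified to treat any nonzero status as failure; driver headers assumed):

#include "ixgbe_phy.h"	/* driver headers assumed */

static s32 foo_identify(struct ixgbe_hw *hw)
{
	u8 id, codes;
	s32 status;

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, &id);
	if (status)
		goto err_read_i2c_eeprom;

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES,
					     &codes);
	if (status)
		goto err_read_i2c_eeprom;

	return 0;

err_read_i2c_eeprom:
	/* one place to undo a partial identification */
	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
	hw->phy.id = 0;
	hw->phy.type = ixgbe_phy_unknown;
	return IXGBE_ERR_SFP_NOT_PRESENT;
}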
@@ -863,11 +1211,22 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
863 u8 dev_addr, u8 *data) 1211 u8 dev_addr, u8 *data)
864{ 1212{
865 s32 status = 0; 1213 s32 status = 0;
866 u32 max_retry = 1; 1214 u32 max_retry = 10;
867 u32 retry = 0; 1215 u32 retry = 0;
1216 u16 swfw_mask = 0;
868 bool nack = 1; 1217 bool nack = 1;
869 1218
1219 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1220 swfw_mask = IXGBE_GSSR_PHY1_SM;
1221 else
1222 swfw_mask = IXGBE_GSSR_PHY0_SM;
1223
870 do { 1224 do {
1225 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
1226 status = IXGBE_ERR_SWFW_SYNC;
1227 goto read_byte_out;
1228 }
1229
871 ixgbe_i2c_start(hw); 1230 ixgbe_i2c_start(hw);
872 1231
873 /* Device Address and write indication */ 1232 /* Device Address and write indication */
@@ -910,6 +1269,8 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
910 break; 1269 break;
911 1270
912fail: 1271fail:
1272 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1273 msleep(100);
913 ixgbe_i2c_bus_clear(hw); 1274 ixgbe_i2c_bus_clear(hw);
914 retry++; 1275 retry++;
915 if (retry < max_retry) 1276 if (retry < max_retry)
@@ -919,6 +1280,9 @@ fail:
919 1280
920 } while (retry < max_retry); 1281 } while (retry < max_retry);
921 1282
1283 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1284
1285read_byte_out:
922 return status; 1286 return status;
923} 1287}
924 1288
@@ -937,6 +1301,17 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
937 s32 status = 0; 1301 s32 status = 0;
938 u32 max_retry = 1; 1302 u32 max_retry = 1;
939 u32 retry = 0; 1303 u32 retry = 0;
1304 u16 swfw_mask = 0;
1305
1306 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1307 swfw_mask = IXGBE_GSSR_PHY1_SM;
1308 else
1309 swfw_mask = IXGBE_GSSR_PHY0_SM;
1310
1311 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
1312 status = IXGBE_ERR_SWFW_SYNC;
1313 goto write_byte_out;
1314 }
940 1315
941 do { 1316 do {
942 ixgbe_i2c_start(hw); 1317 ixgbe_i2c_start(hw);
@@ -977,6 +1352,9 @@ fail:
977 hw_dbg(hw, "I2C byte write error.\n"); 1352 hw_dbg(hw, "I2C byte write error.\n");
978 } while (retry < max_retry); 1353 } while (retry < max_retry);
979 1354
1355 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1356
1357write_byte_out:
980 return status; 1358 return status;
981} 1359}
982 1360
@@ -1295,6 +1673,8 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
1295 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1673 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1296 u32 i; 1674 u32 i;
1297 1675
1676 ixgbe_i2c_start(hw);
1677
1298 ixgbe_set_i2c_data(hw, &i2cctl, 1); 1678 ixgbe_set_i2c_data(hw, &i2cctl, 1);
1299 1679
1300 for (i = 0; i < 9; i++) { 1680 for (i = 0; i < 9; i++) {
@@ -1309,76 +1689,14 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
1309 udelay(IXGBE_I2C_T_LOW); 1689 udelay(IXGBE_I2C_T_LOW);
1310 } 1690 }
1311 1691
1692 ixgbe_i2c_start(hw);
1693
1312 /* Put the i2c bus back to default state */ 1694 /* Put the i2c bus back to default state */
1313 ixgbe_i2c_stop(hw); 1695 ixgbe_i2c_stop(hw);
1314} 1696}
1315 1697
1316/** 1698/**
1317 * ixgbe_check_phy_link_tnx - Determine link and speed status 1699 * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
1318 * @hw: pointer to hardware structure
1319 *
1320 * Reads the VS1 register to determine if link is up and the current speed for
1321 * the PHY.
1322 **/
1323s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
1324 bool *link_up)
1325{
1326 s32 status = 0;
1327 u32 time_out;
1328 u32 max_time_out = 10;
1329 u16 phy_link = 0;
1330 u16 phy_speed = 0;
1331 u16 phy_data = 0;
1332
1333 /* Initialize speed and link to default case */
1334 *link_up = false;
1335 *speed = IXGBE_LINK_SPEED_10GB_FULL;
1336
1337 /*
1338 * Check current speed and link status of the PHY register.
1339 * This is a vendor specific register and may have to
1340 * be changed for other copper PHYs.
1341 */
1342 for (time_out = 0; time_out < max_time_out; time_out++) {
1343 udelay(10);
1344 status = hw->phy.ops.read_reg(hw,
1345 IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
1346 MDIO_MMD_VEND1,
1347 &phy_data);
1348 phy_link = phy_data &
1349 IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
1350 phy_speed = phy_data &
1351 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
1352 if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
1353 *link_up = true;
1354 if (phy_speed ==
1355 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
1356 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1357 break;
1358 }
1359 }
1360
1361 return status;
1362}
1363
1364/**
1365 * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
1366 * @hw: pointer to hardware structure
1367 * @firmware_version: pointer to the PHY Firmware Version
1368 **/
1369s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
1370 u16 *firmware_version)
1371{
1372 s32 status = 0;
1373
1374 status = hw->phy.ops.read_reg(hw, TNX_FW_REV, MDIO_MMD_VEND1,
1375 firmware_version);
1376
1377 return status;
1378}
1379
1380/**
1381 * ixgbe_tn_check_overtemp - Checks if an overtemp occured.
1382 * @hw: pointer to hardware structure 1700 * @hw: pointer to hardware structure
1383 * 1701 *
1384 * Checks if the LASI temp alarm status was triggered due to overtemp 1702 * Checks if the LASI temp alarm status was triggered due to overtemp
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index fb3898f12fc5..197bdd13106a 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -58,6 +58,10 @@
58#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 58#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
59#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 59#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
60 60
61/* Flow control defines */
62#define IXGBE_TAF_SYM_PAUSE 0x400
63#define IXGBE_TAF_ASM_PAUSE 0x800
64
61/* Bit-shift macros */ 65/* Bit-shift macros */
62#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 66#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
63#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 67#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
@@ -96,13 +100,19 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
96 ixgbe_link_speed speed, 100 ixgbe_link_speed speed,
97 bool autoneg, 101 bool autoneg,
98 bool autoneg_wait_to_complete); 102 bool autoneg_wait_to_complete);
103s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
104 ixgbe_link_speed *speed,
105 bool *autoneg);
99 106
100/* PHY specific */ 107/* PHY specific */
101s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, 108s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
102 ixgbe_link_speed *speed, 109 ixgbe_link_speed *speed,
103 bool *link_up); 110 bool *link_up);
111s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
104s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, 112s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
105 u16 *firmware_version); 113 u16 *firmware_version);
114s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
115 u16 *firmware_version);
106 116
107s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); 117s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
108s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); 118s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 49661a138e22..ac99b0458fe2 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -43,8 +43,8 @@
43 43
44#include "ixgbe_sriov.h" 44#include "ixgbe_sriov.h"
45 45
46int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, 46static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
47 int entries, u16 *hash_list, u32 vf) 47 int entries, u16 *hash_list, u32 vf)
48{ 48{
49 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; 49 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
50 struct ixgbe_hw *hw = &adapter->hw; 50 struct ixgbe_hw *hw = &adapter->hw;
@@ -68,7 +68,7 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
68 * addresses 68 * addresses
69 */ 69 */
70 for (i = 0; i < entries; i++) { 70 for (i = 0; i < entries; i++) {
71 vfinfo->vf_mc_hashes[i] = hash_list[i];; 71 vfinfo->vf_mc_hashes[i] = hash_list[i];
72 } 72 }
73 73
74 for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { 74 for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
@@ -82,6 +82,21 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
82 return 0; 82 return 0;
83} 83}
84 84
85static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
86{
87 struct ixgbe_hw *hw = &adapter->hw;
88 struct list_head *pos;
89 struct vf_macvlans *entry;
90
91 list_for_each(pos, &adapter->vf_mvs.l) {
92 entry = list_entry(pos, struct vf_macvlans, l);
93 if (entry->free == false)
94 hw->mac.ops.set_rar(hw, entry->rar_entry,
95 entry->vf_macvlan,
96 entry->vf, IXGBE_RAH_AV);
97 }
98}
99
85void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) 100void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
86{ 101{
87 struct ixgbe_hw *hw = &adapter->hw; 102 struct ixgbe_hw *hw = &adapter->hw;
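ixgbe_restore_vf_macvlans(), added above, walks the VF macvlan list with the open-coded list_for_each()/list_entry() pair. For comparison, the same traversal with the list_for_each_entry() helper (behavior unchanged; driver headers assumed):

#include <linux/list.h>
#include "ixgbe.h"	/* driver headers assumed */

static void foo_restore_vf_macvlans(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct vf_macvlans *entry;

	list_for_each_entry(entry, &adapter->vf_mvs.l, l)
		if (!entry->free)
			hw->mac.ops.set_rar(hw, entry->rar_entry,
					    entry->vf_macvlan, entry->vf,
					    IXGBE_RAH_AV);
}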
@@ -102,19 +117,48 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
102 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); 117 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
103 } 118 }
104 } 119 }
120
121 /* Restore any VF macvlans */
122 ixgbe_restore_vf_macvlans(adapter);
105} 123}
106 124
107int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) 125static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
126 u32 vf)
108{ 127{
109 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 128 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
110} 129}
111 130
131static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
132{
133 struct ixgbe_hw *hw = &adapter->hw;
134 int new_mtu = msgbuf[1];
135 u32 max_frs;
136 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
137
138 /* Only X540 supports jumbo frames in IOV mode */
139 if (adapter->hw.mac.type != ixgbe_mac_X540)
140 return;
141
142 /* MTU < 68 is an error and causes problems on some kernels */
143 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
144 e_err(drv, "VF mtu %d out of range\n", new_mtu);
145 return;
146 }
147
148 max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
149 IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
150 if (max_frs < new_mtu) {
151 max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
152 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
153 }
154
155 e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
156 }
112 157
113 void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) 158static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
114{ 159{
115 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 160 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
116 vmolr |= (IXGBE_VMOLR_ROMPE | 161 vmolr |= (IXGBE_VMOLR_ROMPE |
117 IXGBE_VMOLR_ROPE |
118 IXGBE_VMOLR_BAM); 162 IXGBE_VMOLR_BAM);
119 if (aupe) 163 if (aupe)
120 vmolr |= IXGBE_VMOLR_AUPE; 164 vmolr |= IXGBE_VMOLR_AUPE;
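For the range check in ixgbe_set_vf_lpe() above, the VF's requested MTU is padded with the Ethernet header and FCS before comparison. A worked example (the 9728-byte ceiling is the value IXGBE_MAX_JUMBO_FRAME_SIZE is assumed to carry here):

#include <linux/if_ether.h>

/* VF asks for MTU 9000: 9000 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 9018,
 * inside the assumed 9728-byte jumbo ceiling and above the 68-byte
 * floor, so the request is honored; MTU 60 would fail the floor. */
static bool foo_vf_mtu_ok(int new_mtu)
{
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	return new_mtu >= 68 && max_frame <= 9728;
}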
@@ -134,7 +178,7 @@ static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
134 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); 178 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
135} 179}
136 180
137inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) 181static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
138{ 182{
139 struct ixgbe_hw *hw = &adapter->hw; 183 struct ixgbe_hw *hw = &adapter->hw;
140 int rar_entry = hw->mac.num_rar_entries - (vf + 1); 184 int rar_entry = hw->mac.num_rar_entries - (vf + 1);
@@ -162,8 +206,8 @@ inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
162 hw->mac.ops.clear_rar(hw, rar_entry); 206 hw->mac.ops.clear_rar(hw, rar_entry);
163} 207}
164 208
165int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, 209static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
166 int vf, unsigned char *mac_addr) 210 int vf, unsigned char *mac_addr)
167{ 211{
168 struct ixgbe_hw *hw = &adapter->hw; 212 struct ixgbe_hw *hw = &adapter->hw;
169 int rar_entry = hw->mac.num_rar_entries - (vf + 1); 213 int rar_entry = hw->mac.num_rar_entries - (vf + 1);
@@ -174,11 +218,65 @@ int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
174 return 0; 218 return 0;
175} 219}
176 220
221static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
222 int vf, int index, unsigned char *mac_addr)
223{
224 struct ixgbe_hw *hw = &adapter->hw;
225 struct list_head *pos;
226 struct vf_macvlans *entry;
227
228 if (index <= 1) {
229 list_for_each(pos, &adapter->vf_mvs.l) {
230 entry = list_entry(pos, struct vf_macvlans, l);
231 if (entry->vf == vf) {
232 entry->vf = -1;
233 entry->free = true;
234 entry->is_macvlan = false;
235 hw->mac.ops.clear_rar(hw, entry->rar_entry);
236 }
237 }
238 }
239
240 /*
241 * If index was zero then we were asked to clear the uc list
242 * for the VF. We're done.
243 */
244 if (!index)
245 return 0;
246
247 entry = NULL;
248
249 list_for_each(pos, &adapter->vf_mvs.l) {
250 entry = list_entry(pos, struct vf_macvlans, l);
251 if (entry->free)
252 break;
253 }
254
255 /*
256 * If we traversed the entire list and didn't find a free entry
257 * then we're out of space on the RAR table. Also entry may
258 * be NULL because the original memory allocation for the list
259 * failed, which is not fatal but does mean we can't support
260 * VF requests for MACVLAN because we couldn't allocate
261 * memory for the list management required.
262 */
263 if (!entry || !entry->free)
264 return -ENOSPC;
265
266 entry->free = false;
267 entry->is_macvlan = true;
268 entry->vf = vf;
269 memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
270
271 hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
272
273 return 0;
274}
275
177int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) 276int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
178{ 277{
179 unsigned char vf_mac_addr[6]; 278 unsigned char vf_mac_addr[6];
180 struct net_device *netdev = pci_get_drvdata(pdev); 279 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
181 struct ixgbe_adapter *adapter = netdev_priv(netdev);
182 unsigned int vfn = (event_mask & 0x3f); 280 unsigned int vfn = (event_mask & 0x3f);
183 281
184 bool enable = ((event_mask & 0x10000000U) != 0); 282 bool enable = ((event_mask & 0x10000000U) != 0);
@@ -197,7 +295,7 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
197 return 0; 295 return 0;
198} 296}
199 297
200inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) 298static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
201{ 299{
202 struct ixgbe_hw *hw = &adapter->hw; 300 struct ixgbe_hw *hw = &adapter->hw;
203 u32 reg; 301 u32 reg;
@@ -215,18 +313,24 @@ inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
215 reg |= (reg | (1 << vf_shift)); 313 reg |= (reg | (1 << vf_shift));
216 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); 314 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
217 315
316 /* Enable counting of spoofed packets in the SSVPC register */
317 reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
318 reg |= (1 << vf_shift);
319 IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
320
218 ixgbe_vf_reset_event(adapter, vf); 321 ixgbe_vf_reset_event(adapter, vf);
219} 322}
220 323
221static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) 324static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
222{ 325{
223 u32 mbx_size = IXGBE_VFMAILBOX_SIZE; 326 u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
224 u32 msgbuf[mbx_size]; 327 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
225 struct ixgbe_hw *hw = &adapter->hw; 328 struct ixgbe_hw *hw = &adapter->hw;
226 s32 retval; 329 s32 retval;
227 int entries; 330 int entries;
228 u16 *hash_list; 331 u16 *hash_list;
229 int add, vid; 332 int add, vid, index;
333 u8 *new_mac;
230 334
231 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); 335 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
232 336
@@ -244,15 +348,22 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
244 348
245 if (msgbuf[0] == IXGBE_VF_RESET) { 349 if (msgbuf[0] == IXGBE_VF_RESET) {
246 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; 350 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
247 u8 *addr = (u8 *)(&msgbuf[1]); 351 new_mac = (u8 *)(&msgbuf[1]);
248 e_info(probe, "VF Reset msg received from vf %d\n", vf); 352 e_info(probe, "VF Reset msg received from vf %d\n", vf);
249 adapter->vfinfo[vf].clear_to_send = false; 353 adapter->vfinfo[vf].clear_to_send = false;
250 ixgbe_vf_reset_msg(adapter, vf); 354 ixgbe_vf_reset_msg(adapter, vf);
251 adapter->vfinfo[vf].clear_to_send = true; 355 adapter->vfinfo[vf].clear_to_send = true;
252 356
357 if (is_valid_ether_addr(new_mac) &&
358 !adapter->vfinfo[vf].pf_set_mac)
359 ixgbe_set_vf_mac(adapter, vf, vf_mac);
360 else
361 ixgbe_set_vf_mac(adapter,
362 vf, adapter->vfinfo[vf].vf_mac_addresses);
363
253 /* reply to reset with ack and vf mac address */ 364 /* reply to reset with ack and vf mac address */
254 msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; 365 msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
255 memcpy(addr, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS); 366 memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
256 /* 367 /*
257 * Piggyback the multicast filter type so VF can compute the 368 * Piggyback the multicast filter type so VF can compute the
258 * correct vectors 369 * correct vectors
@@ -271,14 +382,16 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
271 382
272 switch ((msgbuf[0] & 0xFFFF)) { 383 switch ((msgbuf[0] & 0xFFFF)) {
273 case IXGBE_VF_SET_MAC_ADDR: 384 case IXGBE_VF_SET_MAC_ADDR:
274 { 385 new_mac = ((u8 *)(&msgbuf[1]));
275 u8 *new_mac = ((u8 *)(&msgbuf[1])); 386 if (is_valid_ether_addr(new_mac) &&
276 if (is_valid_ether_addr(new_mac) && 387 !adapter->vfinfo[vf].pf_set_mac) {
277 !adapter->vfinfo[vf].pf_set_mac) 388 ixgbe_set_vf_mac(adapter, vf, new_mac);
278 ixgbe_set_vf_mac(adapter, vf, new_mac); 389 } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
279 else 390 new_mac, ETH_ALEN)) {
280 ixgbe_set_vf_mac(adapter, 391 e_warn(drv, "VF %d attempted to override "
281 vf, adapter->vfinfo[vf].vf_mac_addresses); 392 "administratively set MAC address\nReload "
393 "the VF driver to resume operations\n", vf);
394 retval = -1;
282 } 395 }
283 break; 396 break;
284 case IXGBE_VF_SET_MULTICAST: 397 case IXGBE_VF_SET_MULTICAST:
@@ -289,13 +402,39 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
289 hash_list, vf); 402 hash_list, vf);
290 break; 403 break;
291 case IXGBE_VF_SET_LPE: 404 case IXGBE_VF_SET_LPE:
292 WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE); 405 ixgbe_set_vf_lpe(adapter, msgbuf);
293 break; 406 break;
294 case IXGBE_VF_SET_VLAN: 407 case IXGBE_VF_SET_VLAN:
295 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) 408 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
296 >> IXGBE_VT_MSGINFO_SHIFT; 409 >> IXGBE_VT_MSGINFO_SHIFT;
297 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); 410 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
298 retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); 411 if (adapter->vfinfo[vf].pf_vlan) {
412 e_warn(drv, "VF %d attempted to override "
413 "administratively set VLAN configuration\n"
414 "Reload the VF driver to resume operations\n",
415 vf);
416 retval = -1;
417 } else {
418 retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
419 }
420 break;
421 case IXGBE_VF_SET_MACVLAN:
422 index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
423 IXGBE_VT_MSGINFO_SHIFT;
424 /*
425 * If the VF is allowed to set MAC filters then turn off
426 * anti-spoofing to avoid false positives. An index
 427 * greater than 0 indicates the VF is setting a
428 * macvlan MAC filter.
429 */
430 if (index > 0 && adapter->antispoofing_enabled) {
431 hw->mac.ops.set_mac_anti_spoofing(hw, false,
432 adapter->num_vfs);
433 hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
434 adapter->antispoofing_enabled = false;
435 }
436 retval = ixgbe_set_vf_macvlan(adapter, vf, index,
437 (unsigned char *)(&msgbuf[1]));
299 break; 438 break;
300 default: 439 default:
301 e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); 440 e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
@@ -394,6 +533,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
394{ 533{
395 int err = 0; 534 int err = 0;
396 struct ixgbe_adapter *adapter = netdev_priv(netdev); 535 struct ixgbe_adapter *adapter = netdev_priv(netdev);
536 struct ixgbe_hw *hw = &adapter->hw;
397 537
398 if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) 538 if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
399 return -EINVAL; 539 return -EINVAL;
@@ -402,7 +542,9 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
402 if (err) 542 if (err)
403 goto out; 543 goto out;
404 ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); 544 ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
405 ixgbe_set_vmolr(&adapter->hw, vf, false); 545 ixgbe_set_vmolr(hw, vf, false);
546 if (adapter->antispoofing_enabled)
547 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
406 adapter->vfinfo[vf].pf_vlan = vlan; 548 adapter->vfinfo[vf].pf_vlan = vlan;
407 adapter->vfinfo[vf].pf_qos = qos; 549 adapter->vfinfo[vf].pf_qos = qos;
408 dev_info(&adapter->pdev->dev, 550 dev_info(&adapter->pdev->dev,
@@ -419,7 +561,8 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
419 err = ixgbe_set_vf_vlan(adapter, false, 561 err = ixgbe_set_vf_vlan(adapter, false,
420 adapter->vfinfo[vf].pf_vlan, vf); 562 adapter->vfinfo[vf].pf_vlan, vf);
421 ixgbe_set_vmvir(adapter, vlan, vf); 563 ixgbe_set_vmvir(adapter, vlan, vf);
422 ixgbe_set_vmolr(&adapter->hw, vf, true); 564 ixgbe_set_vmolr(hw, vf, true);
565 hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
423 adapter->vfinfo[vf].pf_vlan = 0; 566 adapter->vfinfo[vf].pf_vlan = 0;
424 adapter->vfinfo[vf].pf_qos = 0; 567 adapter->vfinfo[vf].pf_qos = 0;
425 } 568 }
@@ -427,9 +570,90 @@ out:
427 return err; 570 return err;
428} 571}
429 572
573static int ixgbe_link_mbps(int internal_link_speed)
574{
575 switch (internal_link_speed) {
576 case IXGBE_LINK_SPEED_100_FULL:
577 return 100;
578 case IXGBE_LINK_SPEED_1GB_FULL:
579 return 1000;
580 case IXGBE_LINK_SPEED_10GB_FULL:
581 return 10000;
582 default:
583 return 0;
584 }
585}
586
587static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
588 int link_speed)
589{
590 int rf_dec, rf_int;
591 u32 bcnrc_val;
592
593 if (tx_rate != 0) {
594 /* Calculate the rate factor values to set */
595 rf_int = link_speed / tx_rate;
596 rf_dec = (link_speed - (rf_int * tx_rate));
597 rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
598
599 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
600 bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
601 IXGBE_RTTBCNRC_RF_INT_MASK);
602 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
603 } else {
604 bcnrc_val = 0;
605 }
606
607 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
608 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
609}
610
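The RTTBCNRC write above programs a fixed-point rate factor of link_speed/tx_rate with a 14-bit fractional part (the IXGBE_RTTBCNRC_RF_* masks are added to ixgbe_type.h later in this patch). A worked example of the arithmetic, using values consistent with the code above:

	/* Worked example, not driver code: link_speed = 10000 Mb/s,
	 * tx_rate = 3000 Mb/s.
	 *   rf_int = 10000 / 3000              = 3
	 *   rf_dec = 10000 - (3 * 3000)        = 1000
	 *   rf_dec = (1000 * (1 << 14)) / 3000 = 5461
	 * Programmed factor = 3 + 5461/16384 ~= 3.333 = 10000/3000,
	 * throttling the VF's queue to the requested 3000 Mb/s.
	 */
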
611void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
612{
613 int actual_link_speed, i;
614 bool reset_rate = false;
615
616 /* VF Tx rate limit was not set */
617 if (adapter->vf_rate_link_speed == 0)
618 return;
619
620 actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
621 if (actual_link_speed != adapter->vf_rate_link_speed) {
622 reset_rate = true;
623 adapter->vf_rate_link_speed = 0;
624 dev_info(&adapter->pdev->dev,
625 "Link speed has been changed. VF Transmit rate "
626 "is disabled\n");
627 }
628
629 for (i = 0; i < adapter->num_vfs; i++) {
630 if (reset_rate)
631 adapter->vfinfo[i].tx_rate = 0;
632
633 ixgbe_set_vf_rate_limit(&adapter->hw, i,
634 adapter->vfinfo[i].tx_rate,
635 actual_link_speed);
636 }
637}
638
430int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) 639int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
431{ 640{
432 return -EOPNOTSUPP; 641 struct ixgbe_adapter *adapter = netdev_priv(netdev);
642 struct ixgbe_hw *hw = &adapter->hw;
643 int actual_link_speed;
644
645 actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
646 if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
647 (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
648 ((tx_rate != 0) && (tx_rate <= 10)))
 649 /* rate limit cannot be set to 10 Mb/s or less on 10Gb adapters */
650 return -EINVAL;
651
652 adapter->vf_rate_link_speed = actual_link_speed;
653 adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
654 ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
655
656 return 0;
433} 657}
434 658
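This hook is typically reached through the rtnetlink VF API (e.g. iproute2's "ip link set DEV vf NUM rate RATE" of this era). The dense validation above reduces to the following acceptance table; the summary is illustrative and derived only from the condition shown:

	/* Behaviour of ixgbe_ndo_set_vf_bw() per the check above,
	 * given a valid vf index:
	 *   link down, or link speed != 10000 Mb/s  -> -EINVAL
	 *   tx_rate == 0                            -> accepted, limit removed
	 *   tx_rate in 1..10, or > link speed       -> -EINVAL
	 *   tx_rate in 11..10000                    -> accepted
	 */
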
435int ixgbe_ndo_get_vf_config(struct net_device *netdev, 659int ixgbe_ndo_get_vf_config(struct net_device *netdev,
@@ -440,7 +664,7 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
440 return -EINVAL; 664 return -EINVAL;
441 ivi->vf = vf; 665 ivi->vf = vf;
442 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); 666 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
443 ivi->tx_rate = 0; 667 ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
444 ivi->vlan = adapter->vfinfo[vf].pf_vlan; 668 ivi->vlan = adapter->vfinfo[vf].pf_vlan;
445 ivi->qos = adapter->vfinfo[vf].pf_qos; 669 ivi->qos = adapter->vfinfo[vf].pf_qos;
446 return 0; 670 return 0;
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 184730ecdfb6..34175564bb78 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -28,16 +28,8 @@
28#ifndef _IXGBE_SRIOV_H_ 28#ifndef _IXGBE_SRIOV_H_
29#define _IXGBE_SRIOV_H_ 29#define _IXGBE_SRIOV_H_
30 30
31int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
32 int entries, u16 *hash_list, u32 vf);
33void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); 31void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
34int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
35void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
36void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
37void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
38void ixgbe_msg_task(struct ixgbe_adapter *adapter); 32void ixgbe_msg_task(struct ixgbe_adapter *adapter);
39int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
40 int vf, unsigned char *mac_addr);
41int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); 33int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); 34void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
43void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); 35void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
@@ -48,6 +40,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
48int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); 40int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
49int ixgbe_ndo_get_vf_config(struct net_device *netdev, 41int ixgbe_ndo_get_vf_config(struct net_device *netdev,
50 int vf, struct ifla_vf_info *ivi); 42 int vf, struct ifla_vf_info *ivi);
43void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
51 44
52#endif /* _IXGBE_SRIOV_H_ */ 45#endif /* _IXGBE_SRIOV_H_ */
53 46
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 9587d975d66c..fa43f2507f43 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -54,9 +54,16 @@
54#define IXGBE_DEV_ID_82599_T3_LOM 0x151C 54#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
55#define IXGBE_DEV_ID_82599_CX4 0x10F9 55#define IXGBE_DEV_ID_82599_CX4 0x10F9
56#define IXGBE_DEV_ID_82599_SFP 0x10FB 56#define IXGBE_DEV_ID_82599_SFP 0x10FB
57#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
58#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
59#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
57#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 60#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
61#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
58#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC 62#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
59#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 63#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
64#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
65#define IXGBE_DEV_ID_82599_LS 0x154F
66#define IXGBE_DEV_ID_X540T 0x1528
60 67
61/* General Registers */ 68/* General Registers */
62#define IXGBE_CTRL 0x00000 69#define IXGBE_CTRL 0x00000
@@ -86,7 +93,7 @@
86 93
87/* General Receive Control */ 94/* General Receive Control */
88#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ 95#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
89#define IXGBE_GRC_APME 0x00000002 /* Advanced Power Management Enable */ 96#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
90 97
91#define IXGBE_VPDDIAG0 0x10204 98#define IXGBE_VPDDIAG0 0x10204
92#define IXGBE_VPDDIAG1 0x10208 99#define IXGBE_VPDDIAG1 0x10208
@@ -158,6 +165,9 @@
158 (0x0D018 + ((_i - 64) * 0x40))) 165 (0x0D018 + ((_i - 64) * 0x40)))
159#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ 166#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
160 (0x0D028 + ((_i - 64) * 0x40))) 167 (0x0D028 + ((_i - 64) * 0x40)))
168#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
169 (0x0D02C + ((_i - 64) * 0x40)))
170#define IXGBE_RSCDBU 0x03028
161#define IXGBE_RDDCC 0x02F20 171#define IXGBE_RDDCC 0x02F20
162#define IXGBE_RXMEMWRAP 0x03190 172#define IXGBE_RXMEMWRAP 0x03190
163#define IXGBE_STARCTRL 0x03024 173#define IXGBE_STARCTRL 0x03024
@@ -222,16 +232,23 @@
222#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ 232#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
223#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ 233#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
224#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ 234#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
225#define IXGBE_VT_CTL 0x051B0 235#define IXGBE_VT_CTL 0x051B0
226#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) 236#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */
227#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) 237#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */
228#define IXGBE_QDE 0x2F04 238#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */
229#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ 239#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */
230#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) 240#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
231#define IXGBE_VMRCTL(_i) (0x0F600 + ((_i) * 4)) 241#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
232#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) 242#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
233#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) 243#define IXGBE_QDE 0x2F04
234#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ 244#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */
245#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
246#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
247#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
248#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
249#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
250#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
251#define IXGBE_RXFECCERR0 0x051B8
235#define IXGBE_LLITHRESH 0x0EC90 252#define IXGBE_LLITHRESH 0x0EC90
236#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ 253#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
237#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ 254#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -279,7 +296,8 @@
279#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) 296#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
280#define IXGBE_DTXCTL 0x07E00 297#define IXGBE_DTXCTL 0x07E00
281 298
282#define IXGBE_DMATXCTL 0x04A80 299#define IXGBE_DMATXCTL 0x04A80
300#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
283#define IXGBE_PFDTXGSWC 0x08220 301#define IXGBE_PFDTXGSWC 0x08220
284#define IXGBE_DTXMXSZRQ 0x08100 302#define IXGBE_DTXMXSZRQ 0x08100
285#define IXGBE_DTXTCPFLGL 0x04A88 303#define IXGBE_DTXTCPFLGL 0x04A88
@@ -293,6 +311,13 @@
293#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ 311#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
294 312
295#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ 313#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
314
315/* Anti-spoofing defines */
316#define IXGBE_SPOOF_MACAS_MASK 0xFF
317#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
318#define IXGBE_SPOOF_VLANAS_SHIFT 8
319#define IXGBE_PFVFSPOOF_REG_COUNT 8
320
296#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ 321#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
297/* Tx DCA Control register : 128 of these (0-127) */ 322/* Tx DCA Control register : 128 of these (0-127) */
298#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) 323#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
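The PFVFSPOOF layout added here pairs with the new set_mac_anti_spoofing/set_vlan_anti_spoofing MAC ops exercised in the ixgbe_sriov.c hunks earlier in this diff. A minimal sketch of how a per-VF toggle plausibly maps onto these registers; the helper name and the read-modify-write shape are assumptions, while the masks and the eight-VFs-per-register count come from the defines above:

	/* Hypothetical helper: in each PFVFSPOOF register, bits 0-7 enable
	 * MAC anti-spoofing and bits 8-15 enable VLAN anti-spoofing, one
	 * bit per VF, eight VFs per register. */
	static void sketch_set_vlan_anti_spoofing(struct ixgbe_hw *hw,
						  bool enable, int vf)
	{
		int reg_idx = vf / 8;	/* IXGBE_PFVFSPOOF_REG_COUNT regs */
		int shift = (vf % 8) + IXGBE_SPOOF_VLANAS_SHIFT;
		u32 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(reg_idx));

		if (enable)
			pfvfspoof |= (1 << shift);
		else
			pfvfspoof &= ~(1 << shift);
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(reg_idx), pfvfspoof);
	}
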
@@ -328,7 +353,7 @@
328/* Wake Up Control */ 353/* Wake Up Control */
329#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ 354#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
330#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ 355#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
331#define IXGBE_WUC_ADVD3WUC 0x00000010 /* D3Cold wake up cap. enable*/ 356#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
332 357
333/* Wake Up Filter Control */ 358/* Wake Up Filter Control */
334#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ 359#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
@@ -350,7 +375,7 @@
350#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ 375#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
351#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ 376#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
352#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */ 377#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */
353#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all 6 wakeup filters*/ 378#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */
354#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ 379#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
355 380
356/* Wake Up Status */ 381/* Wake Up Status */
@@ -392,7 +417,6 @@
392#define IXGBE_SECTXSTAT 0x08804 417#define IXGBE_SECTXSTAT 0x08804
393#define IXGBE_SECTXBUFFAF 0x08808 418#define IXGBE_SECTXBUFFAF 0x08808
394#define IXGBE_SECTXMINIFG 0x08810 419#define IXGBE_SECTXMINIFG 0x08810
395#define IXGBE_SECTXSTAT 0x08804
396#define IXGBE_SECRXCTRL 0x08D00 420#define IXGBE_SECRXCTRL 0x08D00
397#define IXGBE_SECRXSTAT 0x08D04 421#define IXGBE_SECRXSTAT 0x08D04
398 422
@@ -485,21 +509,6 @@
485 509
486#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 510#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
487 511
488/* HW RSC registers */
489#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
490 (0x0D02C + ((_i - 64) * 0x40)))
491#define IXGBE_RSCDBU 0x03028
492#define IXGBE_RSCCTL_RSCEN 0x01
493#define IXGBE_RSCCTL_MAXDESC_1 0x00
494#define IXGBE_RSCCTL_MAXDESC_4 0x04
495#define IXGBE_RSCCTL_MAXDESC_8 0x08
496#define IXGBE_RSCCTL_MAXDESC_16 0x0C
497#define IXGBE_RXDADV_RSCCNT_SHIFT 17
498#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
499#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
500#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
501#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000
502
503/* DCB registers */ 512/* DCB registers */
504#define IXGBE_RTRPCS 0x02430 513#define IXGBE_RTRPCS 0x02430
505#define IXGBE_RTTDCS 0x04900 514#define IXGBE_RTTDCS 0x04900
@@ -508,6 +517,7 @@
508#define IXGBE_RTRUP2TC 0x03020 517#define IXGBE_RTRUP2TC 0x03020
509#define IXGBE_RTTUP2TC 0x0C800 518#define IXGBE_RTTUP2TC 0x0C800
510#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ 519#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
520#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
511#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ 521#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
512#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ 522#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
513#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ 523#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -519,8 +529,14 @@
519#define IXGBE_RTTDTECC 0x04990 529#define IXGBE_RTTDTECC 0x04990
520#define IXGBE_RTTDTECC_NO_BCN 0x00000100 530#define IXGBE_RTTDTECC_NO_BCN 0x00000100
521#define IXGBE_RTTBCNRC 0x04984 531#define IXGBE_RTTBCNRC 0x04984
532#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
533#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
534#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
535#define IXGBE_RTTBCNRC_RF_INT_MASK \
536 (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
522 537
523/* FCoE registers */ 538
539/* FCoE DMA Context Registers */
524#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ 540#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
525#define IXGBE_FCPTRH 0x02414 /* FC User Desc. PTR High */ 541#define IXGBE_FCPTRH 0x02414 /* FC User Desc. PTR High */
526#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ 542#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
@@ -645,6 +661,8 @@
645#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ 661#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
646#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ 662#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
647#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ 663#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
664#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
665#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
648#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ 666#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
649#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ 667#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
650#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ 668#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
@@ -655,6 +673,15 @@
655#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ 673#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
656#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ 674#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
657#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ 675#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
676#define IXGBE_O2BGPTC 0x041C4
677#define IXGBE_O2BSPC 0x087B0
678#define IXGBE_B2OSPC 0x041C0
679#define IXGBE_B2OGPRC 0x02F90
680#define IXGBE_PCRC8ECL 0x0E810
681#define IXGBE_PCRC8ECH 0x0E811
682#define IXGBE_PCRC8ECH_MASK 0x1F
683#define IXGBE_LDPCECL 0x0E820
684#define IXGBE_LDPCECH 0x0E821
658 685
659/* Management */ 686/* Management */
660#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ 687#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -715,17 +742,10 @@
715#define IXGBE_PBACLR_82599 0x11068 742#define IXGBE_PBACLR_82599 0x11068
716#define IXGBE_CIAA_82599 0x11088 743#define IXGBE_CIAA_82599 0x11088
717#define IXGBE_CIAD_82599 0x1108C 744#define IXGBE_CIAD_82599 0x1108C
718#define IXGBE_PCIE_DIAG_0_82599 0x11090 745#define IXGBE_PICAUSE 0x110B0
719#define IXGBE_PCIE_DIAG_1_82599 0x11094 746#define IXGBE_PIENA 0x110B8
720#define IXGBE_PCIE_DIAG_2_82599 0x11098
721#define IXGBE_PCIE_DIAG_3_82599 0x1109C
722#define IXGBE_PCIE_DIAG_4_82599 0x110A0
723#define IXGBE_PCIE_DIAG_5_82599 0x110A4
724#define IXGBE_PCIE_DIAG_6_82599 0x110A8
725#define IXGBE_PCIE_DIAG_7_82599 0x110C0
726#define IXGBE_INTRPT_CSR_82599 0x110B0
727#define IXGBE_INTRPT_MASK_82599 0x110B8
728#define IXGBE_CDQ_MBR_82599 0x110B4 747#define IXGBE_CDQ_MBR_82599 0x110B4
748#define IXGBE_PCIESPARE 0x110BC
729#define IXGBE_MISC_REG_82599 0x110F0 749#define IXGBE_MISC_REG_82599 0x110F0
730#define IXGBE_ECC_CTRL_0_82599 0x11100 750#define IXGBE_ECC_CTRL_0_82599 0x11100
731#define IXGBE_ECC_CTRL_1_82599 0x11104 751#define IXGBE_ECC_CTRL_1_82599 0x11104
@@ -758,7 +778,19 @@
758#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ 778#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */
759#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ 779#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */
760#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ 780#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */
761#define IXGBE_RXUDP 0x08C1C /* Time Sync Rx UDP Port - RW */ 781#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */
782#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */
783#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */
784#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */
785#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */
786#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */
787#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */
788#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
789#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
790#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
791#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
792#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
793#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
762 794
763/* Diagnostic Registers */ 795/* Diagnostic Registers */
764#define IXGBE_RDSTATCTL 0x02C20 796#define IXGBE_RDSTATCTL 0x02C20
@@ -802,8 +834,20 @@
802#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ 834#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
803#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ 835#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
804#define IXGBE_PCIEECCCTL 0x1106C 836#define IXGBE_PCIEECCCTL 0x1106C
837#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
838#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
839#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
840#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/
841#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
842#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
843#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
844#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/
805#define IXGBE_PCIEECCCTL0 0x11100 845#define IXGBE_PCIEECCCTL0 0x11100
806#define IXGBE_PCIEECCCTL1 0x11104 846#define IXGBE_PCIEECCCTL1 0x11104
847#define IXGBE_RXDBUECC 0x03F70
848#define IXGBE_TXDBUECC 0x0CF70
849#define IXGBE_RXDBUEST 0x03F74
850#define IXGBE_TXDBUEST 0x0CF74
807#define IXGBE_PBTXECC 0x0C300 851#define IXGBE_PBTXECC 0x0C300
808#define IXGBE_PBRXECC 0x03300 852#define IXGBE_PBRXECC 0x03300
809#define IXGBE_GHECCR 0x110B0 853#define IXGBE_GHECCR 0x110B0
@@ -844,6 +888,7 @@
844#define IXGBE_AUTOC3 0x042AC 888#define IXGBE_AUTOC3 0x042AC
845#define IXGBE_ANLP1 0x042B0 889#define IXGBE_ANLP1 0x042B0
846#define IXGBE_ANLP2 0x042B4 890#define IXGBE_ANLP2 0x042B4
891#define IXGBE_MACC 0x04330
847#define IXGBE_ATLASCTL 0x04800 892#define IXGBE_ATLASCTL 0x04800
848#define IXGBE_MMNGC 0x042D0 893#define IXGBE_MMNGC 0x042D0
849#define IXGBE_ANLPNP1 0x042D4 894#define IXGBE_ANLPNP1 0x042D4
@@ -856,14 +901,49 @@
856#define IXGBE_MPVC 0x04318 901#define IXGBE_MPVC 0x04318
857#define IXGBE_SGMIIC 0x04314 902#define IXGBE_SGMIIC 0x04314
858 903
904/* Statistics Registers */
905#define IXGBE_RXNFGPC 0x041B0
906#define IXGBE_RXNFGBCL 0x041B4
907#define IXGBE_RXNFGBCH 0x041B8
908#define IXGBE_RXDGPC 0x02F50
909#define IXGBE_RXDGBCL 0x02F54
910#define IXGBE_RXDGBCH 0x02F58
911#define IXGBE_RXDDGPC 0x02F5C
912#define IXGBE_RXDDGBCL 0x02F60
913#define IXGBE_RXDDGBCH 0x02F64
914#define IXGBE_RXLPBKGPC 0x02F68
915#define IXGBE_RXLPBKGBCL 0x02F6C
916#define IXGBE_RXLPBKGBCH 0x02F70
917#define IXGBE_RXDLPBKGPC 0x02F74
918#define IXGBE_RXDLPBKGBCL 0x02F78
919#define IXGBE_RXDLPBKGBCH 0x02F7C
920#define IXGBE_TXDGPC 0x087A0
921#define IXGBE_TXDGBCL 0x087A4
922#define IXGBE_TXDGBCH 0x087A8
923
924#define IXGBE_RXDSTATCTRL 0x02F40
925
926/* Copper Pond 2 link timeout */
859#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 927#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
860 928
861/* Omer CORECTL */ 929/* Omer CORECTL */
862#define IXGBE_CORECTL 0x014F00 930#define IXGBE_CORECTL 0x014F00
863/* BARCTRL */ 931/* BARCTRL */
864#define IXGBE_BARCTRL 0x110F4 932#define IXGBE_BARCTRL 0x110F4
865#define IXGBE_BARCTRL_FLSIZE 0x0700 933#define IXGBE_BARCTRL_FLSIZE 0x0700
866#define IXGBE_BARCTRL_CSRSIZE 0x2000 934#define IXGBE_BARCTRL_FLSIZE_SHIFT 8
935#define IXGBE_BARCTRL_CSRSIZE 0x2000
936
937/* RSCCTL Bit Masks */
938#define IXGBE_RSCCTL_RSCEN 0x01
939#define IXGBE_RSCCTL_MAXDESC_1 0x00
940#define IXGBE_RSCCTL_MAXDESC_4 0x04
941#define IXGBE_RSCCTL_MAXDESC_8 0x08
942#define IXGBE_RSCCTL_MAXDESC_16 0x0C
943
944/* RSCDBU Bit Masks */
945#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
946#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
867 947
868/* RDRXCTL Bit Masks */ 948/* RDRXCTL Bit Masks */
869#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */ 949#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */
@@ -871,6 +951,10 @@
871#define IXGBE_RDRXCTL_MVMEN 0x00000020 951#define IXGBE_RDRXCTL_MVMEN 0x00000020
872#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ 952#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
873#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ 953#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
954#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
955#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
956#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */
957#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */
874 958
875/* RQTC Bit Masks and Shifts */ 959/* RQTC Bit Masks and Shifts */
876#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) 960#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
@@ -940,8 +1024,8 @@
940#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ 1024#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
941#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ 1025#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
942#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */ 1026#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */
943#define IXGBE_MSCA_READ 0x08000000 /* OP CODE 10 (read) */ 1027#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (read) */
944#define IXGBE_MSCA_READ_AUTOINC 0x0C000000 /* OP CODE 11 (read, auto inc)*/ 1028#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (read, auto inc)*/
945#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ 1029#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
946#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ 1030#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
947#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */ 1031#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */
@@ -986,14 +1070,23 @@
986#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ 1070#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
987#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ 1071#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
988 1072
1073/* MII clause 22/28 definitions */
1074#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
1075#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
1076#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
1077#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
1078#define IXGBE_MII_AUTONEG_REG 0x0
1079
989#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 1080#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
990#define IXGBE_MAX_PHY_ADDR 32 1081#define IXGBE_MAX_PHY_ADDR 32
991 1082
992/* PHY IDs*/ 1083/* PHY IDs*/
993#define TN1010_PHY_ID 0x00A19410 1084#define TN1010_PHY_ID 0x00A19410
994#define TNX_FW_REV 0xB 1085#define TNX_FW_REV 0xB
1086#define X540_PHY_ID 0x01540200
995#define QT2022_PHY_ID 0x0043A400 1087#define QT2022_PHY_ID 0x0043A400
996#define ATH_PHY_ID 0x03429050 1088#define ATH_PHY_ID 0x03429050
1089#define AQ_FW_REV 0x20
997 1090
998/* PHY Types */ 1091/* PHY Types */
999#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 1092#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
@@ -1019,6 +1112,7 @@
1019#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ 1112#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
1020#define IXGBE_GPIE_EIAME 0x40000000 1113#define IXGBE_GPIE_EIAME 0x40000000
1021#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 1114#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
1115#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
1022#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ 1116#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
1023#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ 1117#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */
1024#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ 1118#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
@@ -1253,6 +1347,11 @@
1253#define IXGBE_FTQF_POOL_SHIFT 8 1347#define IXGBE_FTQF_POOL_SHIFT 8
1254#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F 1348#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F
1255#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 1349#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25
1350#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E
1351#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D
1352#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B
1353#define IXGBE_FTQF_DEST_PORT_MASK 0x17
1354#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F
1256#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 1355#define IXGBE_FTQF_POOL_MASK_EN 0x40000000
1257#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 1356#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000
1258 1357
@@ -1295,11 +1394,11 @@
1295 * 1394 *
1296 * Current filters: 1395 * Current filters:
1297 * EAPOL 802.1x (0x888e): Filter 0 1396 * EAPOL 802.1x (0x888e): Filter 0
1298 * BCN (0x8904): Filter 1 1397 * FCoE (0x8906): Filter 2
1299 * 1588 (0x88f7): Filter 3 1398 * 1588 (0x88f7): Filter 3
1399 * FIP (0x8914): Filter 4
1300 */ 1400 */
1301#define IXGBE_ETQF_FILTER_EAPOL 0 1401#define IXGBE_ETQF_FILTER_EAPOL 0
1302#define IXGBE_ETQF_FILTER_BCN 1
1303#define IXGBE_ETQF_FILTER_FCOE 2 1402#define IXGBE_ETQF_FILTER_FCOE 2
1304#define IXGBE_ETQF_FILTER_1588 3 1403#define IXGBE_ETQF_FILTER_1588 3
1305#define IXGBE_ETQF_FILTER_FIP 4 1404#define IXGBE_ETQF_FILTER_FIP 4
@@ -1410,6 +1509,11 @@
1410#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) 1509#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
1411#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) 1510#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
1412 1511
1512#define IXGBE_MACC_FLU 0x00000001
1513#define IXGBE_MACC_FSV_10G 0x00030000
1514#define IXGBE_MACC_FS 0x00040000
1515#define IXGBE_MAC_RX2TX_LPBK 0x00000002
1516
1413/* LINKS Bit Masks */ 1517/* LINKS Bit Masks */
1414#define IXGBE_LINKS_KX_AN_COMP 0x80000000 1518#define IXGBE_LINKS_KX_AN_COMP 0x80000000
1415#define IXGBE_LINKS_UP 0x40000000 1519#define IXGBE_LINKS_UP 0x40000000
@@ -1461,6 +1565,7 @@
1461#define IXGBE_ANLP1_PAUSE 0x0C00 1565#define IXGBE_ANLP1_PAUSE 0x0C00
1462#define IXGBE_ANLP1_SYM_PAUSE 0x0400 1566#define IXGBE_ANLP1_SYM_PAUSE 0x0400
1463#define IXGBE_ANLP1_ASM_PAUSE 0x0800 1567#define IXGBE_ANLP1_ASM_PAUSE 0x0800
1568#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
1464 1569
1465/* SW Semaphore Register bitmasks */ 1570/* SW Semaphore Register bitmasks */
1466#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 1571#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
@@ -1474,6 +1579,10 @@
1474#define IXGBE_GSSR_PHY1_SM 0x0004 1579#define IXGBE_GSSR_PHY1_SM 0x0004
1475#define IXGBE_GSSR_MAC_CSR_SM 0x0008 1580#define IXGBE_GSSR_MAC_CSR_SM 0x0008
1476#define IXGBE_GSSR_FLASH_SM 0x0010 1581#define IXGBE_GSSR_FLASH_SM 0x0010
1582#define IXGBE_GSSR_SW_MNG_SM 0x0400
1583
1584/* FW Status register bitmask */
1585#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
1477 1586
1478/* EEC Register */ 1587/* EEC Register */
1479#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ 1588#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */
@@ -1489,21 +1598,29 @@
1489#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ 1598#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
1490#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ 1599#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
1491#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ 1600#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
1601#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
1492#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ 1602#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
1493/* EEPROM Addressing bits based on type (0-small, 1-large) */ 1603/* EEPROM Addressing bits based on type (0-small, 1-large) */
1494#define IXGBE_EEC_ADDR_SIZE 0x00000400 1604#define IXGBE_EEC_ADDR_SIZE 0x00000400
1495#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ 1605#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
 1606#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD allows 14 bits for addr. */
1496 1607
1497#define IXGBE_EEC_SIZE_SHIFT 11 1608#define IXGBE_EEC_SIZE_SHIFT 11
1498#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 1609#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
1499#define IXGBE_EEPROM_OPCODE_BITS 8 1610#define IXGBE_EEPROM_OPCODE_BITS 8
1500 1611
1612/* Part Number String Length */
1613#define IXGBE_PBANUM_LENGTH 11
1614
1501/* Checksum and EEPROM pointers */ 1615/* Checksum and EEPROM pointers */
1616#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
1502#define IXGBE_EEPROM_CHECKSUM 0x3F 1617#define IXGBE_EEPROM_CHECKSUM 0x3F
1503#define IXGBE_EEPROM_SUM 0xBABA 1618#define IXGBE_EEPROM_SUM 0xBABA
1504#define IXGBE_PCIE_ANALOG_PTR 0x03 1619#define IXGBE_PCIE_ANALOG_PTR 0x03
1505#define IXGBE_ATLAS0_CONFIG_PTR 0x04 1620#define IXGBE_ATLAS0_CONFIG_PTR 0x04
1621#define IXGBE_PHY_PTR 0x04
1506#define IXGBE_ATLAS1_CONFIG_PTR 0x05 1622#define IXGBE_ATLAS1_CONFIG_PTR 0x05
1623#define IXGBE_OPTION_ROM_PTR 0x05
1507#define IXGBE_PCIE_GENERAL_PTR 0x06 1624#define IXGBE_PCIE_GENERAL_PTR 0x06
1508#define IXGBE_PCIE_CONFIG0_PTR 0x07 1625#define IXGBE_PCIE_CONFIG0_PTR 0x07
1509#define IXGBE_PCIE_CONFIG1_PTR 0x08 1626#define IXGBE_PCIE_CONFIG1_PTR 0x08
@@ -1516,8 +1633,10 @@
1516#define IXGBE_FW_PTR 0x0F 1633#define IXGBE_FW_PTR 0x0F
1517#define IXGBE_PBANUM0_PTR 0x15 1634#define IXGBE_PBANUM0_PTR 0x15
1518#define IXGBE_PBANUM1_PTR 0x16 1635#define IXGBE_PBANUM1_PTR 0x16
1519#define IXGBE_DEVICE_CAPS 0x2C 1636#define IXGBE_FREE_SPACE_PTR 0x3E
1520#define IXGBE_SAN_MAC_ADDR_PTR 0x28 1637#define IXGBE_SAN_MAC_ADDR_PTR 0x28
1638#define IXGBE_DEVICE_CAPS 0x2C
1639#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
1521#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 1640#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
1522#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 1641#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
1523 1642
@@ -1554,6 +1673,10 @@
1554 1673
1555#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 1674#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
1556 1675
1676#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
1677#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
1678#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
1679
1557#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS 1680#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
1558#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ 1681#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
1559#endif 1682#endif
@@ -1569,14 +1692,25 @@
1569#define IXGBE_FLUDONE_ATTEMPTS 20000 1692#define IXGBE_FLUDONE_ATTEMPTS 20000
1570#endif 1693#endif
1571 1694
1695#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */
1696#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */
1697#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */
1698#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */
1699
1572#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 1700#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
1573#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 1701#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
1574#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 1702#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
1575#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 1703#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
1704#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
1705#define IXGBE_FW_LESM_STATE_1 0x1
1706#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
1576#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 1707#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
1577#define IXGBE_FW_PATCH_VERSION_4 0x7 1708#define IXGBE_FW_PATCH_VERSION_4 0x7
1578 1709#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
1579/* Alternative SAN MAC Address Block */ 1710#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */
1711#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */
1712#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */
1713#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */
1580#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */ 1714#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
1581#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */ 1715#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */
1582#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */ 1716#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
@@ -1587,6 +1721,8 @@
1587#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */ 1721#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
1588 1722
1589/* PCI Bus Info */ 1723/* PCI Bus Info */
1724#define IXGBE_PCI_DEVICE_STATUS 0xAA
1725#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
1590#define IXGBE_PCI_LINK_STATUS 0xB2 1726#define IXGBE_PCI_LINK_STATUS 0xB2
1591#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 1727#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
1592#define IXGBE_PCI_LINK_WIDTH 0x3F0 1728#define IXGBE_PCI_LINK_WIDTH 0x3F0
@@ -1639,6 +1775,7 @@
1639/* Transmit Config masks */ 1775/* Transmit Config masks */
1640#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ 1776#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
1641#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */ 1777#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */
1778#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
1642/* Enable short packet padding to 64 bytes */ 1779/* Enable short packet padding to 64 bytes */
1643#define IXGBE_TX_PAD_ENABLE 0x00000400 1780#define IXGBE_TX_PAD_ENABLE 0x00000400
1644#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ 1781#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */
@@ -1652,6 +1789,8 @@
1652#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ 1789#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
1653#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ 1790#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
1654#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ 1791#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
1792#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
1793#define IXGBE_RXDCTL_RLPML_EN 0x00008000
1655#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ 1794#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
1656 1795
1657#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ 1796#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
@@ -1668,6 +1807,8 @@
1668#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ 1807#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
1669#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ 1808#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
1670 1809
1810#define IXGBE_MFLCN_RPFCE_SHIFT 4
1811
1671/* Multiple Receive Queue Control */ 1812/* Multiple Receive Queue Control */
1672#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ 1813#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */
1673#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ 1814#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */
@@ -1808,6 +1949,8 @@
1808#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 1949#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
1809#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 1950#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
1810#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 1951#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
1952#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
1953#define IXGBE_RXDADV_RSCCNT_SHIFT 17
1811#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 1954#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
1812#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 1955#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
1813#define IXGBE_RXDADV_SPH 0x8000 1956#define IXGBE_RXDADV_SPH 0x8000
@@ -1883,15 +2026,6 @@
1883#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600)) 2026#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
1884#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4)) 2027#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
1885 2028
1886/* Little Endian defines */
1887#ifndef __le32
1888#define __le32 u32
1889#endif
1890#ifndef __le64
1891#define __le64 u64
1892
1893#endif
1894
1895enum ixgbe_fdir_pballoc_type { 2029enum ixgbe_fdir_pballoc_type {
1896 IXGBE_FDIR_PBALLOC_64K = 0, 2030 IXGBE_FDIR_PBALLOC_64K = 0,
1897 IXGBE_FDIR_PBALLOC_128K, 2031 IXGBE_FDIR_PBALLOC_128K,
@@ -1920,10 +2054,9 @@ enum ixgbe_fdir_pballoc_type {
1920#define IXGBE_FDIRM_VLANID 0x00000001 2054#define IXGBE_FDIRM_VLANID 0x00000001
1921#define IXGBE_FDIRM_VLANP 0x00000002 2055#define IXGBE_FDIRM_VLANP 0x00000002
1922#define IXGBE_FDIRM_POOL 0x00000004 2056#define IXGBE_FDIRM_POOL 0x00000004
1923#define IXGBE_FDIRM_L3P 0x00000008 2057#define IXGBE_FDIRM_L4P 0x00000008
1924#define IXGBE_FDIRM_L4P 0x00000010 2058#define IXGBE_FDIRM_FLEX 0x00000010
1925#define IXGBE_FDIRM_FLEX 0x00000020 2059#define IXGBE_FDIRM_DIPv6 0x00000020
1926#define IXGBE_FDIRM_DIPv6 0x00000040
1927 2060
1928#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF 2061#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
1929#define IXGBE_FDIRFREE_FREE_SHIFT 0 2062#define IXGBE_FDIRFREE_FREE_SHIFT 0
@@ -1963,6 +2096,7 @@ enum ixgbe_fdir_pballoc_type {
1963#define IXGBE_FDIRCMD_LAST 0x00000800 2096#define IXGBE_FDIRCMD_LAST 0x00000800
1964#define IXGBE_FDIRCMD_COLLISION 0x00001000 2097#define IXGBE_FDIRCMD_COLLISION 0x00001000
1965#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 2098#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
2099#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
1966#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 2100#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
1967#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 2101#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
1968#define IXGBE_FDIR_INIT_DONE_POLL 10 2102#define IXGBE_FDIR_INIT_DONE_POLL 10
@@ -2090,8 +2224,6 @@ typedef u32 ixgbe_link_speed;
2090 IXGBE_LINK_SPEED_1GB_FULL | \ 2224 IXGBE_LINK_SPEED_1GB_FULL | \
2091 IXGBE_LINK_SPEED_10GB_FULL) 2225 IXGBE_LINK_SPEED_10GB_FULL)
2092 2226
2093#define IXGBE_PCIE_DEV_CTRL_2 0xC8
2094#define PCIE_COMPL_TO_VALUE 0x05
2095 2227
2096/* Physical layer type */ 2228/* Physical layer type */
2097typedef u32 ixgbe_physical_layer; 2229typedef u32 ixgbe_physical_layer;
@@ -2111,57 +2243,95 @@ typedef u32 ixgbe_physical_layer;
2111#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 2243#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
2112#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 2244#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
2113 2245
2246/* Flow Control Macros */
2247#define PAUSE_RTT 8
2248#define PAUSE_MTU(MTU) ((MTU + 1024 - 1) / 1024)
2249
2250#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\
2251 PAUSE_MTU(MTU))
2252#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
2253
2114/* Software ATR hash keys */ 2254/* Software ATR hash keys */
2115#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D 2255#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
2116#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17 2256#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
2117
2118/* Software ATR input stream offsets and masks */
2119#define IXGBE_ATR_VLAN_OFFSET 0
2120#define IXGBE_ATR_SRC_IPV6_OFFSET 2
2121#define IXGBE_ATR_SRC_IPV4_OFFSET 14
2122#define IXGBE_ATR_DST_IPV6_OFFSET 18
2123#define IXGBE_ATR_DST_IPV4_OFFSET 30
2124#define IXGBE_ATR_SRC_PORT_OFFSET 34
2125#define IXGBE_ATR_DST_PORT_OFFSET 36
2126#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
2127#define IXGBE_ATR_VM_POOL_OFFSET 40
2128#define IXGBE_ATR_L4TYPE_OFFSET 41
2129 2257
2258/* Software ATR input stream values and masks */
2259#define IXGBE_ATR_HASH_MASK 0x7fff
2130#define IXGBE_ATR_L4TYPE_MASK 0x3 2260#define IXGBE_ATR_L4TYPE_MASK 0x3
2131#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
2132#define IXGBE_ATR_L4TYPE_UDP 0x1 2261#define IXGBE_ATR_L4TYPE_UDP 0x1
2133#define IXGBE_ATR_L4TYPE_TCP 0x2 2262#define IXGBE_ATR_L4TYPE_TCP 0x2
2134#define IXGBE_ATR_L4TYPE_SCTP 0x3 2263#define IXGBE_ATR_L4TYPE_SCTP 0x3
2135#define IXGBE_ATR_HASH_MASK 0x7fff 2264#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
2265enum ixgbe_atr_flow_type {
2266 IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
2267 IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
2268 IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
2269 IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
2270 IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
2271 IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
2272 IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
2273 IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
2274};
2136 2275
2137/* Flow Director ATR input struct. */ 2276/* Flow Director ATR input struct. */
2138struct ixgbe_atr_input { 2277union ixgbe_atr_input {
2139 /* Byte layout in order, all values with MSB first: 2278 /*
2279 * Byte layout in order, all values with MSB first:
2140 * 2280 *
2281 * vm_pool - 1 byte
2282 * flow_type - 1 byte
2141 * vlan_id - 2 bytes 2283 * vlan_id - 2 bytes
2142 * src_ip - 16 bytes 2284 * src_ip - 16 bytes
2143 * dst_ip - 16 bytes 2285 * dst_ip - 16 bytes
2144 * src_port - 2 bytes 2286 * src_port - 2 bytes
2145 * dst_port - 2 bytes 2287 * dst_port - 2 bytes
2146 * flex_bytes - 2 bytes 2288 * flex_bytes - 2 bytes
2147 * vm_pool - 1 byte 2289 * rsvd0 - 2 bytes - space reserved must be 0.
2148 * l4type - 1 byte
2149 */ 2290 */
2150 u8 byte_stream[42]; 2291 struct {
2292 u8 vm_pool;
2293 u8 flow_type;
2294 __be16 vlan_id;
2295 __be32 dst_ip[4];
2296 __be32 src_ip[4];
2297 __be16 src_port;
2298 __be16 dst_port;
2299 __be16 flex_bytes;
2300 __be16 rsvd0;
2301 } formatted;
2302 __be32 dword_stream[11];
2303};
2304
2305/* Flow Director compressed ATR hash input struct */
2306union ixgbe_atr_hash_dword {
2307 struct {
2308 u8 vm_pool;
2309 u8 flow_type;
2310 __be16 vlan_id;
2311 } formatted;
2312 __be32 ip;
2313 struct {
2314 __be16 src;
2315 __be16 dst;
2316 } port;
2317 __be16 flex_bytes;
2318 __be32 dword;
2151}; 2319};
2152 2320
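The move from a raw byte_stream to overlaid structs lets the ATR code build hash inputs field-by-field and then consume them a dword at a time. A hedged usage sketch, where the function is hypothetical, the values are examples, and only the field and type names come from the unions above:

	/* Hypothetical packing of a TCPv4 flow into the compressed hash
	 * input unions defined above. */
	static void sketch_pack_tcpv4(union ixgbe_atr_hash_dword *common,
				      union ixgbe_atr_hash_dword *port)
	{
		common->dword = 0;
		common->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		common->formatted.vlan_id = htons(100);	/* example VLAN */

		port->dword = 0;
		port->port.src = htons(49152);	/* example source port */
		port->port.dst = htons(80);	/* example dest port */
	}
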
2153struct ixgbe_atr_input_masks { 2321struct ixgbe_atr_input_masks {
2154 u32 src_ip_mask; 2322 __be16 rsvd0;
2155 u32 dst_ip_mask; 2323 __be16 vlan_id_mask;
2156 u16 src_port_mask; 2324 __be32 dst_ip_mask[4];
2157 u16 dst_port_mask; 2325 __be32 src_ip_mask[4];
2158 u16 vlan_id_mask; 2326 __be16 src_port_mask;
2159 u16 data_mask; 2327 __be16 dst_port_mask;
2328 __be16 flex_mask;
2160}; 2329};
2161 2330
2162enum ixgbe_eeprom_type { 2331enum ixgbe_eeprom_type {
2163 ixgbe_eeprom_uninitialized = 0, 2332 ixgbe_eeprom_uninitialized = 0,
2164 ixgbe_eeprom_spi, 2333 ixgbe_eeprom_spi,
2334 ixgbe_flash,
2165 ixgbe_eeprom_none /* No NVM support */ 2335 ixgbe_eeprom_none /* No NVM support */
2166}; 2336};
2167 2337
@@ -2169,12 +2339,15 @@ enum ixgbe_mac_type {
2169 ixgbe_mac_unknown = 0, 2339 ixgbe_mac_unknown = 0,
2170 ixgbe_mac_82598EB, 2340 ixgbe_mac_82598EB,
2171 ixgbe_mac_82599EB, 2341 ixgbe_mac_82599EB,
2342 ixgbe_mac_X540,
2172 ixgbe_num_macs 2343 ixgbe_num_macs
2173}; 2344};
2174 2345
2175enum ixgbe_phy_type { 2346enum ixgbe_phy_type {
2176 ixgbe_phy_unknown = 0, 2347 ixgbe_phy_unknown = 0,
2348 ixgbe_phy_none,
2177 ixgbe_phy_tn, 2349 ixgbe_phy_tn,
2350 ixgbe_phy_aq,
2178 ixgbe_phy_cu_unknown, 2351 ixgbe_phy_cu_unknown,
2179 ixgbe_phy_qt, 2352 ixgbe_phy_qt,
2180 ixgbe_phy_xaui, 2353 ixgbe_phy_xaui,
@@ -2223,6 +2396,7 @@ enum ixgbe_sfp_type {
2223enum ixgbe_media_type { 2396enum ixgbe_media_type {
2224 ixgbe_media_type_unknown = 0, 2397 ixgbe_media_type_unknown = 0,
2225 ixgbe_media_type_fiber, 2398 ixgbe_media_type_fiber,
2399 ixgbe_media_type_fiber_lco,
2226 ixgbe_media_type_copper, 2400 ixgbe_media_type_copper,
2227 ixgbe_media_type_backplane, 2401 ixgbe_media_type_backplane,
2228 ixgbe_media_type_cx4, 2402 ixgbe_media_type_cx4,
@@ -2261,32 +2435,31 @@ enum ixgbe_bus_type {
2261/* PCI bus speeds */ 2435/* PCI bus speeds */
2262enum ixgbe_bus_speed { 2436enum ixgbe_bus_speed {
2263 ixgbe_bus_speed_unknown = 0, 2437 ixgbe_bus_speed_unknown = 0,
2264 ixgbe_bus_speed_33, 2438 ixgbe_bus_speed_33 = 33,
2265 ixgbe_bus_speed_66, 2439 ixgbe_bus_speed_66 = 66,
2266 ixgbe_bus_speed_100, 2440 ixgbe_bus_speed_100 = 100,
2267 ixgbe_bus_speed_120, 2441 ixgbe_bus_speed_120 = 120,
2268 ixgbe_bus_speed_133, 2442 ixgbe_bus_speed_133 = 133,
2269 ixgbe_bus_speed_2500, 2443 ixgbe_bus_speed_2500 = 2500,
2270 ixgbe_bus_speed_5000, 2444 ixgbe_bus_speed_5000 = 5000,
2271 ixgbe_bus_speed_reserved 2445 ixgbe_bus_speed_reserved
2272}; 2446};
2273 2447
2274/* PCI bus widths */ 2448/* PCI bus widths */
2275enum ixgbe_bus_width { 2449enum ixgbe_bus_width {
2276 ixgbe_bus_width_unknown = 0, 2450 ixgbe_bus_width_unknown = 0,
2277 ixgbe_bus_width_pcie_x1, 2451 ixgbe_bus_width_pcie_x1 = 1,
2278 ixgbe_bus_width_pcie_x2, 2452 ixgbe_bus_width_pcie_x2 = 2,
2279 ixgbe_bus_width_pcie_x4 = 4, 2453 ixgbe_bus_width_pcie_x4 = 4,
2280 ixgbe_bus_width_pcie_x8 = 8, 2454 ixgbe_bus_width_pcie_x8 = 8,
2281 ixgbe_bus_width_32, 2455 ixgbe_bus_width_32 = 32,
2282 ixgbe_bus_width_64, 2456 ixgbe_bus_width_64 = 64,
2283 ixgbe_bus_width_reserved 2457 ixgbe_bus_width_reserved
2284}; 2458};
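With explicit numeric values, the bus speed and width enums can now feed log output directly instead of going through a translation table. A hedged example; the adapter pointer in scope is an assumption, matching the driver's usual pattern:

	/* Hypothetical log line relying on the now-numeric enum values */
	dev_info(&adapter->pdev->dev, "PCIe bus: speed %d, width x%d\n",
		 adapter->hw.bus.speed, adapter->hw.bus.width);
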
2285 2459
2286struct ixgbe_addr_filter_info { 2460struct ixgbe_addr_filter_info {
2287 u32 num_mc_addrs; 2461 u32 num_mc_addrs;
2288 u32 rar_used_count; 2462 u32 rar_used_count;
2289 u32 mc_addr_in_rar_count;
2290 u32 mta_in_use; 2463 u32 mta_in_use;
2291 u32 overflow_promisc; 2464 u32 overflow_promisc;
2292 bool uc_set_promisc; 2465 bool uc_set_promisc;
@@ -2387,6 +2560,10 @@ struct ixgbe_hw_stats {
2387 u64 fcoeptc; 2560 u64 fcoeptc;
2388 u64 fcoedwrc; 2561 u64 fcoedwrc;
2389 u64 fcoedwtc; 2562 u64 fcoedwtc;
2563 u64 b2ospc;
2564 u64 b2ogprc;
2565 u64 o2bgptc;
2566 u64 o2bspc;
2390}; 2567};
2391 2568
2392/* forward declaration */ 2569/* forward declaration */
@@ -2400,9 +2577,12 @@ typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
2400struct ixgbe_eeprom_operations { 2577struct ixgbe_eeprom_operations {
2401 s32 (*init_params)(struct ixgbe_hw *); 2578 s32 (*init_params)(struct ixgbe_hw *);
2402 s32 (*read)(struct ixgbe_hw *, u16, u16 *); 2579 s32 (*read)(struct ixgbe_hw *, u16, u16 *);
2580 s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
2403 s32 (*write)(struct ixgbe_hw *, u16, u16); 2581 s32 (*write)(struct ixgbe_hw *, u16, u16);
2582 s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
2404 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); 2583 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
2405 s32 (*update_checksum)(struct ixgbe_hw *); 2584 s32 (*update_checksum)(struct ixgbe_hw *);
2585 u16 (*calc_checksum)(struct ixgbe_hw *);
2406}; 2586};
2407 2587
2408struct ixgbe_mac_operations { 2588struct ixgbe_mac_operations {
@@ -2423,6 +2603,8 @@ struct ixgbe_mac_operations {
2423 s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); 2603 s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
2424 s32 (*setup_sfp)(struct ixgbe_hw *); 2604 s32 (*setup_sfp)(struct ixgbe_hw *);
2425 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); 2605 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
2606 s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
2607 void (*release_swfw_sync)(struct ixgbe_hw *, u16);
2426 2608
2427 /* Link */ 2609 /* Link */
2428 void (*disable_tx_laser)(struct ixgbe_hw *); 2610 void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -2445,13 +2627,14 @@ struct ixgbe_mac_operations {
2445 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); 2627 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
2446 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); 2628 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
2447 s32 (*init_rx_addrs)(struct ixgbe_hw *); 2629 s32 (*init_rx_addrs)(struct ixgbe_hw *);
2448 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
2449 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); 2630 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
2450 s32 (*enable_mc)(struct ixgbe_hw *); 2631 s32 (*enable_mc)(struct ixgbe_hw *);
2451 s32 (*disable_mc)(struct ixgbe_hw *); 2632 s32 (*disable_mc)(struct ixgbe_hw *);
2452 s32 (*clear_vfta)(struct ixgbe_hw *); 2633 s32 (*clear_vfta)(struct ixgbe_hw *);
2453 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); 2634 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
2454 s32 (*init_uta_tables)(struct ixgbe_hw *); 2635 s32 (*init_uta_tables)(struct ixgbe_hw *);
2636 void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
2637 void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
2455 2638
2456 /* Flow Control */ 2639 /* Flow Control */
2457 s32 (*fc_enable)(struct ixgbe_hw *, s32); 2640 s32 (*fc_enable)(struct ixgbe_hw *, s32);
@@ -2482,8 +2665,10 @@ struct ixgbe_eeprom_info {
2482 u32 semaphore_delay; 2665 u32 semaphore_delay;
2483 u16 word_size; 2666 u16 word_size;
2484 u16 address_bits; 2667 u16 address_bits;
2668 u16 word_page_size;
2485}; 2669};
2486 2670
2671#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
2487struct ixgbe_mac_info { 2672struct ixgbe_mac_info {
2488 struct ixgbe_mac_operations ops; 2673 struct ixgbe_mac_operations ops;
2489 enum ixgbe_mac_type type; 2674 enum ixgbe_mac_type type;
@@ -2494,11 +2679,14 @@ struct ixgbe_mac_info {
2494 u16 wwnn_prefix; 2679 u16 wwnn_prefix;
2495 /* prefix for World Wide Port Name (WWPN) */ 2680 /* prefix for World Wide Port Name (WWPN) */
2496 u16 wwpn_prefix; 2681 u16 wwpn_prefix;
2682#define IXGBE_MAX_MTA 128
2683 u32 mta_shadow[IXGBE_MAX_MTA];
2497 s32 mc_filter_type; 2684 s32 mc_filter_type;
2498 u32 mcft_size; 2685 u32 mcft_size;
2499 u32 vft_size; 2686 u32 vft_size;
2500 u32 num_rar_entries; 2687 u32 num_rar_entries;
2501 u32 rar_highwater; 2688 u32 rar_highwater;
2689 u32 rx_pb_size;
2502 u32 max_tx_queues; 2690 u32 max_tx_queues;
2503 u32 max_rx_queues; 2691 u32 max_rx_queues;
2504 u32 max_msix_vectors; 2692 u32 max_msix_vectors;
@@ -2506,6 +2694,7 @@ struct ixgbe_mac_info {
2506 u32 orig_autoc2; 2694 u32 orig_autoc2;
2507 bool orig_link_settings_stored; 2695 bool orig_link_settings_stored;
2508 bool autotry_restart; 2696 bool autotry_restart;
2697 u8 flags;
2509}; 2698};
2510 2699
2511struct ixgbe_phy_info { 2700struct ixgbe_phy_info {
@@ -2572,6 +2761,7 @@ struct ixgbe_hw {
2572 u16 subsystem_vendor_id; 2761 u16 subsystem_vendor_id;
2573 u8 revision_id; 2762 u8 revision_id;
2574 bool adapter_stopped; 2763 bool adapter_stopped;
2764 bool force_full_reset;
2575}; 2765};
2576 2766
2577struct ixgbe_info { 2767struct ixgbe_info {
@@ -2611,7 +2801,12 @@ struct ixgbe_info {
2611#define IXGBE_ERR_EEPROM_VERSION -24 2801#define IXGBE_ERR_EEPROM_VERSION -24
2612#define IXGBE_ERR_NO_SPACE -25 2802#define IXGBE_ERR_NO_SPACE -25
2613#define IXGBE_ERR_OVERTEMP -26 2803#define IXGBE_ERR_OVERTEMP -26
2614#define IXGBE_ERR_RAR_INDEX -27 2804#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
2805#define IXGBE_ERR_FC_NOT_SUPPORTED -28
2806#define IXGBE_ERR_FLOW_CONTROL -29
2807#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
2808#define IXGBE_ERR_PBA_SECTION -31
2809#define IXGBE_ERR_INVALID_ARGUMENT -32
2615#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 2810#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
2616 2811
2617#endif /* _IXGBE_TYPE_H_ */ 2812#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
new file mode 100644
index 000000000000..4ed687be2fe3
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -0,0 +1,938 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/pci.h>
29#include <linux/delay.h>
30#include <linux/sched.h>
31
32#include "ixgbe.h"
33#include "ixgbe_phy.h"
34
35#define IXGBE_X540_MAX_TX_QUEUES 128
36#define IXGBE_X540_MAX_RX_QUEUES 128
37#define IXGBE_X540_RAR_ENTRIES 128
38#define IXGBE_X540_MC_TBL_SIZE 128
39#define IXGBE_X540_VFT_TBL_SIZE 128
40#define IXGBE_X540_RX_PB_SIZE 384
41
42static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
43static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
44static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
45static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
46static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
47static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
48
49static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
50{
51 return ixgbe_media_type_copper;
52}
53
54static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
55{
56 struct ixgbe_mac_info *mac = &hw->mac;
57
58 /* Call PHY identify routine to get the phy type */
59 ixgbe_identify_phy_generic(hw);
60
61 mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
62 mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
63 mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
64 mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
65 mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
66 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
67
68 return 0;
69}
70
71/**
72 * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
73 * @hw: pointer to hardware structure
74 * @speed: new link speed
75 * @autoneg: true if autonegotiation enabled
76 * @autoneg_wait_to_complete: true when waiting for completion is needed
77 **/
78static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
79 ixgbe_link_speed speed, bool autoneg,
80 bool autoneg_wait_to_complete)
81{
82 return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
83 autoneg_wait_to_complete);
84}
85
86/**
87 * ixgbe_reset_hw_X540 - Perform hardware reset
88 * @hw: pointer to hardware structure
89 *
90 * Resets the hardware by resetting the transmit and receive units, masks
91 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
92 * reset.
93 **/
94static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
95{
96 ixgbe_link_speed link_speed;
97 s32 status = 0;
98 u32 ctrl;
99 u32 ctrl_ext;
100 u32 reset_bit;
101 u32 i;
102 u32 autoc;
103 u32 autoc2;
104 bool link_up = false;
105
106 /* Call adapter stop to disable tx/rx and clear interrupts */
107 hw->mac.ops.stop_adapter(hw);
108
109 /*
110 * Prevent the PCI-E bus from hanging by disabling PCI-E master
111 * access and verifying that no pending requests remain before reset
112 */
113 ixgbe_disable_pcie_master(hw);
114
115mac_reset_top:
116 /*
117 * Issue global reset to the MAC. Needs to be SW reset if link is up.
118 * If link reset is used when link is up, it might reset the PHY when
119 * mng is using it. If link is down or the flag to force full link
120 * reset is set, then perform link reset.
121 */
122 if (hw->force_full_reset) {
123 reset_bit = IXGBE_CTRL_LNK_RST;
124 } else {
125 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
126 if (!link_up)
127 reset_bit = IXGBE_CTRL_LNK_RST;
128 else
129 reset_bit = IXGBE_CTRL_RST;
130 }
131
132 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
133 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
134 IXGBE_WRITE_FLUSH(hw);
135
136 /* Poll for reset bit to self-clear indicating reset is complete */
137 for (i = 0; i < 10; i++) {
138 udelay(1);
139 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
140 if (!(ctrl & reset_bit))
141 break;
142 }
143 if (ctrl & reset_bit) {
144 status = IXGBE_ERR_RESET_FAILED;
145 hw_dbg(hw, "Reset polling failed to complete.\n");
146 }
147
148 /*
149 * Double resets are required for recovery from certain error
150 * conditions. Between resets, it is necessary to stall to allow time
151 * for any pending HW events to complete. We use 1usec since that is
152 * what is needed for ixgbe_disable_pcie_master(). The second reset
153 * then clears out any effects of those events.
154 */
155 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
156 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
157 udelay(1);
158 goto mac_reset_top;
159 }
160
161 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
162 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
163 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
164 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
165
166 msleep(50);
167
168 /* Set the Rx packet buffer size. */
169 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), IXGBE_X540_RX_PB_SIZE << IXGBE_RXPBSIZE_SHIFT);
170
174 /*
175 * Store the original AUTOC/AUTOC2 values if they have not been
176 * stored off yet. Otherwise restore the stored original
177 * values since the reset operation sets back to defaults.
178 */
179 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
180 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
181 if (!hw->mac.orig_link_settings_stored) {
182 hw->mac.orig_autoc = autoc;
183 hw->mac.orig_autoc2 = autoc2;
184 hw->mac.orig_link_settings_stored = true;
185 } else {
186 if (autoc != hw->mac.orig_autoc)
187 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
188 IXGBE_AUTOC_AN_RESTART));
189
190 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
191 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
192 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
193 autoc2 |= (hw->mac.orig_autoc2 &
194 IXGBE_AUTOC2_UPPER_MASK);
195 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
196 }
197 }
198
199 /*
200 * Store MAC address from RAR0, clear receive address registers, and
201 * clear the multicast table. Also reset num_rar_entries to 128,
202 * since we modify this value when programming the SAN MAC address.
203 */
204 hw->mac.num_rar_entries = IXGBE_X540_RAR_ENTRIES;
205 hw->mac.ops.init_rx_addrs(hw);
206
207 /* Store the permanent mac address */
208 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
209
210 /* Store the permanent SAN mac address */
211 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
212
213 /* Add the SAN MAC address to the RAR only if it's a valid address */
214 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
215 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
216 hw->mac.san_addr, 0, IXGBE_RAH_AV);
217
218 /* Reserve the last RAR for the SAN MAC address */
219 hw->mac.num_rar_entries--;
220 }
221
222 /* Store the alternative WWNN/WWPN prefix */
223 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
224 &hw->mac.wwpn_prefix);
225
226 return status;
227}
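
/*
 * Editor's note: the double-reset flow above is easier to see in
 * isolation. Below is a minimal standalone sketch of the same
 * goto-based retry pattern; reset_once(), struct fake_mac and
 * FLAG_DOUBLE_RESET_REQUIRED are illustrative stand-ins, not part
 * of the ixgbe API.
 */
#include <stdio.h>

#define FLAG_DOUBLE_RESET_REQUIRED 0x01

struct fake_mac {
	unsigned int flags;
};

static int reset_once(void)
{
	/* Pretend the MAC reset always completes successfully. */
	return 0;
}

static int double_reset_demo(struct fake_mac *mac)
{
	int status;

mac_reset_top:
	status = reset_once();

	/*
	 * If a double reset was requested, clear the flag and run the
	 * whole sequence a second time; the second pass clears out the
	 * effects of any events pending during the first.
	 */
	if (mac->flags & FLAG_DOUBLE_RESET_REQUIRED) {
		mac->flags &= ~FLAG_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}
	return status;
}

int main(void)
{
	struct fake_mac mac = { .flags = FLAG_DOUBLE_RESET_REQUIRED };

	printf("status = %d\n", double_reset_demo(&mac));
	return 0;
}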
228
229/**
230 * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
231 * @hw: pointer to hardware structure
232 *
233 * Starts the hardware using the generic start_hw function
234 * and the generation start_hw function.
235 * Then performs revision-specific operations, if any.
236 **/
237static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
238{
239 s32 ret_val = 0;
240
241 ret_val = ixgbe_start_hw_generic(hw);
242 if (ret_val != 0)
243 goto out;
244
245 ret_val = ixgbe_start_hw_gen2(hw);
246 hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE;
247out:
248 return ret_val;
249}
250
251/**
252 * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
253 * @hw: pointer to hardware structure
254 *
255 * Determines physical layer capabilities of the current configuration.
256 **/
257static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
258{
259 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
260 u16 ext_ability = 0;
261
262 hw->phy.ops.identify(hw);
263
264 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
265 &ext_ability);
266 if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
267 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
268 if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
269 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
270 if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
271 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
272
273 return physical_layer;
274}
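
/*
 * Editor's note: the physical-layer probe above is a series of
 * independent bit tests on the PHY's extended-ability word; each set
 * bit contributes one capability flag. A standalone sketch follows;
 * the EXT_* bit values are illustrative, not the real
 * MDIO_PMA_EXTABLE_* constants.
 */
#include <stdio.h>

#define EXT_10GBT  0x0004u  /* assumed bit positions for the demo */
#define EXT_1000BT 0x0002u
#define EXT_100BTX 0x0001u

int main(void)
{
	unsigned int ext_ability = EXT_10GBT | EXT_1000BT;
	unsigned int layers = 0;

	if (ext_ability & EXT_10GBT)
		layers |= 1u << 0;  /* 10GBASE-T */
	if (ext_ability & EXT_1000BT)
		layers |= 1u << 1;  /* 1000BASE-T */
	if (ext_ability & EXT_100BTX)
		layers |= 1u << 2;  /* 100BASE-TX */

	printf("physical layers: 0x%X\n", layers);
	return 0;
}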
275
276/**
277 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
278 * @hw: pointer to hardware structure
279 *
280 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
281 * ixgbe_hw struct in order to set up EEPROM access.
282 **/
283static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
284{
285 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
286 u32 eec;
287 u16 eeprom_size;
288
289 if (eeprom->type == ixgbe_eeprom_uninitialized) {
290 eeprom->semaphore_delay = 10;
291 eeprom->type = ixgbe_flash;
292
293 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
294 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
295 IXGBE_EEC_SIZE_SHIFT);
296 eeprom->word_size = 1 << (eeprom_size +
297 IXGBE_EEPROM_WORD_SIZE_SHIFT);
298
299 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
300 eeprom->type, eeprom->word_size);
301 }
302
303 return 0;
304}
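
/*
 * Editor's note: standalone sketch of the word-size computation
 * above. The EEC size field value (1) and the word-size shift (6) are
 * assumed here purely for illustration; the driver extracts the real
 * field from the EEC register and uses IXGBE_EEPROM_WORD_SIZE_SHIFT.
 */
#include <stdio.h>

int main(void)
{
	unsigned int eec_size_field = 1;   /* assumed bits from EEC */
	unsigned int word_size_shift = 6;  /* assumed shift constant */
	unsigned int word_size = 1u << (eec_size_field + word_size_shift);

	printf("EEPROM word size: %u words\n", word_size);
	return 0;
}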
305
306/**
307 * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
308 * @hw: pointer to hardware structure
309 * @offset: offset of word in the EEPROM to read
310 * @data: word read from the EEPROM
311 *
312 * Reads a 16 bit word from the EEPROM using the EERD register.
313 **/
314static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
315{
316 s32 status;
317
318 /* Proceed (and later release) only if the semaphore was acquired. */
319 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
320 return IXGBE_ERR_SWFW_SYNC;
321
322 status = ixgbe_read_eerd_generic(hw, offset, data);
323
324 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
325 return status;
326}
327
328/**
329 * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
330 * @hw: pointer to hardware structure
331 * @offset: offset of word in the EEPROM to read
332 * @words: number of words
333 * @data: word(s) read from the EEPROM
334 *
335 * Reads 16 bit word(s) from the EEPROM using the EERD register.
336 **/
337static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
338 u16 offset, u16 words, u16 *data)
339{
340 s32 status;
341
342 /* Proceed (and later release) only if the semaphore was acquired. */
343 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
344 return IXGBE_ERR_SWFW_SYNC;
345
346 status = ixgbe_read_eerd_buffer_generic(hw, offset,
347 words, data);
348
349 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
350 return status;
351}
352
353/**
354 * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
355 * @hw: pointer to hardware structure
356 * @offset: offset of word in the EEPROM to write
357 * @data: word to write to the EEPROM
358 *
359 * Writes a 16 bit word to the EEPROM using the EEWR register.
360 **/
361static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
362{
363 s32 status;
364
365 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
366 return IXGBE_ERR_SWFW_SYNC;
367
368 status = ixgbe_write_eewr_generic(hw, offset, data);
369 /* Release only after a successful acquire. */
370 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
371 return status;
372}
373
374/**
375 * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
376 * @hw: pointer to hardware structure
377 * @offset: offset of word in the EEPROM to write
378 * @words: number of words
379 * @data: word(s) to write to the EEPROM
380 *
381 * Writes 16 bit word(s) to the EEPROM using the EEWR register.
382 **/
383static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
384 u16 offset, u16 words, u16 *data)
385{
386 s32 status;
387
388 /* Proceed (and later release) only if the semaphore was acquired. */
389 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
390 return IXGBE_ERR_SWFW_SYNC;
391
392 status = ixgbe_write_eewr_buffer_generic(hw, offset,
393 words, data);
394
395 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
396 return status;
397}
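
/*
 * Editor's note: the four wrappers above share one idiom: take the
 * EEP_SM semaphore, run a generic EERD/EEWR helper, then release.
 * A minimal standalone sketch of that guard pattern follows; acquire(),
 * release() and do_eeprom_op() are illustrative stand-ins.
 */
#include <stdio.h>

static int acquire(void) { return 0; }     /* 0 == got the semaphore */
static void release(void) { }
static int do_eeprom_op(void) { return 0; }

static int guarded_op(void)
{
	int status;

	if (acquire() != 0)
		return -1;        /* e.g. IXGBE_ERR_SWFW_SYNC */

	status = do_eeprom_op();
	release();                /* only after a successful acquire */
	return status;
}

int main(void)
{
	printf("guarded_op() = %d\n", guarded_op());
	return 0;
}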
398
399/**
400 * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
401 * @hw: pointer to hardware structure
402 *
403 * This function does not use synchronization for EERD and EEWR. It can
404 * be used internally by functions which utilize
405 * ixgbe_acquire_swfw_sync_X540.
406 **/
407static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
408{
409 u16 i;
410 u16 j;
411 u16 checksum = 0;
412 u16 length = 0;
413 u16 pointer = 0;
414 u16 word = 0;
415
416 /*
417 * Do not use hw->eeprom.ops.read because we do not want to take
418 * the synchronization semaphores here. Instead use
419 * ixgbe_read_eerd_generic
420 */
421
422 /* Include 0x0-0x3F in the checksum */
423 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
424 if (ixgbe_read_eerd_generic(hw, i, &word) != 0) {
425 hw_dbg(hw, "EEPROM read failed\n");
426 break;
427 }
428 checksum += word;
429 }
430
431 /*
432 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
433 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
434 */
435 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
436 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
437 continue;
438
439 if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) {
440 hw_dbg(hw, "EEPROM read failed\n");
441 break;
442 }
443
444 /* Skip pointer section if the pointer is invalid. */
445 if (pointer == 0xFFFF || pointer == 0 ||
446 pointer >= hw->eeprom.word_size)
447 continue;
448
449 if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) {
450 hw_dbg(hw, "EEPROM read failed\n");
451 break;
452 }
453
454 /* Skip pointer section if length is invalid. */
455 if (length == 0xFFFF || length == 0 ||
456 (pointer + length) >= hw->eeprom.word_size)
457 continue;
458
459 for (j = pointer + 1; j <= pointer + length; j++) {
460 if (ixgbe_read_eerd_generic(hw, j, &word) != 0) {
461 hw_dbg(hw, "EEPROM read failed\n");
462 break;
463 }
464 checksum += word;
465 }
466 }
467
468 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
469
470 return checksum;
471}
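
/*
 * Editor's note: the checksum above is chosen so that the 16-bit sum
 * of every covered word, including the stored checksum word itself,
 * equals the constant IXGBE_EEPROM_SUM (0xBABA in ixgbe_type.h). A
 * standalone sketch of that arithmetic, with made-up word values:
 */
#include <stdio.h>
#include <stdint.h>

#define EEPROM_SUM 0xBABA  /* mirrors IXGBE_EEPROM_SUM */

int main(void)
{
	uint16_t words[] = { 0x1234, 0xABCD, 0x0042 };
	uint16_t sum = 0;
	size_t i;

	for (i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		sum += words[i];

	/* checksum = SUM - (sum of all other words), modulo 2^16 */
	uint16_t checksum = (uint16_t)(EEPROM_SUM - sum);

	/* Validation: the total including the checksum word is SUM. */
	printf("sum + checksum = 0x%04X (expect 0x%04X)\n",
	       (uint16_t)(sum + checksum), EEPROM_SUM);
	return 0;
}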
472
473/**
474 * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
475 * @hw: pointer to hardware structure
476 * @checksum_val: calculated checksum
477 *
478 * Performs checksum calculation and validates the EEPROM checksum. If the
479 * caller does not need checksum_val, the value can be NULL.
480 **/
481static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
482 u16 *checksum_val)
483{
484 s32 status;
485 u16 checksum;
486 u16 read_checksum = 0;
487
488 /*
489 * Read the first word from the EEPROM. If this times out or fails, do
490 * not continue or we could be in for a very long wait while every
491 * EEPROM read fails
492 */
493 status = hw->eeprom.ops.read(hw, 0, &checksum);
494
495 if (status != 0) {
496 hw_dbg(hw, "EEPROM read failed\n");
497 goto out;
498 }
499
500 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
501 checksum = hw->eeprom.ops.calc_checksum(hw);
502
503 /*
504 * Do not use hw->eeprom.ops.read because we do not want to take
505 * the synchronization semaphores twice here.
506 */
507 ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
508 &read_checksum);
509
510 /*
511 * Verify read checksum from EEPROM is the same as
512 * calculated checksum
513 */
514 if (read_checksum != checksum)
515 status = IXGBE_ERR_EEPROM_CHECKSUM;
516
517 /* If the user cares, return the calculated checksum */
518 if (checksum_val)
519 *checksum_val = checksum;
520
521 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
522 } else {
523 status = IXGBE_ERR_SWFW_SYNC;
524 }
525out:
526 return status;
527}
528
529/**
530 * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
531 * @hw: pointer to hardware structure
532 *
533 * After writing EEPROM to shadow RAM using EEWR register, software calculates
534 * checksum and updates the EEPROM and instructs the hardware to update
535 * the flash.
536 **/
537static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
538{
539 s32 status;
540 u16 checksum;
541
542 /*
543 * Read the first word from the EEPROM. If this times out or fails, do
544 * not continue or we could be in for a very long wait while every
545 * EEPROM read fails
546 */
547 status = hw->eeprom.ops.read(hw, 0, &checksum);
548
549 if (status != 0) {
550 hw_dbg(hw, "EEPROM read failed\n");
551 return status;
552 }
553
552 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
553 checksum = hw->eeprom.ops.calc_checksum(hw);
554
555 /*
556 * Do not use hw->eeprom.ops.write because we do not want to
557 * take the synchronization semaphores twice here.
558 */
559 status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
560 checksum);
561
562 if (status == 0)
563 status = ixgbe_update_flash_X540(hw);
564
565 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
566 } else {
567 status = IXGBE_ERR_SWFW_SYNC;
568 }
569
570 return status;
571}
572
573/**
574 * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
575 * @hw: pointer to hardware structure
576 *
577 * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
578 * EEPROM from shadow RAM to the flash device.
579 **/
580static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
581{
582 u32 flup;
583 s32 status;
584
585 status = ixgbe_poll_flash_update_done_X540(hw);
586 if (status == IXGBE_ERR_EEPROM) {
587 hw_dbg(hw, "Flash update time out\n");
588 goto out;
589 }
590
591 flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
592 IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
593
594 status = ixgbe_poll_flash_update_done_X540(hw);
595 if (status == 0)
596 hw_dbg(hw, "Flash update complete\n");
597 else
598 hw_dbg(hw, "Flash update time out\n");
599
600 if (hw->revision_id == 0) {
601 flup = IXGBE_READ_REG(hw, IXGBE_EEC);
602
603 if (flup & IXGBE_EEC_SEC1VAL) {
604 flup |= IXGBE_EEC_FLUP;
605 IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
606 }
607
608 status = ixgbe_poll_flash_update_done_X540(hw);
609 if (status == 0)
610 hw_dbg(hw, "Flash update complete\n");
611 else
612 hw_dbg(hw, "Flash update time out\n");
613 }
614out:
615 return status;
616}
617
618/**
619 * ixgbe_poll_flash_update_done_X540 - Poll flash update status
620 * @hw: pointer to hardware structure
621 *
622 * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
623 * flash update is done.
624 **/
625static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
626{
627 u32 i;
628 u32 reg;
629 s32 status = IXGBE_ERR_EEPROM;
630
631 for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
632 reg = IXGBE_READ_REG(hw, IXGBE_EEC);
633 if (reg & IXGBE_EEC_FLUDONE) {
634 status = 0;
635 break;
636 }
637 udelay(5);
638 }
639 return status;
640}
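
/*
 * Editor's note: a standalone sketch of the bounded poll used above:
 * re-read a status register until a "done" bit sets or the attempt
 * budget runs out. read_reg() and DONE_BIT are illustrative stand-ins
 * for IXGBE_READ_REG()/IXGBE_EEC_FLUDONE.
 */
#include <stdio.h>

#define DONE_BIT 0x1u

static unsigned int read_reg(void)
{
	static unsigned int calls;
	return (++calls >= 3) ? DONE_BIT : 0;  /* "completes" on 3rd read */
}

static int poll_done(unsigned int attempts)
{
	unsigned int i;

	for (i = 0; i < attempts; i++) {
		if (read_reg() & DONE_BIT)
			return 0;   /* success */
		/* a real driver would udelay() here between reads */
	}
	return -1;                  /* timed out */
}

int main(void)
{
	printf("poll_done: %d\n", poll_done(10));
	return 0;
}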
641
642/**
643 * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
644 * @hw: pointer to hardware structure
645 * @mask: Mask to specify which semaphore to acquire
646 *
647 * Acquires the SWFW semaphore through the SW_FW_SYNC register for
648 * the specified function (CSR, PHY0, PHY1, NVM, Flash)
649 **/
650static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
651{
652 u32 swfw_sync;
653 u32 swmask = mask;
654 u32 fwmask = mask << 5;
655 u32 hwmask = 0;
656 u32 timeout = 200;
657 u32 i;
658
659 if (swmask == IXGBE_GSSR_EEP_SM)
660 hwmask = IXGBE_GSSR_FLASH_SM;
661
662 for (i = 0; i < timeout; i++) {
663 /*
664 * SW NVM semaphore bit is used for access to all
665 * SW_FW_SYNC bits (not just NVM)
666 */
667 if (ixgbe_get_swfw_sync_semaphore(hw))
668 return IXGBE_ERR_SWFW_SYNC;
669
670 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
671 if (!(swfw_sync & (fwmask | swmask | hwmask))) {
672 swfw_sync |= swmask;
673 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
674 ixgbe_release_swfw_sync_semaphore(hw);
675 break;
676 } else {
677 /*
678 * Firmware currently using resource (fwmask),
679 * hardware currently using resource (hwmask),
680 * or other software thread currently using
681 * resource (swmask)
682 */
683 ixgbe_release_swfw_sync_semaphore(hw);
684 usleep_range(5000, 10000);
685 }
686 }
687
688 /*
689 * If the resource is not released by the FW/HW, the SW can assume that
690 * the FW/HW has malfunctioned. In that case the SW should set the
691 * SW bit(s) of the requested resource(s) while ignoring the
692 * corresponding FW/HW bits in the SW_FW_SYNC register.
693 */
694 if (i >= timeout) {
695 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
696 if (swfw_sync & (fwmask | hwmask)) {
697 if (ixgbe_get_swfw_sync_semaphore(hw))
698 return IXGBE_ERR_SWFW_SYNC;
699
700 swfw_sync |= swmask;
701 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
702 ixgbe_release_swfw_sync_semaphore(hw);
703 }
704 }
705
706 usleep_range(5000, 10000);
707 return 0;
708}
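
/*
 * Editor's note: in SW_FW_SYNC the firmware's bit for each resource
 * sits five positions above the software's bit, which is why the code
 * above derives fwmask = mask << 5. A standalone sketch of the
 * claim/release bit manipulation; the 0x0001 value mirrors
 * IXGBE_GSSR_EEP_SM but is illustrative here.
 */
#include <stdio.h>

int main(void)
{
	unsigned int swmask = 0x0001;       /* SW "EEPROM" bit */
	unsigned int fwmask = swmask << 5;  /* matching FW bit */
	unsigned int swfw_sync = 0;         /* register shadow */

	if (!(swfw_sync & (swmask | fwmask))) {
		swfw_sync |= swmask;        /* claim for software */
		printf("claimed:  SW_FW_SYNC = 0x%04X\n", swfw_sync);
	}

	swfw_sync &= ~swmask;               /* release */
	printf("released: SW_FW_SYNC = 0x%04X\n", swfw_sync);
	return 0;
}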
709
710/**
711 * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
712 * @hw: pointer to hardware structure
713 * @mask: Mask to specify which semaphore to release
714 *
715 * Releases the SWFW semaphore through the SW_FW_SYNC register
716 * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
717 **/
718static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
719{
720 u32 swfw_sync;
721 u32 swmask = mask;
722
723 ixgbe_get_swfw_sync_semaphore(hw);
724
725 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
726 swfw_sync &= ~swmask;
727 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
728
729 ixgbe_release_swfw_sync_semaphore(hw);
730 usleep_range(5000, 10000);
731}
732
733/**
734 * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
735 * @hw: pointer to hardware structure
736 *
737 * Sets the hardware semaphores so SW/FW can gain control of shared resources
738 **/
739static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
740{
741 s32 status = IXGBE_ERR_EEPROM;
742 u32 timeout = 2000;
743 u32 i;
744 u32 swsm;
745
746 /* Get SMBI software semaphore between device drivers first */
747 for (i = 0; i < timeout; i++) {
748 /*
749 * If the SMBI bit is 0 when we read it, then the bit will be
750 * set and we have the semaphore
751 */
752 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
753 if (!(swsm & IXGBE_SWSM_SMBI)) {
754 status = 0;
755 break;
756 }
757 udelay(50);
758 }
759
760 /* Now get the semaphore between SW/FW through the REGSMP bit */
761 if (!status) {
762 for (i = 0; i < timeout; i++) {
763 swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
764 if (!(swsm & IXGBE_SWFW_REGSMP))
765 break;
766
767 udelay(50);
768 }
769 } else {
770 hw_dbg(hw, "Software semaphore SMBI between device drivers "
771 "not granted.\n");
772 }
773
774 return status;
775}
776
777/**
778 * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
779 * @hw: pointer to hardware structure
780 *
781 * This function clears hardware semaphore bits.
782 **/
783static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
784{
785 u32 swsm;
786
787 /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
788
789 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
790 swsm &= ~IXGBE_SWSM_SMBI;
791 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
792
793 swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
794 swsm &= ~IXGBE_SWFW_REGSMP;
795 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
796
797 IXGBE_WRITE_FLUSH(hw);
798}
799
800/**
801 * ixgbe_blink_led_start_X540 - Blink LED based on index.
802 * @hw: pointer to hardware structure
803 * @index: led number to blink
804 *
805 * Devices that implement the version 2 interface:
806 * X540
807 **/
808static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
809{
810 u32 macc_reg;
811 u32 ledctl_reg;
812
813 /*
814 * In order for the blink bit in the LED control register
815 * to work, link and speed must be forced in the MAC. We
816 * will reverse this when we stop the blinking.
817 */
818 macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
819 macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
820 IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
821
822 /* Set the LED to LINK_UP + BLINK. */
823 ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
824 ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
825 ledctl_reg |= IXGBE_LED_BLINK(index);
826 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
827 IXGBE_WRITE_FLUSH(hw);
828
829 return 0;
830}
831
832/**
833 * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
834 * @hw: pointer to hardware structure
835 * @index: led number to stop blinking
836 *
837 * Devices that implement the version 2 interface:
838 * X540
839 **/
840static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
841{
842 u32 macc_reg;
843 u32 ledctl_reg;
844
845 /* Restore the LED to its default value. */
846 ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
847 ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
848 ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
849 ledctl_reg &= ~IXGBE_LED_BLINK(index);
850 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
851
852 /* Unforce link and speed in the MAC. */
853 macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
854 macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
855 IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
856 IXGBE_WRITE_FLUSH(hw);
857
858 return 0;
859}
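
/*
 * Editor's note: LEDCTL gives each LED its own byte, with a mode field
 * in the low nibble and a blink-enable bit near the top of the byte. A
 * standalone sketch of the read-modify-write sequences in the two
 * blink functions above; the field positions mirror the IXGBE_LED_*
 * macros but should be treated as assumptions here.
 */
#include <stdio.h>

#define LED_MODE_MASK(i)  (0x0Fu << (8 * (i)))
#define LED_MODE_SHIFT(i) (8 * (i))
#define LED_BLINK(i)      (0x80u << (8 * (i)))
#define LED_LINK_ACTIVE   0x4u  /* assumed mode value */

int main(void)
{
	unsigned int ledctl = 0, index = 2;

	/* start blinking: clear the mode field, set the blink bit */
	ledctl &= ~LED_MODE_MASK(index);
	ledctl |= LED_BLINK(index);
	printf("blink on:  LEDCTL = 0x%08X\n", ledctl);

	/* stop blinking: restore link-active mode, clear the blink bit */
	ledctl &= ~LED_MODE_MASK(index);
	ledctl |= LED_LINK_ACTIVE << LED_MODE_SHIFT(index);
	ledctl &= ~LED_BLINK(index);
	printf("blink off: LEDCTL = 0x%08X\n", ledctl);
	return 0;
}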
860static struct ixgbe_mac_operations mac_ops_X540 = {
861 .init_hw = &ixgbe_init_hw_generic,
862 .reset_hw = &ixgbe_reset_hw_X540,
863 .start_hw = &ixgbe_start_hw_X540,
864 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
865 .get_media_type = &ixgbe_get_media_type_X540,
866 .get_supported_physical_layer =
867 &ixgbe_get_supported_physical_layer_X540,
868 .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
869 .get_mac_addr = &ixgbe_get_mac_addr_generic,
870 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
871 .get_device_caps = &ixgbe_get_device_caps_generic,
872 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
873 .stop_adapter = &ixgbe_stop_adapter_generic,
874 .get_bus_info = &ixgbe_get_bus_info_generic,
875 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
876 .read_analog_reg8 = NULL,
877 .write_analog_reg8 = NULL,
878 .setup_link = &ixgbe_setup_mac_link_X540,
879 .check_link = &ixgbe_check_mac_link_generic,
880 .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
881 .led_on = &ixgbe_led_on_generic,
882 .led_off = &ixgbe_led_off_generic,
883 .blink_led_start = &ixgbe_blink_led_start_X540,
884 .blink_led_stop = &ixgbe_blink_led_stop_X540,
885 .set_rar = &ixgbe_set_rar_generic,
886 .clear_rar = &ixgbe_clear_rar_generic,
887 .set_vmdq = &ixgbe_set_vmdq_generic,
888 .clear_vmdq = &ixgbe_clear_vmdq_generic,
889 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
890 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
891 .enable_mc = &ixgbe_enable_mc_generic,
892 .disable_mc = &ixgbe_disable_mc_generic,
893 .clear_vfta = &ixgbe_clear_vfta_generic,
894 .set_vfta = &ixgbe_set_vfta_generic,
895 .fc_enable = &ixgbe_fc_enable_generic,
896 .init_uta_tables = &ixgbe_init_uta_tables_generic,
897 .setup_sfp = NULL,
898 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
899 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
900 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
901 .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
902};
903
904static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
905 .init_params = &ixgbe_init_eeprom_params_X540,
906 .read = &ixgbe_read_eerd_X540,
907 .read_buffer = &ixgbe_read_eerd_buffer_X540,
908 .write = &ixgbe_write_eewr_X540,
909 .write_buffer = &ixgbe_write_eewr_buffer_X540,
910 .calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
911 .validate_checksum = &ixgbe_validate_eeprom_checksum_X540,
912 .update_checksum = &ixgbe_update_eeprom_checksum_X540,
913};
914
915static struct ixgbe_phy_operations phy_ops_X540 = {
916 .identify = &ixgbe_identify_phy_generic,
917 .identify_sfp = &ixgbe_identify_sfp_module_generic,
918 .init = NULL,
919 .reset = NULL,
920 .read_reg = &ixgbe_read_phy_reg_generic,
921 .write_reg = &ixgbe_write_phy_reg_generic,
922 .setup_link = &ixgbe_setup_phy_link_generic,
923 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
924 .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
925 .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
926 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
927 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
928 .check_overtemp = &ixgbe_tn_check_overtemp,
929};
930
931struct ixgbe_info ixgbe_X540_info = {
932 .mac = ixgbe_mac_X540,
933 .get_invariants = &ixgbe_get_invariants_X540,
934 .mac_ops = &mac_ops_X540,
935 .eeprom_ops = &eeprom_ops_X540,
936 .phy_ops = &phy_ops_X540,
937 .mbx_ops = &mbx_ops_generic,
938};
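
/*
 * Editor's note: ixgbe_X540_info wires per-device behavior through
 * function-pointer tables, so common code can call
 * hw->mac.ops.reset_hw() without knowing the MAC type. A minimal
 * standalone sketch of that dispatch pattern; every name below is
 * illustrative, not part of the driver.
 */
#include <stdio.h>

struct mac_ops {
	int (*reset_hw)(void);
};

static int reset_hw_x540(void)
{
	printf("X540 reset\n");
	return 0;
}

static const struct mac_ops mac_ops_x540_demo = {
	.reset_hw = reset_hw_x540,
};

int main(void)
{
	const struct mac_ops *ops = &mac_ops_x540_demo;

	return ops->reset_hw();  /* dispatches to the X540 variant */
}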