aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>2009-02-27 10:45:05 -0500
committerDavid S. Miller <davem@davemloft.net>2009-03-01 03:24:36 -0500
commite8e26350f114fa212e277ea02332d9347c59865d (patch)
treee5356ae1e62587c5fd128800c361fb1bf873772c
parent235ea828a1640ed493562a5fe08aa666ff84fbc4 (diff)
ixgbe: Add 82599 device IDs, hook it up into the main driver.
With the hardware-specific code in place, add all supported device IDs, along with base driver changes to enable 82599 devices. The devices being enabled are: 8086:10f7: 82599EB 10 Gigabit KX4 Network Connection 8086:10fb: 82599EB 10 Gigabit Network Connection The device 8086:10fb is a fully-pluggable SFP+ NIC. Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ixgbe/Makefile5
-rw-r--r--drivers/net/ixgbe/ixgbe.h29
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c659
4 files changed, 559 insertions, 135 deletions
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index f6061950f5d1..b3f8208ec7be 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -33,6 +33,7 @@
33obj-$(CONFIG_IXGBE) += ixgbe.o 33obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
36 ixgbe_82598.o ixgbe_phy.o 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o
37 37
38ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o ixgbe_dcb_nl.o 38ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
39 ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 2d877da143cf..0b54717f707d 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -140,9 +140,9 @@ struct ixgbe_ring {
140 int cpu; 140 int cpu;
141#endif 141#endif
142 struct ixgbe_queue_stats stats; 142 struct ixgbe_queue_stats stats;
143 u16 v_idx; /* maps directly to the index for this ring in the hardware 143 u64 v_idx; /* maps directly to the index for this ring in the hardware
144 * vector array, can also be used for finding the bit in EICR 144 * vector array, can also be used for finding the bit in EICR
145 * and friends that represents the vector for this ring */ 145 * and friends that represents the vector for this ring */
146 146
147 147
148 u16 work_limit; /* max work per interrupt */ 148 u16 work_limit; /* max work per interrupt */
@@ -166,8 +166,8 @@ struct ixgbe_ring_feature {
166 int mask; 166 int mask;
167}; 167};
168 168
169#define MAX_RX_QUEUES 64 169#define MAX_RX_QUEUES 128
170#define MAX_TX_QUEUES 32 170#define MAX_TX_QUEUES 128
171 171
172#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ 172#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
173 ? 8 : 1) 173 ? 8 : 1)
@@ -211,11 +211,13 @@ struct ixgbe_q_vector {
211#define OTHER_VECTOR 1 211#define OTHER_VECTOR 1
212#define NON_Q_VECTORS (OTHER_VECTOR) 212#define NON_Q_VECTORS (OTHER_VECTOR)
213 213
214#define MAX_MSIX_VECTORS_82599 64
215#define MAX_MSIX_Q_VECTORS_82599 64
214#define MAX_MSIX_VECTORS_82598 18 216#define MAX_MSIX_VECTORS_82598 18
215#define MAX_MSIX_Q_VECTORS_82598 16 217#define MAX_MSIX_Q_VECTORS_82598 16
216 218
217#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82598 219#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599
218#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82598 220#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
219 221
220#define MIN_MSIX_Q_VECTORS 2 222#define MIN_MSIX_Q_VECTORS 2
221#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) 223#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
@@ -227,7 +229,7 @@ struct ixgbe_adapter {
227 u16 bd_number; 229 u16 bd_number;
228 struct work_struct reset_task; 230 struct work_struct reset_task;
229 struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS]; 231 struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
230 char name[MAX_MSIX_COUNT][IFNAMSIZ + 5]; 232 char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
231 struct ixgbe_dcb_config dcb_cfg; 233 struct ixgbe_dcb_config dcb_cfg;
232 struct ixgbe_dcb_config temp_dcb_cfg; 234 struct ixgbe_dcb_config temp_dcb_cfg;
233 u8 dcb_set_bitmap; 235 u8 dcb_set_bitmap;
@@ -252,6 +254,7 @@ struct ixgbe_adapter {
252 struct ixgbe_ring *rx_ring; /* One per active queue */ 254 struct ixgbe_ring *rx_ring; /* One per active queue */
253 int num_rx_queues; 255 int num_rx_queues;
254 u64 hw_csum_rx_error; 256 u64 hw_csum_rx_error;
257 u64 hw_rx_no_dma_resources;
255 u64 hw_csum_rx_good; 258 u64 hw_csum_rx_good;
256 u64 non_eop_descs; 259 u64 non_eop_descs;
257 int num_msix_vectors; 260 int num_msix_vectors;
@@ -280,6 +283,7 @@ struct ixgbe_adapter {
280#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11) 283#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
281#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12) 284#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
282#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13) 285#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
286#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14)
283#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16) 287#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
284#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17) 288#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
285#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18) 289#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
@@ -287,7 +291,8 @@ struct ixgbe_adapter {
287#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20) 291#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
288#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22) 292#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
289#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23) 293#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
290#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 24) 294#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24)
295#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25)
291 296
292/* default to trying for four seconds */ 297/* default to trying for four seconds */
293#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) 298#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
@@ -317,7 +322,9 @@ struct ixgbe_adapter {
317 struct work_struct watchdog_task; 322 struct work_struct watchdog_task;
318 struct work_struct sfp_task; 323 struct work_struct sfp_task;
319 struct timer_list sfp_timer; 324 struct timer_list sfp_timer;
320 325 struct work_struct multispeed_fiber_task;
326 struct work_struct sfp_config_module_task;
327 u32 wol;
321 u16 eeprom_version; 328 u16 eeprom_version;
322}; 329};
323 330
@@ -330,9 +337,11 @@ enum ixbge_state_t {
330 337
331enum ixgbe_boards { 338enum ixgbe_boards {
332 board_82598, 339 board_82598,
340 board_82599,
333}; 341};
334 342
335extern struct ixgbe_info ixgbe_82598_info; 343extern struct ixgbe_info ixgbe_82598_info;
344extern struct ixgbe_info ixgbe_82599_info;
336#ifdef CONFIG_IXGBE_DCB 345#ifdef CONFIG_IXGBE_DCB
337extern struct dcbnl_rtnl_ops dcbnl_ops; 346extern struct dcbnl_rtnl_ops dcbnl_ops;
338extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, 347extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 35d820e6dfa8..ae38bcaa7ca1 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -89,6 +89,7 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
89 {"rx_header_split", IXGBE_STAT(rx_hdr_split)}, 89 {"rx_header_split", IXGBE_STAT(rx_hdr_split)},
90 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, 90 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
91 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, 91 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
92 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
92}; 93};
93 94
94#define IXGBE_QUEUE_STATS_LEN \ 95#define IXGBE_QUEUE_STATS_LEN \
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 65642355a836..d0b98708e6ce 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -47,12 +47,13 @@ char ixgbe_driver_name[] = "ixgbe";
47static const char ixgbe_driver_string[] = 47static const char ixgbe_driver_string[] =
48 "Intel(R) 10 Gigabit PCI Express Network Driver"; 48 "Intel(R) 10 Gigabit PCI Express Network Driver";
49 49
50#define DRV_VERSION "1.3.56-k2" 50#define DRV_VERSION "2.0.8-k2"
51const char ixgbe_driver_version[] = DRV_VERSION; 51const char ixgbe_driver_version[] = DRV_VERSION;
52static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation."; 52static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
53 53
54static const struct ixgbe_info *ixgbe_info_tbl[] = { 54static const struct ixgbe_info *ixgbe_info_tbl[] = {
55 [board_82598] = &ixgbe_82598_info, 55 [board_82598] = &ixgbe_82598_info,
56 [board_82599] = &ixgbe_82599_info,
56}; 57};
57 58
58/* ixgbe_pci_tbl - PCI Device ID Table 59/* ixgbe_pci_tbl - PCI Device ID Table
@@ -86,6 +87,10 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
86 board_82598 }, 87 board_82598 },
87 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), 88 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
88 board_82598 }, 89 board_82598 },
90 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
91 board_82599 },
92 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
93 board_82599 },
89 94
90 /* required last entry */ 95 /* required last entry */
91 {0, } 96 {0, }
@@ -129,17 +134,53 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
129 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); 134 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
130} 135}
131 136
132static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry, 137/*
133 u8 msix_vector) 138 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
139 * @adapter: pointer to adapter struct
140 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
141 * @queue: queue to map the corresponding interrupt to
142 * @msix_vector: the vector to map to the corresponding queue
143 *
144 */
145static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
146 u8 queue, u8 msix_vector)
134{ 147{
135 u32 ivar, index; 148 u32 ivar, index;
136 149 struct ixgbe_hw *hw = &adapter->hw;
137 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 150 switch (hw->mac.type) {
138 index = (int_alloc_entry >> 2) & 0x1F; 151 case ixgbe_mac_82598EB:
139 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index)); 152 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
140 ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3))); 153 if (direction == -1)
141 ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3))); 154 direction = 0;
142 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); 155 index = (((direction * 64) + queue) >> 2) & 0x1F;
156 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
157 ivar &= ~(0xFF << (8 * (queue & 0x3)));
158 ivar |= (msix_vector << (8 * (queue & 0x3)));
159 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
160 break;
161 case ixgbe_mac_82599EB:
162 if (direction == -1) {
163 /* other causes */
164 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
165 index = ((queue & 1) * 8);
166 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
167 ivar &= ~(0xFF << index);
168 ivar |= (msix_vector << index);
169 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
170 break;
171 } else {
172 /* tx or rx causes */
173 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
174 index = ((16 * (queue & 1)) + (8 * direction));
175 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
176 ivar &= ~(0xFF << index);
177 ivar |= (msix_vector << index);
178 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
179 break;
180 }
181 default:
182 break;
183 }
143} 184}
144 185
145static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, 186static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
@@ -310,13 +351,19 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
310 351
311 if (rx_ring->cpu != cpu) { 352 if (rx_ring->cpu != cpu) {
312 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); 353 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
313 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; 354 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
314 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 355 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
356 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
357 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
358 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
359 rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
360 IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
361 }
315 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; 362 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
316 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; 363 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
317 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); 364 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
318 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | 365 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
319 IXGBE_DCA_RXCTRL_DESC_HSRO_EN); 366 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
320 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); 367 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
321 rx_ring->cpu = cpu; 368 rx_ring->cpu = cpu;
322 } 369 }
@@ -332,8 +379,14 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
332 379
333 if (tx_ring->cpu != cpu) { 380 if (tx_ring->cpu != cpu) {
334 txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q)); 381 txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
335 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; 382 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
336 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 383 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
384 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
385 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
386 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
387 txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
388 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
389 }
337 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; 390 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
338 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl); 391 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
339 tx_ring->cpu = cpu; 392 tx_ring->cpu = cpu;
@@ -464,6 +517,19 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
464 adapter->hw_csum_rx_good++; 517 adapter->hw_csum_rx_good++;
465} 518}
466 519
520static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
521 struct ixgbe_ring *rx_ring, u32 val)
522{
523 /*
524 * Force memory writes to complete before letting h/w
525 * know there are new descriptors to fetch. (Only
526 * applicable for weak-ordered memory model archs,
527 * such as IA-64).
528 */
529 wmb();
530 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
531}
532
467/** 533/**
468 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split 534 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
469 * @adapter: address of board private structure 535 * @adapter: address of board private structure
@@ -476,6 +542,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
476 union ixgbe_adv_rx_desc *rx_desc; 542 union ixgbe_adv_rx_desc *rx_desc;
477 struct ixgbe_rx_buffer *bi; 543 struct ixgbe_rx_buffer *bi;
478 unsigned int i; 544 unsigned int i;
545 unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
479 546
480 i = rx_ring->next_to_use; 547 i = rx_ring->next_to_use;
481 bi = &rx_ring->rx_buffer_info[i]; 548 bi = &rx_ring->rx_buffer_info[i];
@@ -505,9 +572,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
505 572
506 if (!bi->skb) { 573 if (!bi->skb) {
507 struct sk_buff *skb; 574 struct sk_buff *skb;
508 skb = netdev_alloc_skb(adapter->netdev, 575 skb = netdev_alloc_skb(adapter->netdev, bufsz);
509 (rx_ring->rx_buf_len +
510 NET_IP_ALIGN));
511 576
512 if (!skb) { 577 if (!skb) {
513 adapter->alloc_rx_buff_failed++; 578 adapter->alloc_rx_buff_failed++;
@@ -522,8 +587,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
522 skb_reserve(skb, NET_IP_ALIGN); 587 skb_reserve(skb, NET_IP_ALIGN);
523 588
524 bi->skb = skb; 589 bi->skb = skb;
525 bi->dma = pci_map_single(pdev, skb->data, 590 bi->dma = pci_map_single(pdev, skb->data, bufsz,
526 rx_ring->rx_buf_len,
527 PCI_DMA_FROMDEVICE); 591 PCI_DMA_FROMDEVICE);
528 } 592 }
529 /* Refresh the desc even if buffer_addrs didn't change because 593 /* Refresh the desc even if buffer_addrs didn't change because
@@ -547,14 +611,7 @@ no_buffers:
547 if (i-- == 0) 611 if (i-- == 0)
548 i = (rx_ring->count - 1); 612 i = (rx_ring->count - 1);
549 613
550 /* 614 ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
551 * Force memory writes to complete before letting h/w
552 * know there are new descriptors to fetch. (Only
553 * applicable for weak-ordered memory model archs,
554 * such as IA-64).
555 */
556 wmb();
557 writel(i, adapter->hw.hw_addr + rx_ring->tail);
558 } 615 }
559} 616}
560 617
@@ -732,7 +789,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
732 789
733 for (i = 0; i < q_vector->rxr_count; i++) { 790 for (i = 0; i < q_vector->rxr_count; i++) {
734 j = adapter->rx_ring[r_idx].reg_idx; 791 j = adapter->rx_ring[r_idx].reg_idx;
735 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx); 792 ixgbe_set_ivar(adapter, 0, j, v_idx);
736 r_idx = find_next_bit(q_vector->rxr_idx, 793 r_idx = find_next_bit(q_vector->rxr_idx,
737 adapter->num_rx_queues, 794 adapter->num_rx_queues,
738 r_idx + 1); 795 r_idx + 1);
@@ -742,7 +799,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
742 799
743 for (i = 0; i < q_vector->txr_count; i++) { 800 for (i = 0; i < q_vector->txr_count; i++) {
744 j = adapter->tx_ring[r_idx].reg_idx; 801 j = adapter->tx_ring[r_idx].reg_idx;
745 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx); 802 ixgbe_set_ivar(adapter, 1, j, v_idx);
746 r_idx = find_next_bit(q_vector->txr_idx, 803 r_idx = find_next_bit(q_vector->txr_idx,
747 adapter->num_tx_queues, 804 adapter->num_tx_queues,
748 r_idx + 1); 805 r_idx + 1);
@@ -759,7 +816,11 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
759 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); 816 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
760 } 817 }
761 818
762 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx); 819 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
820 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
821 v_idx);
822 else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
823 ixgbe_set_ivar(adapter, -1, 1, v_idx);
763 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); 824 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
764 825
765 /* set up to autoclear timer, and the vectors */ 826 /* set up to autoclear timer, and the vectors */
@@ -897,6 +958,9 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
897 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 958 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
898 q_vector->eitr = new_itr; 959 q_vector->eitr = new_itr;
899 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); 960 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
961 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
962 /* Resolution is 2 usec on 82599, so halve the rate */
963 itr_reg >>= 1;
900 /* must write high and low 16 bits to reset counter */ 964 /* must write high and low 16 bits to reset counter */
901 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx, 965 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
902 itr_reg); 966 itr_reg);
@@ -918,6 +982,24 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
918 } 982 }
919} 983}
920 984
985static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
986{
987 struct ixgbe_hw *hw = &adapter->hw;
988
989 if (eicr & IXGBE_EICR_GPI_SDP1) {
990 /* Clear the interrupt */
991 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
992 schedule_work(&adapter->multispeed_fiber_task);
993 } else if (eicr & IXGBE_EICR_GPI_SDP2) {
994 /* Clear the interrupt */
995 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
996 schedule_work(&adapter->sfp_config_module_task);
997 } else {
998 /* Interrupt isn't for us... */
999 return;
1000 }
1001}
1002
921static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) 1003static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
922{ 1004{
923 struct ixgbe_hw *hw = &adapter->hw; 1005 struct ixgbe_hw *hw = &adapter->hw;
@@ -950,8 +1032,11 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
950 if (eicr & IXGBE_EICR_LSC) 1032 if (eicr & IXGBE_EICR_LSC)
951 ixgbe_check_lsc(adapter); 1033 ixgbe_check_lsc(adapter);
952 1034
953 ixgbe_check_fan_failure(adapter, eicr); 1035 if (hw->mac.type == ixgbe_mac_82598EB)
1036 ixgbe_check_fan_failure(adapter, eicr);
954 1037
1038 if (hw->mac.type == ixgbe_mac_82599EB)
1039 ixgbe_check_sfp_event(adapter, eicr);
955 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1040 if (!test_bit(__IXGBE_DOWN, &adapter->state))
956 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 1041 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
957 1042
@@ -1314,6 +1399,9 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1314 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 1399 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
1315 q_vector->eitr = new_itr; 1400 q_vector->eitr = new_itr;
1316 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); 1401 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
1402 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
1403 /* Resolution is 2 usec on 82599, so halve the rate */
1404 itr_reg >>= 1;
1317 /* must write high and low 16 bits to reset counter */ 1405 /* must write high and low 16 bits to reset counter */
1318 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16); 1406 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
1319 } 1407 }
@@ -1328,6 +1416,10 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1328static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) 1416static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1329{ 1417{
1330 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 1418 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
1419 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1420 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
1421 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(2), ~0);
1422 }
1331 IXGBE_WRITE_FLUSH(&adapter->hw); 1423 IXGBE_WRITE_FLUSH(&adapter->hw);
1332 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1424 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1333 int i; 1425 int i;
@@ -1348,7 +1440,20 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1348 mask = IXGBE_EIMS_ENABLE_MASK; 1440 mask = IXGBE_EIMS_ENABLE_MASK;
1349 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 1441 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
1350 mask |= IXGBE_EIMS_GPI_SDP1; 1442 mask |= IXGBE_EIMS_GPI_SDP1;
1443 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1444 mask |= IXGBE_EIMS_GPI_SDP1;
1445 mask |= IXGBE_EIMS_GPI_SDP2;
1446 }
1447
1351 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1448 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1449 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1450 /* enable the rest of the queue vectors */
1451 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1),
1452 (IXGBE_EIMS_RTX_QUEUE << 16));
1453 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2),
1454 ((IXGBE_EIMS_RTX_QUEUE << 16) |
1455 IXGBE_EIMS_RTX_QUEUE));
1456 }
1352 IXGBE_WRITE_FLUSH(&adapter->hw); 1457 IXGBE_WRITE_FLUSH(&adapter->hw);
1353} 1458}
1354 1459
@@ -1384,6 +1489,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
1384 if (eicr & IXGBE_EICR_LSC) 1489 if (eicr & IXGBE_EICR_LSC)
1385 ixgbe_check_lsc(adapter); 1490 ixgbe_check_lsc(adapter);
1386 1491
1492 if (hw->mac.type == ixgbe_mac_82599EB)
1493 ixgbe_check_sfp_event(adapter, eicr);
1494
1387 ixgbe_check_fan_failure(adapter, eicr); 1495 ixgbe_check_fan_failure(adapter, eicr);
1388 1496
1389 if (napi_schedule_prep(&adapter->q_vector[0].napi)) { 1497 if (napi_schedule_prep(&adapter->q_vector[0].napi)) {
@@ -1474,8 +1582,8 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
1474 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), 1582 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
1475 EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param)); 1583 EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
1476 1584
1477 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0); 1585 ixgbe_set_ivar(adapter, 0, 0, 0);
1478 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0); 1586 ixgbe_set_ivar(adapter, 1, 0, 0);
1479 1587
1480 map_vector_to_rxq(adapter, 0, 0); 1588 map_vector_to_rxq(adapter, 0, 0);
1481 map_vector_to_txq(adapter, 0, 0); 1589 map_vector_to_txq(adapter, 0, 0);
@@ -1516,26 +1624,25 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1516 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1624 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1517 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 1625 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
1518 } 1626 }
1627 if (hw->mac.type == ixgbe_mac_82599EB) {
1628 /* We enable 8 traffic classes, DCB only */
1629 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
1630 IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
1631 IXGBE_MTQC_8TC_8TQ));
1632 }
1519} 1633}
1520 1634
1521#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1635#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1522 1636
1523static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) 1637static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
1524{ 1638{
1525 struct ixgbe_ring *rx_ring; 1639 struct ixgbe_ring *rx_ring;
1526 u32 srrctl; 1640 u32 srrctl;
1527 int queue0; 1641 int queue0 = 0;
1528 unsigned long mask; 1642 unsigned long mask;
1529 1643
1530 /* program one srrctl register per VMDq index */ 1644 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1531 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { 1645 queue0 = index;
1532 long shift, len;
1533 mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
1534 len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8;
1535 shift = find_first_bit(&mask, len);
1536 queue0 = index & mask;
1537 index = (index & mask) >> shift;
1538 /* program one srrctl per RSS queue since RDRXCTL.MVMEN is enabled */
1539 } else { 1646 } else {
1540 mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask; 1647 mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
1541 queue0 = index & mask; 1648 queue0 = index & mask;
@@ -1572,6 +1679,9 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
1572 srrctl |= rx_ring->rx_buf_len >> 1679 srrctl |= rx_ring->rx_buf_len >>
1573 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1680 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1574 } 1681 }
1682 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
1683 srrctl |= IXGBE_SRRCTL_DROP_EN;
1684
1575 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); 1685 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
1576} 1686}
1577 1687
@@ -1603,6 +1713,14 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1603 /* Set the RX buffer length according to the mode */ 1713 /* Set the RX buffer length according to the mode */
1604 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 1714 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1605 rx_buf_len = IXGBE_RX_HDR_SIZE; 1715 rx_buf_len = IXGBE_RX_HDR_SIZE;
1716 if (hw->mac.type == ixgbe_mac_82599EB) {
1717 /* PSRTYPE must be initialized in 82599 */
1718 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
1719 IXGBE_PSRTYPE_UDPHDR |
1720 IXGBE_PSRTYPE_IPV4HDR |
1721 IXGBE_PSRTYPE_IPV6HDR;
1722 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
1723 }
1606 } else { 1724 } else {
1607 if (netdev->mtu <= ETH_DATA_LEN) 1725 if (netdev->mtu <= ETH_DATA_LEN)
1608 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1726 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
@@ -1613,6 +1731,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1613 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 1731 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1614 fctrl |= IXGBE_FCTRL_BAM; 1732 fctrl |= IXGBE_FCTRL_BAM;
1615 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ 1733 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
1734 fctrl |= IXGBE_FCTRL_PMCF;
1616 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 1735 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1617 1736
1618 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 1737 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
@@ -1644,23 +1763,43 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1644 ixgbe_configure_srrctl(adapter, j); 1763 ixgbe_configure_srrctl(adapter, j);
1645 } 1764 }
1646 1765
1647 /* 1766 if (hw->mac.type == ixgbe_mac_82598EB) {
1648 * For VMDq support of different descriptor types or 1767 /*
1649 * buffer sizes through the use of multiple SRRCTL 1768 * For VMDq support of different descriptor types or
1650 * registers, RDRXCTL.MVMEN must be set to 1 1769 * buffer sizes through the use of multiple SRRCTL
1651 * 1770 * registers, RDRXCTL.MVMEN must be set to 1
1652 * also, the manual doesn't mention it clearly but DCA hints 1771 *
1653 * will only use queue 0's tags unless this bit is set. Side 1772 * also, the manual doesn't mention it clearly but DCA hints
1654 * effects of setting this bit are only that SRRCTL must be 1773 * will only use queue 0's tags unless this bit is set. Side
1655 * fully programmed [0..15] 1774 * effects of setting this bit are only that SRRCTL must be
1656 */ 1775 * fully programmed [0..15]
1657 if (adapter->flags & 1776 */
1658 (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) { 1777 if (adapter->flags &
1659 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 1778 (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) {
1660 rdrxctl |= IXGBE_RDRXCTL_MVMEN; 1779 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1661 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 1780 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
1781 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1782 }
1662 } 1783 }
1663 1784
1785 /* Program MRQC for the distribution of queues */
1786 if (hw->mac.type == ixgbe_mac_82599EB) {
1787 int mask = adapter->flags & (
1788 IXGBE_FLAG_RSS_ENABLED
1789 | IXGBE_FLAG_DCB_ENABLED
1790 );
1791
1792 switch (mask) {
1793 case (IXGBE_FLAG_RSS_ENABLED):
1794 mrqc = IXGBE_MRQC_RSSEN;
1795 break;
1796 case (IXGBE_FLAG_DCB_ENABLED):
1797 mrqc = IXGBE_MRQC_RT8TCEN;
1798 break;
1799 default:
1800 break;
1801 }
1802 }
1664 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 1803 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
1665 /* Fill out redirection table */ 1804 /* Fill out redirection table */
1666 for (i = 0, j = 0; i < 128; i++, j++) { 1805 for (i = 0, j = 0; i < 128; i++, j++) {
@@ -1682,12 +1821,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1682 | IXGBE_MRQC_RSS_FIELD_IPV4 1821 | IXGBE_MRQC_RSS_FIELD_IPV4
1683 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP 1822 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
1684 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP 1823 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
1685 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
1686 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
1687 | IXGBE_MRQC_RSS_FIELD_IPV6 1824 | IXGBE_MRQC_RSS_FIELD_IPV6
1688 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP 1825 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
1689 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP 1826 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1690 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
1691 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 1827 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
1692 } 1828 }
1693 1829
@@ -1706,6 +1842,12 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1706 } 1842 }
1707 1843
1708 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 1844 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1845
1846 if (hw->mac.type == ixgbe_mac_82599EB) {
1847 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1848 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
1849 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1850 }
1709} 1851}
1710 1852
1711static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1853static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -1739,6 +1881,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
1739{ 1881{
1740 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1882 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1741 u32 ctrl; 1883 u32 ctrl;
1884 int i, j;
1742 1885
1743 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1886 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1744 ixgbe_irq_disable(adapter); 1887 ixgbe_irq_disable(adapter);
@@ -1750,18 +1893,24 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
1750 * not in DCB mode. 1893 * not in DCB mode.
1751 */ 1894 */
1752 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 1895 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
1753 ctrl |= IXGBE_VLNCTRL_VME; 1896 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1754 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1897 ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
1755 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); 1898 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1756 ixgbe_vlan_rx_add_vid(netdev, 0); 1899 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1757 1900 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1758 if (grp) { 1901 ctrl |= IXGBE_VLNCTRL_VFE;
1759 /* enable VLAN tag insert/strip */ 1902 /* enable VLAN tag insert/strip */
1760 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 1903 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
1761 ctrl |= IXGBE_VLNCTRL_VME;
1762 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1904 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1763 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); 1905 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1906 for (i = 0; i < adapter->num_rx_queues; i++) {
1907 j = adapter->rx_ring[i].reg_idx;
1908 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
1909 ctrl |= IXGBE_RXDCTL_VME;
1910 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
1911 }
1764 } 1912 }
1913 ixgbe_vlan_rx_add_vid(netdev, 0);
1765 1914
1766 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1915 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1767 ixgbe_irq_enable(adapter); 1916 ixgbe_irq_enable(adapter);
@@ -1924,9 +2073,21 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
1924 } 2073 }
1925 /* Enable VLAN tag insert/strip */ 2074 /* Enable VLAN tag insert/strip */
1926 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2075 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1927 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; 2076 if (hw->mac.type == ixgbe_mac_82598EB) {
1928 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 2077 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
1929 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 2078 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2079 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2080 } else if (hw->mac.type == ixgbe_mac_82599EB) {
2081 vlnctrl |= IXGBE_VLNCTRL_VFE;
2082 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2083 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2084 for (i = 0; i < adapter->num_rx_queues; i++) {
2085 j = adapter->rx_ring[i].reg_idx;
2086 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2087 vlnctrl |= IXGBE_RXDCTL_VME;
2088 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2089 }
2090 }
1930 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 2091 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
1931} 2092}
1932 2093
@@ -1957,13 +2118,60 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
1957 (adapter->rx_ring[i].count - 1)); 2118 (adapter->rx_ring[i].count - 1));
1958} 2119}
1959 2120
2121static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2122{
2123 switch (hw->phy.type) {
2124 case ixgbe_phy_sfp_avago:
2125 case ixgbe_phy_sfp_ftl:
2126 case ixgbe_phy_sfp_intel:
2127 case ixgbe_phy_sfp_unknown:
2128 case ixgbe_phy_tw_tyco:
2129 case ixgbe_phy_tw_unknown:
2130 return true;
2131 default:
2132 return false;
2133 }
2134}
2135
2136/**
2137 * ixgbe_sfp_link_config - set up SFP+ link
2138 * @adapter: pointer to private adapter struct
2139 **/
2140static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
2141{
2142 struct ixgbe_hw *hw = &adapter->hw;
2143
2144 if (hw->phy.multispeed_fiber) {
2145 /*
2146 * In multispeed fiber setups, the device may not have
2147 * had a physical connection when the driver loaded.
2148 * If that's the case, the initial link configuration
2149 * couldn't get the MAC into 10G or 1G mode, so we'll
2150 * never have a link status change interrupt fire.
2151 * We need to try and force an autonegotiation
2152 * session, then bring up link.
2153 */
2154 hw->mac.ops.setup_sfp(hw);
2155 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
2156 schedule_work(&adapter->multispeed_fiber_task);
2157 } else {
2158 /*
2159 * Direct Attach Cu and non-multispeed fiber modules
2160 * still need to be configured properly prior to
2161 * attempting link.
2162 */
2163 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
2164 schedule_work(&adapter->sfp_config_module_task);
2165 }
2166}
2167
1960/** 2168/**
1961 * ixgbe_link_config - set up initial link with default speed and duplex 2169 * ixgbe_non_sfp_link_config - set up non-SFP+ link
1962 * @hw: pointer to private hardware struct 2170 * @hw: pointer to private hardware struct
1963 * 2171 *
1964 * Returns 0 on success, negative on failure 2172 * Returns 0 on success, negative on failure
1965 **/ 2173 **/
1966static int ixgbe_link_config(struct ixgbe_hw *hw) 2174static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
1967{ 2175{
1968 u32 autoneg; 2176 u32 autoneg;
1969 bool link_up = false; 2177 bool link_up = false;
@@ -1983,19 +2191,42 @@ static int ixgbe_link_config(struct ixgbe_hw *hw)
1983 2191
1984 if (hw->mac.ops.setup_link_speed) 2192 if (hw->mac.ops.setup_link_speed)
1985 ret = hw->mac.ops.setup_link_speed(hw, autoneg, true, link_up); 2193 ret = hw->mac.ops.setup_link_speed(hw, autoneg, true, link_up);
1986
1987link_cfg_out: 2194link_cfg_out:
1988 return ret; 2195 return ret;
1989} 2196}
1990 2197
2198#define IXGBE_MAX_RX_DESC_POLL 10
2199static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2200 int rxr)
2201{
2202 int j = adapter->rx_ring[rxr].reg_idx;
2203 int k;
2204
2205 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
2206 if (IXGBE_READ_REG(&adapter->hw,
2207 IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
2208 break;
2209 else
2210 msleep(1);
2211 }
2212 if (k >= IXGBE_MAX_RX_DESC_POLL) {
2213 DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
2214 "not set within the polling period\n", rxr);
2215 }
2216 ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
2217 (adapter->rx_ring[rxr].count - 1));
2218}
2219
1991static int ixgbe_up_complete(struct ixgbe_adapter *adapter) 2220static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1992{ 2221{
1993 struct net_device *netdev = adapter->netdev; 2222 struct net_device *netdev = adapter->netdev;
1994 struct ixgbe_hw *hw = &adapter->hw; 2223 struct ixgbe_hw *hw = &adapter->hw;
1995 int i, j = 0; 2224 int i, j = 0;
2225 int num_rx_rings = adapter->num_rx_queues;
1996 int err; 2226 int err;
1997 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 2227 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1998 u32 txdctl, rxdctl, mhadd; 2228 u32 txdctl, rxdctl, mhadd;
2229 u32 dmatxctl;
1999 u32 gpie; 2230 u32 gpie;
2000 2231
2001 ixgbe_get_hw_control(adapter); 2232 ixgbe_get_hw_control(adapter);
@@ -2027,6 +2258,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2027 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2258 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2028 } 2259 }
2029 2260
2261 if (hw->mac.type == ixgbe_mac_82599EB) {
2262 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2263 gpie |= IXGBE_SDP1_GPIEN;
2264 gpie |= IXGBE_SDP2_GPIEN;
2265 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2266 }
2267
2030 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 2268 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2031 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { 2269 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
2032 mhadd &= ~IXGBE_MHADD_MFS_MASK; 2270 mhadd &= ~IXGBE_MHADD_MFS_MASK;
@@ -2040,11 +2278,23 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2040 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 2278 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2041 /* enable WTHRESH=8 descriptors, to encourage burst writeback */ 2279 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2042 txdctl |= (8 << 16); 2280 txdctl |= (8 << 16);
2281 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2282 }
2283
2284 if (hw->mac.type == ixgbe_mac_82599EB) {
2285 /* DMATXCTL.EN must be set after all Tx queue config is done */
2286 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2287 dmatxctl |= IXGBE_DMATXCTL_TE;
2288 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2289 }
2290 for (i = 0; i < adapter->num_tx_queues; i++) {
2291 j = adapter->tx_ring[i].reg_idx;
2292 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2043 txdctl |= IXGBE_TXDCTL_ENABLE; 2293 txdctl |= IXGBE_TXDCTL_ENABLE;
2044 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 2294 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2045 } 2295 }
2046 2296
2047 for (i = 0; i < adapter->num_rx_queues; i++) { 2297 for (i = 0; i < num_rx_rings; i++) {
2048 j = adapter->rx_ring[i].reg_idx; 2298 j = adapter->rx_ring[i].reg_idx;
2049 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 2299 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2050 /* enable PTHRESH=32 descriptors (half the internal cache) 2300 /* enable PTHRESH=32 descriptors (half the internal cache)
@@ -2053,11 +2303,16 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2053 rxdctl |= 0x0020; 2303 rxdctl |= 0x0020;
2054 rxdctl |= IXGBE_RXDCTL_ENABLE; 2304 rxdctl |= IXGBE_RXDCTL_ENABLE;
2055 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl); 2305 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
2306 if (hw->mac.type == ixgbe_mac_82599EB)
2307 ixgbe_rx_desc_queue_enable(adapter, i);
2056 } 2308 }
2057 /* enable all receives */ 2309 /* enable all receives */
2058 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2310 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2059 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN); 2311 if (hw->mac.type == ixgbe_mac_82598EB)
2060 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl); 2312 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
2313 else
2314 rxdctl |= IXGBE_RXCTRL_RXEN;
2315 hw->mac.ops.enable_rx_dma(hw, rxdctl);
2061 2316
2062 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 2317 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2063 ixgbe_configure_msix(adapter); 2318 ixgbe_configure_msix(adapter);
@@ -2074,9 +2329,27 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2074 2329
2075 ixgbe_irq_enable(adapter); 2330 ixgbe_irq_enable(adapter);
2076 2331
2077 err = ixgbe_link_config(hw); 2332 /*
2078 if (err) 2333 * For hot-pluggable SFP+ devices, a new SFP+ module may have
2079 dev_err(&adapter->pdev->dev, "link_config FAILED %d\n", err); 2334 * arrived before interrupts were enabled. We need to kick off
2335 * the SFP+ module setup first, then try to bring up link.
2336 * If we're not hot-pluggable SFP+, we just need to configure link
2337 * and bring it up.
2338 */
2339 err = hw->phy.ops.identify(hw);
2340 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2341 DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
2342 ixgbe_down(adapter);
2343 return err;
2344 }
2345
2346 if (ixgbe_is_sfp(hw)) {
2347 ixgbe_sfp_link_config(adapter);
2348 } else {
2349 err = ixgbe_non_sfp_link_config(hw);
2350 if (err)
2351 DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
2352 }
2080 2353
2081 /* enable transmits */ 2354 /* enable transmits */
2082 netif_tx_start_all_queues(netdev); 2355 netif_tx_start_all_queues(netdev);
@@ -2506,6 +2779,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
2506 adapter->tx_ring[i].reg_idx = i << 2; 2779 adapter->tx_ring[i].reg_idx = i << 2;
2507 } 2780 }
2508 ret = true; 2781 ret = true;
2782 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
2783 for (i = 0; i < dcb_i; i++) {
2784 adapter->rx_ring[i].reg_idx = i << 4;
2785 adapter->tx_ring[i].reg_idx = i << 4;
2786 }
2787 ret = true;
2509 } else { 2788 } else {
2510 ret = false; 2789 ret = false;
2511 } 2790 }
@@ -2801,7 +3080,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2801 adapter->ring_feature[RING_F_RSS].indices = rss; 3080 adapter->ring_feature[RING_F_RSS].indices = rss;
2802 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 3081 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
2803 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; 3082 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
2804 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 3083 if (hw->mac.type == ixgbe_mac_82598EB)
3084 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
3085 else if (hw->mac.type == ixgbe_mac_82599EB)
3086 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
2805 3087
2806#ifdef CONFIG_IXGBE_DCB 3088#ifdef CONFIG_IXGBE_DCB
2807 /* Configure DCB traffic classes */ 3089 /* Configure DCB traffic classes */
@@ -2822,9 +3104,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2822 adapter->ring_feature[RING_F_DCB].indices); 3104 adapter->ring_feature[RING_F_DCB].indices);
2823 3105
2824#endif 3106#endif
2825 if (hw->mac.ops.get_media_type &&
2826 (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper))
2827 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
2828 3107
2829 /* default flow control settings */ 3108 /* default flow control settings */
2830 hw->fc.requested_mode = ixgbe_fc_none; 3109 hw->fc.requested_mode = ixgbe_fc_none;
@@ -3272,6 +3551,9 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
3272{ 3551{
3273 struct net_device *netdev = pci_get_drvdata(pdev); 3552 struct net_device *netdev = pci_get_drvdata(pdev);
3274 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3553 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3554 struct ixgbe_hw *hw = &adapter->hw;
3555 u32 ctrl, fctrl;
3556 u32 wufc = adapter->wol;
3275#ifdef CONFIG_PM 3557#ifdef CONFIG_PM
3276 int retval = 0; 3558 int retval = 0;
3277#endif 3559#endif
@@ -3295,9 +3577,33 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
3295 if (retval) 3577 if (retval)
3296 return retval; 3578 return retval;
3297#endif 3579#endif
3580 if (wufc) {
3581 ixgbe_set_rx_mode(netdev);
3298 3582
3299 pci_enable_wake(pdev, PCI_D3hot, 0); 3583 /* turn on all-multi mode if wake on multicast is enabled */
3300 pci_enable_wake(pdev, PCI_D3cold, 0); 3584 if (wufc & IXGBE_WUFC_MC) {
3585 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3586 fctrl |= IXGBE_FCTRL_MPE;
3587 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3588 }
3589
3590 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
3591 ctrl |= IXGBE_CTRL_GIO_DIS;
3592 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
3593
3594 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
3595 } else {
3596 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3597 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3598 }
3599
3600 if (wufc && hw->mac.type == ixgbe_mac_82599EB) {
3601 pci_enable_wake(pdev, PCI_D3hot, 1);
3602 pci_enable_wake(pdev, PCI_D3cold, 1);
3603 } else {
3604 pci_enable_wake(pdev, PCI_D3hot, 0);
3605 pci_enable_wake(pdev, PCI_D3cold, 0);
3606 }
3301 3607
3302 ixgbe_release_hw_control(adapter); 3608 ixgbe_release_hw_control(adapter);
3303 3609
@@ -3330,32 +3636,56 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
3330 missed_rx += mpc; 3636 missed_rx += mpc;
3331 adapter->stats.mpc[i] += mpc; 3637 adapter->stats.mpc[i] += mpc;
3332 total_mpc += adapter->stats.mpc[i]; 3638 total_mpc += adapter->stats.mpc[i];
3333 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 3639 if (hw->mac.type == ixgbe_mac_82598EB)
3640 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3334 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 3641 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3335 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 3642 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
3336 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 3643 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3337 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); 3644 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
3338 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, 3645 if (hw->mac.type == ixgbe_mac_82599EB) {
3339 IXGBE_PXONRXC(i)); 3646 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
3647 IXGBE_PXONRXCNT(i));
3648 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
3649 IXGBE_PXOFFRXCNT(i));
3650 adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3651 adapter->hw_rx_no_dma_resources += adapter->stats.qprdc[i];
3652 } else {
3653 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
3654 IXGBE_PXONRXC(i));
3655 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
3656 IXGBE_PXOFFRXC(i));
3657 }
3340 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw, 3658 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
3341 IXGBE_PXONTXC(i)); 3659 IXGBE_PXONTXC(i));
3342 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
3343 IXGBE_PXOFFRXC(i));
3344 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw, 3660 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
3345 IXGBE_PXOFFTXC(i)); 3661 IXGBE_PXOFFTXC(i));
3346 } 3662 }
3347 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 3663 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3348 /* work around hardware counting issue */ 3664 /* work around hardware counting issue */
3349 adapter->stats.gprc -= missed_rx; 3665 adapter->stats.gprc -= missed_rx;
3350 3666
3351 /* 82598 hardware only has a 32 bit counter in the high register */ 3667 /* 82598 hardware only has a 32 bit counter in the high register */
3352 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 3668 if (hw->mac.type == ixgbe_mac_82599EB) {
3353 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 3669 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
3354 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH); 3670 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
3671 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
3672 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
3673 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
3674 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
3675 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3676 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3677 } else {
3678 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3679 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3680 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3681 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3682 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3683 }
3355 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 3684 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3356 adapter->stats.bprc += bprc; 3685 adapter->stats.bprc += bprc;
3357 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 3686 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3358 adapter->stats.mprc -= bprc; 3687 if (hw->mac.type == ixgbe_mac_82598EB)
3688 adapter->stats.mprc -= bprc;
3359 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC); 3689 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3360 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 3690 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3361 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 3691 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
@@ -3364,8 +3694,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
3364 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 3694 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3365 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 3695 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3366 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 3696 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3367 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3368 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3369 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 3697 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3370 adapter->stats.lxontxc += lxon; 3698 adapter->stats.lxontxc += lxon;
3371 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 3699 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
@@ -3438,6 +3766,55 @@ static void ixgbe_watchdog(unsigned long data)
3438} 3766}
3439 3767
3440/** 3768/**
3769 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
3770 * @work: pointer to work_struct containing our data
3771 **/
3772static void ixgbe_multispeed_fiber_task(struct work_struct *work)
3773{
3774 struct ixgbe_adapter *adapter = container_of(work,
3775 struct ixgbe_adapter,
3776 multispeed_fiber_task);
3777 struct ixgbe_hw *hw = &adapter->hw;
3778 u32 autoneg;
3779
3780 adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
3781 if (hw->mac.ops.get_link_capabilities)
3782 hw->mac.ops.get_link_capabilities(hw, &autoneg,
3783 &hw->mac.autoneg);
3784 if (hw->mac.ops.setup_link_speed)
3785 hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
3786 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3787 adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
3788}
3789
3790/**
3791 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
3792 * @work: pointer to work_struct containing our data
3793 **/
3794static void ixgbe_sfp_config_module_task(struct work_struct *work)
3795{
3796 struct ixgbe_adapter *adapter = container_of(work,
3797 struct ixgbe_adapter,
3798 sfp_config_module_task);
3799 struct ixgbe_hw *hw = &adapter->hw;
3800 u32 err;
3801
3802 adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
3803 err = hw->phy.ops.identify_sfp(hw);
3804 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3805 DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
3806 ixgbe_down(adapter);
3807 return;
3808 }
3809 hw->mac.ops.setup_sfp(hw);
3810
3811 if (!adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)
3812 /* This will also work for DA Twinax connections */
3813 schedule_work(&adapter->multispeed_fiber_task);
3814 adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
3815}
3816
3817/**
3441 * ixgbe_watchdog_task - worker thread to bring link up 3818 * ixgbe_watchdog_task - worker thread to bring link up
3442 * @work: pointer to work_struct containing our data 3819 * @work: pointer to work_struct containing our data
3443 **/ 3820 **/
@@ -3467,10 +3844,20 @@ static void ixgbe_watchdog_task(struct work_struct *work)
3467 3844
3468 if (link_up) { 3845 if (link_up) {
3469 if (!netif_carrier_ok(netdev)) { 3846 if (!netif_carrier_ok(netdev)) {
3470 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3847 bool flow_rx, flow_tx;
3471 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); 3848
3472#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE) 3849 if (hw->mac.type == ixgbe_mac_82599EB) {
3473#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X) 3850 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
3851 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
3852 flow_rx = (mflcn & IXGBE_MFLCN_RFCE);
3853 flow_tx = (fccfg & IXGBE_FCCFG_TFCE_802_3X);
3854 } else {
3855 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3856 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
3857 flow_rx = (frctl & IXGBE_FCTRL_RFCE);
3858 flow_tx = (rmcs & IXGBE_RMCS_TFCE_802_3X);
3859 }
3860
3474 printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, " 3861 printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
3475 "Flow Control: %s\n", 3862 "Flow Control: %s\n",
3476 netdev->name, 3863 netdev->name,
@@ -3478,9 +3865,9 @@ static void ixgbe_watchdog_task(struct work_struct *work)
3478 "10 Gbps" : 3865 "10 Gbps" :
3479 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 3866 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
3480 "1 Gbps" : "unknown speed")), 3867 "1 Gbps" : "unknown speed")),
3481 ((FLOW_RX && FLOW_TX) ? "RX/TX" : 3868 ((flow_rx && flow_tx) ? "RX/TX" :
3482 (FLOW_RX ? "RX" : 3869 (flow_rx ? "RX" :
3483 (FLOW_TX ? "TX" : "None")))); 3870 (flow_tx ? "TX" : "None"))));
3484 3871
3485 netif_carrier_on(netdev); 3872 netif_carrier_on(netdev);
3486 } else { 3873 } else {
@@ -3987,7 +4374,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3987 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 4374 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
3988 static int cards_found; 4375 static int cards_found;
3989 int i, err, pci_using_dac; 4376 int i, err, pci_using_dac;
3990 u16 link_status, link_speed, link_width; 4377 u16 pm_value = 0;
3991 u32 part_num, eec; 4378 u32 part_num, eec;
3992 4379
3993 err = pci_enable_device(pdev); 4380 err = pci_enable_device(pdev);
@@ -4086,6 +4473,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
4086 4473
4087 INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task); 4474 INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
4088 4475
4476 /* multispeed fiber has its own tasklet, called from GPI SDP1 context */
4477 INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);
4478
4479 /* a new SFP+ module arrival, called from GPI SDP2 context */
4480 INIT_WORK(&adapter->sfp_config_module_task,
4481 ixgbe_sfp_config_module_task);
4482
4089 err = ii->get_invariants(hw); 4483 err = ii->get_invariants(hw);
4090 if (err == IXGBE_ERR_SFP_NOT_PRESENT) { 4484 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
4091 /* start a kernel thread to watch for a module to arrive */ 4485 /* start a kernel thread to watch for a module to arrive */
@@ -4166,26 +4560,41 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
4166 if (err) 4560 if (err)
4167 goto err_sw_init; 4561 goto err_sw_init;
4168 4562
4563 switch (pdev->device) {
4564 case IXGBE_DEV_ID_82599_KX4:
4565#define IXGBE_PCIE_PMCSR 0x44
4566 adapter->wol = IXGBE_WUFC_MAG;
4567 pci_read_config_word(pdev, IXGBE_PCIE_PMCSR, &pm_value);
4568 pci_write_config_word(pdev, IXGBE_PCIE_PMCSR,
4569 (pm_value | (1 << 8)));
4570 break;
4571 default:
4572 adapter->wol = 0;
4573 break;
4574 }
4575 device_init_wakeup(&adapter->pdev->dev, true);
4576 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
4577
4169 /* print bus type/speed/width info */ 4578 /* print bus type/speed/width info */
4170 pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
4171 link_speed = link_status & IXGBE_PCI_LINK_SPEED;
4172 link_width = link_status & IXGBE_PCI_LINK_WIDTH;
4173 dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n", 4579 dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
4174 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : 4580 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
4175 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : 4581 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
4176 "Unknown"), 4582 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
4177 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" : 4583 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
4178 (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" : 4584 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
4179 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
4180 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
4181 "Unknown"), 4585 "Unknown"),
4182 netdev->dev_addr); 4586 netdev->dev_addr);
4183 ixgbe_read_pba_num_generic(hw, &part_num); 4587 ixgbe_read_pba_num_generic(hw, &part_num);
4184 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 4588 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
4185 hw->mac.type, hw->phy.type, 4589 dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n",
4186 (part_num >> 8), (part_num & 0xff)); 4590 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
4591 (part_num >> 8), (part_num & 0xff));
4592 else
4593 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
4594 hw->mac.type, hw->phy.type,
4595 (part_num >> 8), (part_num & 0xff));
4187 4596
4188 if (link_width <= IXGBE_PCI_LINK_WIDTH_4) { 4597 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
4189 dev_warn(&pdev->dev, "PCI-Express bandwidth available for " 4598 dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
4190 "this card is not sufficient for optimal " 4599 "this card is not sufficient for optimal "
4191 "performance.\n"); 4600 "performance.\n");
@@ -4229,6 +4638,8 @@ err_eeprom:
4229 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 4638 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
4230 del_timer_sync(&adapter->sfp_timer); 4639 del_timer_sync(&adapter->sfp_timer);
4231 cancel_work_sync(&adapter->sfp_task); 4640 cancel_work_sync(&adapter->sfp_task);
4641 cancel_work_sync(&adapter->multispeed_fiber_task);
4642 cancel_work_sync(&adapter->sfp_config_module_task);
4232 iounmap(hw->hw_addr); 4643 iounmap(hw->hw_addr);
4233err_ioremap: 4644err_ioremap:
4234 free_netdev(netdev); 4645 free_netdev(netdev);
@@ -4265,6 +4676,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
4265 del_timer_sync(&adapter->sfp_timer); 4676 del_timer_sync(&adapter->sfp_timer);
4266 cancel_work_sync(&adapter->watchdog_task); 4677 cancel_work_sync(&adapter->watchdog_task);
4267 cancel_work_sync(&adapter->sfp_task); 4678 cancel_work_sync(&adapter->sfp_task);
4679 cancel_work_sync(&adapter->multispeed_fiber_task);
4680 cancel_work_sync(&adapter->sfp_config_module_task);
4268 flush_scheduled_work(); 4681 flush_scheduled_work();
4269 4682
4270#ifdef CONFIG_IXGBE_DCA 4683#ifdef CONFIG_IXGBE_DCA