Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	656
1 file changed, 521 insertions(+), 135 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 6c00ee493a3b..9551cbb7bf01 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -175,6 +175,345 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
 }
 
+struct ixgbe_reg_info {
+	u32 ofs;
+	char *name;
+};
+
+static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
+
+	/* General Registers */
+	{IXGBE_CTRL, "CTRL"},
+	{IXGBE_STATUS, "STATUS"},
+	{IXGBE_CTRL_EXT, "CTRL_EXT"},
+
+	/* Interrupt Registers */
+	{IXGBE_EICR, "EICR"},
+
+	/* RX Registers */
+	{IXGBE_SRRCTL(0), "SRRCTL"},
+	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
+	{IXGBE_RDLEN(0), "RDLEN"},
+	{IXGBE_RDH(0), "RDH"},
+	{IXGBE_RDT(0), "RDT"},
+	{IXGBE_RXDCTL(0), "RXDCTL"},
+	{IXGBE_RDBAL(0), "RDBAL"},
+	{IXGBE_RDBAH(0), "RDBAH"},
+
+	/* TX Registers */
+	{IXGBE_TDBAL(0), "TDBAL"},
+	{IXGBE_TDBAH(0), "TDBAH"},
+	{IXGBE_TDLEN(0), "TDLEN"},
+	{IXGBE_TDH(0), "TDH"},
+	{IXGBE_TDT(0), "TDT"},
+	{IXGBE_TXDCTL(0), "TXDCTL"},
+
+	/* List Terminator */
+	{}
+};
+
+
+/*
+ * ixgbe_regdump - register printout routine
+ */
+static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
+{
+	int i = 0, j = 0;
+	char rname[16];
+	u32 regs[64];
+
+	switch (reginfo->ofs) {
+	case IXGBE_SRRCTL(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+		break;
+	case IXGBE_DCA_RXCTRL(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+		break;
+	case IXGBE_RDLEN(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
+		break;
+	case IXGBE_RDH(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
+		break;
+	case IXGBE_RDT(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
+		break;
+	case IXGBE_RXDCTL(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+		break;
+	case IXGBE_RDBAL(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
+		break;
+	case IXGBE_RDBAH(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
+		break;
+	case IXGBE_TDBAL(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
+		break;
+	case IXGBE_TDBAH(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
+		break;
+	case IXGBE_TDLEN(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
+		break;
+	case IXGBE_TDH(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
+		break;
+	case IXGBE_TDT(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
+		break;
+	case IXGBE_TXDCTL(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+		break;
+	default:
+		printk(KERN_INFO "%-15s %08x\n", reginfo->name,
+			IXGBE_READ_REG(hw, reginfo->ofs));
+		return;
+	}
+
+	for (i = 0; i < 8; i++) {
+		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
+		printk(KERN_ERR "%-15s ", rname);
+		for (j = 0; j < 8; j++)
+			printk(KERN_CONT "%08x ", regs[i*8+j]);
+		printk(KERN_CONT "\n");
+	}
+
+}
+
+/*
+ * ixgbe_dump - Print registers, tx-rings and rx-rings
+ */
+static void ixgbe_dump(struct ixgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_reg_info *reginfo;
+	int n = 0;
+	struct ixgbe_ring *tx_ring;
+	struct ixgbe_tx_buffer *tx_buffer_info;
+	union ixgbe_adv_tx_desc *tx_desc;
+	struct my_u0 { u64 a; u64 b; } *u0;
+	struct ixgbe_ring *rx_ring;
+	union ixgbe_adv_rx_desc *rx_desc;
+	struct ixgbe_rx_buffer *rx_buffer_info;
+	u32 staterr;
+	int i = 0;
+
+	if (!netif_msg_hw(adapter))
+		return;
+
+	/* Print netdevice Info */
+	if (netdev) {
+		dev_info(&adapter->pdev->dev, "Net device Info\n");
+		printk(KERN_INFO "Device Name     state            "
+			"trans_start      last_rx\n");
+		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+			netdev->name,
+			netdev->state,
+			netdev->trans_start,
+			netdev->last_rx);
+	}
+
+	/* Print Registers */
+	dev_info(&adapter->pdev->dev, "Register Dump\n");
+	printk(KERN_INFO " Register Name   Value\n");
+	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
+	     reginfo->name; reginfo++) {
+		ixgbe_regdump(hw, reginfo);
+	}
+
+	/* Print TX Ring Summary */
+	if (!netdev || !netif_running(netdev))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ] "
+		"leng ntw timestamp\n");
+	for (n = 0; n < adapter->num_tx_queues; n++) {
+		tx_ring = adapter->tx_ring[n];
+		tx_buffer_info =
+			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+			n, tx_ring->next_to_use, tx_ring->next_to_clean,
+			(u64)tx_buffer_info->dma,
+			tx_buffer_info->length,
+			tx_buffer_info->next_to_watch,
+			(u64)tx_buffer_info->time_stamp);
+	}
+
+	/* Print TX Rings */
+	if (!netif_msg_tx_done(adapter))
+		goto rx_ring_summary;
+
+	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+	/* Transmit Descriptor Formats
+	 *
+	 * Advanced Transmit Descriptor
+	 *   +--------------------------------------------------------------+
+	 * 0 |         Buffer Address [63:0]                                |
+	 *   +--------------------------------------------------------------+
+	 * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN  |
+	 *   +--------------------------------------------------------------+
+	 *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
+	 */
+
+	for (n = 0; n < adapter->num_tx_queues; n++) {
+		tx_ring = adapter->tx_ring[n];
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "T [desc]     [address 63:0  ] "
+			"[PlPOIdStDDt Ln] [bi->dma       ] "
+			"leng ntw timestamp bi->skb\n");
+
+		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+			tx_buffer_info = &tx_ring->tx_buffer_info[i];
+			u0 = (struct my_u0 *)tx_desc;
+			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
+				" %04X %3X %016llX %p", i,
+				le64_to_cpu(u0->a),
+				le64_to_cpu(u0->b),
+				(u64)tx_buffer_info->dma,
+				tx_buffer_info->length,
+				tx_buffer_info->next_to_watch,
+				(u64)tx_buffer_info->time_stamp,
+				tx_buffer_info->skb);
+			if (i == tx_ring->next_to_use &&
+				i == tx_ring->next_to_clean)
+				printk(KERN_CONT " NTC/U\n");
+			else if (i == tx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == tx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+
+			if (netif_msg_pktdata(adapter) &&
+				tx_buffer_info->dma != 0)
+				print_hex_dump(KERN_INFO, "",
+					DUMP_PREFIX_ADDRESS, 16, 1,
+					phys_to_virt(tx_buffer_info->dma),
+					tx_buffer_info->length, true);
+		}
+	}
+
+	/* Print RX Rings Summary */
+rx_ring_summary:
+	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC]\n");
+	for (n = 0; n < adapter->num_rx_queues; n++) {
+		rx_ring = adapter->rx_ring[n];
+		printk(KERN_INFO "%5d %5X %5X\n", n,
+			rx_ring->next_to_use, rx_ring->next_to_clean);
+	}
+
+	/* Print RX Rings */
+	if (!netif_msg_rx_status(adapter))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+	/* Advanced Receive Descriptor (Read) Format
+	 *    63                                           1        0
+	 *    +-----------------------------------------------------+
+	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
+	 *    +----------------------------------------------+------+
+	 *  8 |       Header Buffer Address [63:1]           |  DD  |
+	 *    +-----------------------------------------------------+
+	 *
+	 *
+	 * Advanced Receive Descriptor (Write-Back) Format
+	 *
+	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
+	 *   +------------------------------------------------------+
+	 * 0 | Packet   IP     |SPH| HDR_LEN  | RSV|Packet|  RSS    |
+	 *   | Checksum Ident  |   |          |    | Type | Type    |
+	 *   +------------------------------------------------------+
+	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+	 *   +------------------------------------------------------+
+	 *   63       48 47    32 31            20 19               0
+	 */
+	for (n = 0; n < adapter->num_rx_queues; n++) {
+		rx_ring = adapter->rx_ring[n];
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
+			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
+			"<-- Adv Rx Read format\n");
+		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
+			"[vl er S cks ln] ---------------- [bi->skb] "
+			"<-- Adv Rx Write-Back format\n");
+
+		for (i = 0; i < rx_ring->count; i++) {
+			rx_buffer_info = &rx_ring->rx_buffer_info[i];
+			rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+			u0 = (struct my_u0 *)rx_desc;
+			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+			if (staterr & IXGBE_RXD_STAT_DD) {
+				/* Descriptor Done */
+				printk(KERN_INFO "RWB[0x%03X]     %016llX "
+					"%016llX ---------------- %p", i,
+					le64_to_cpu(u0->a),
+					le64_to_cpu(u0->b),
+					rx_buffer_info->skb);
+			} else {
+				printk(KERN_INFO "R  [0x%03X]     %016llX "
+					"%016llX %016llX %p", i,
+					le64_to_cpu(u0->a),
+					le64_to_cpu(u0->b),
+					(u64)rx_buffer_info->dma,
+					rx_buffer_info->skb);
+
+				if (netif_msg_pktdata(adapter)) {
+					print_hex_dump(KERN_INFO, "",
+					   DUMP_PREFIX_ADDRESS, 16, 1,
+					   phys_to_virt(rx_buffer_info->dma),
+					   rx_ring->rx_buf_len, true);
+
+					if (rx_ring->rx_buf_len
+						< IXGBE_RXBUFFER_2048)
+						print_hex_dump(KERN_INFO, "",
+						  DUMP_PREFIX_ADDRESS, 16, 1,
+						  phys_to_virt(
+						    rx_buffer_info->page_dma +
+						    rx_buffer_info->page_offset
+						  ),
+						  PAGE_SIZE/2, true);
+				}
+			}
+
+			if (i == rx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == rx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+
+		}
+	}
+
+exit:
+	return;
+}
+
 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
 {
 	u32 ctrl_ext;
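
The dump path added above is gated on the adapter's message level, so it costs nothing unless explicitly enabled. A minimal sketch of turning it on from driver code; the NETIF_MSG_* bits are the standard netif_msg flags, and which of them to set here is our assumption:

	/* Sketch: ixgbe_dump() checks netif_msg_hw(), and the deeper ring and
	 * packet dumps check the tx_done/rx_status/pktdata bits, so set the
	 * corresponding bits in msg_enable before calling it. */
	adapter->msg_enable |= NETIF_MSG_HW | NETIF_MSG_TX_DONE |
			       NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA;
	ixgbe_dump(adapter);

In practice the same bits are usually flipped from user space through ethtool's msglvl setting rather than hard-coded.
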
@@ -266,15 +605,15 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
 {
 	if (tx_buffer_info->dma) {
 		if (tx_buffer_info->mapped_as_page)
-			pci_unmap_page(adapter->pdev,
+			dma_unmap_page(&adapter->pdev->dev,
 				       tx_buffer_info->dma,
 				       tx_buffer_info->length,
-				       PCI_DMA_TODEVICE);
+				       DMA_TO_DEVICE);
 		else
-			pci_unmap_single(adapter->pdev,
+			dma_unmap_single(&adapter->pdev->dev,
 					 tx_buffer_info->dma,
 					 tx_buffer_info->length,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		tx_buffer_info->dma = 0;
 	}
 	if (tx_buffer_info->skb) {
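
This and the following hunks are a one-to-one swap from the legacy PCI DMA wrappers to the generic DMA API. A minimal sketch of the correspondence, assuming a struct pci_dev *pdev and a streaming TX buffer:

	/* Sketch: generic DMA API equivalents of the removed pci_* calls. */
	dma_addr_t handle;

	/* was pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	/* was pci_dma_mapping_error(pdev, handle) */
	if (dma_mapping_error(&pdev->dev, handle))
		return -ENOMEM;
	/* was pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE) */
	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);

The page, coherent-allocation, and DMA-mask calls converted below follow the same pattern: pass &pdev->dev and the DMA_* direction constants instead of the pci_dev and PCI_DMA_* ones.
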
@@ -286,16 +625,16 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
 }
 
 /**
- * ixgbe_tx_is_paused - check if the tx ring is paused
+ * ixgbe_tx_xon_state - check the tx ring xon state
  * @adapter: the ixgbe adapter
  * @tx_ring: the corresponding tx_ring
  *
  * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
  * corresponding TC of this tx_ring when checking TFCS.
  *
- * Returns : true if paused
+ * Returns : true if in xon state (currently not paused)
  */
-static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
+static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
 				      struct ixgbe_ring *tx_ring)
 {
 	u32 txoff = IXGBE_TFCS_TXOFF;
@@ -351,7 +690,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 	adapter->detect_tx_hung = false;
 	if (tx_ring->tx_buffer_info[eop].time_stamp &&
 	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
-	    !ixgbe_tx_is_paused(adapter, tx_ring)) {
+	    ixgbe_tx_xon_state(adapter, tx_ring)) {
 		/* detected Tx unit hang */
 		union ixgbe_adv_tx_desc *tx_desc;
 		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -721,10 +1060,10 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 			bi->page_offset ^= (PAGE_SIZE / 2);
 		}
 
-		bi->page_dma = pci_map_page(pdev, bi->page,
+		bi->page_dma = dma_map_page(&pdev->dev, bi->page,
 					    bi->page_offset,
 					    (PAGE_SIZE / 2),
-					    PCI_DMA_FROMDEVICE);
+					    DMA_FROM_DEVICE);
 	}
 
 	if (!bi->skb) {
@@ -743,9 +1082,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 					  - skb->data));
 
 		bi->skb = skb;
-		bi->dma = pci_map_single(pdev, skb->data,
+		bi->dma = dma_map_single(&pdev->dev, skb->data,
 					 rx_ring->rx_buf_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 	}
 	/* Refresh the desc even if buffer_addrs didn't change because
 	 * each write-back erases this info. */
@@ -821,6 +1160,7 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
 
 struct ixgbe_rsc_cb {
 	dma_addr_t dma;
+	bool delay_unmap;
 };
 
 #define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
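
The IXGBE_RSC_CB() macro reuses the skb->cb scratch area for per-packet RSC state, which is why the new delay_unmap flag can simply be added to the struct. A minimal sketch of the general pattern, with hypothetical names:

	/* Sketch: stash driver-private state in skb->cb. The struct must fit
	 * in sizeof(skb->cb) (48 bytes); names here are illustrative only. */
	struct example_cb {
		dma_addr_t dma;
		bool delay_unmap;
	};
	#define EXAMPLE_CB(skb) ((struct example_cb *)(skb)->cb)

	BUILD_BUG_ON(sizeof(struct example_cb) >
		     sizeof(((struct sk_buff *)0)->cb));
	EXAMPLE_CB(skb)->delay_unmap = true;
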
@@ -861,9 +1201,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
 			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
 			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
-			if (len > IXGBE_RX_HDR_SIZE)
-				len = IXGBE_RX_HDR_SIZE;
 			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+			if ((len > IXGBE_RX_HDR_SIZE) ||
+			    (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
+				len = IXGBE_RX_HDR_SIZE;
 		} else {
 			len = le16_to_cpu(rx_desc->wb.upper.length);
 		}
@@ -876,7 +1217,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		if (rx_buffer_info->dma) {
 			if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
 			    (!(staterr & IXGBE_RXD_STAT_EOP)) &&
-			    (!(skb->prev)))
+			    (!(skb->prev))) {
 				/*
 				 * When HWRSC is enabled, delay unmapping
 				 * of the first packet. It carries the
@@ -884,18 +1225,21 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 				 * access the header after the writeback.
 				 * Only unmap it when EOP is reached
 				 */
+				IXGBE_RSC_CB(skb)->delay_unmap = true;
 				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
-			else
-				pci_unmap_single(pdev, rx_buffer_info->dma,
+			} else {
+				dma_unmap_single(&pdev->dev,
+						 rx_buffer_info->dma,
 						 rx_ring->rx_buf_len,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
+			}
 			rx_buffer_info->dma = 0;
 			skb_put(skb, len);
 		}
 
 		if (upper_len) {
-			pci_unmap_page(pdev, rx_buffer_info->page_dma,
-				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 			rx_buffer_info->page_dma = 0;
 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 					   rx_buffer_info->page,
@@ -936,11 +1280,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		if (skb->prev)
 			skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
 		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
-			if (IXGBE_RSC_CB(skb)->dma) {
-				pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
+			if (IXGBE_RSC_CB(skb)->delay_unmap) {
+				dma_unmap_single(&pdev->dev,
+						 IXGBE_RSC_CB(skb)->dma,
 						 rx_ring->rx_buf_len,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 				IXGBE_RSC_CB(skb)->dma = 0;
+				IXGBE_RSC_CB(skb)->delay_unmap = false;
 			}
 			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
 				rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
@@ -1190,6 +1536,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
 		itr_reg |= (itr_reg << 16);
 	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
 		/*
+		 * 82599 can support a value of zero, so allow it for
+		 * max interrupt rate, but there is an errata where it can
+		 * not be zero with RSC
+		 */
+		if (itr_reg == 8 &&
+		    !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+			itr_reg = 0;
+
+		/*
 		 * set the WDIS bit to not clear the timer bits and cause an
 		 * immediate assertion of the interrupt
 		 */
@@ -1261,8 +1616,6 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 
 		ixgbe_write_eitr(q_vector);
 	}
-
-	return;
 }
 
1268static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) 1621static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
@@ -1826,8 +2179,6 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 
 		ixgbe_write_eitr(q_vector);
 	}
-
-	return;
 }
 
1833/** 2184/**
@@ -2372,7 +2723,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
 		IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
 		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
-		ixgbe_set_vmolr(hw, adapter->num_vfs);
+		ixgbe_set_vmolr(hw, adapter->num_vfs, true);
 	}
 
 	/* Program MRQC for the distribution of queues */
@@ -2482,12 +2833,82 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
 }
 
+/**
+ * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	int i, j;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+#ifdef CONFIG_IXGBE_DCB
+		if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+			vlnctrl &= ~IXGBE_VLNCTRL_VME;
+#endif
+		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+		break;
+	case ixgbe_mac_82599EB:
+		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+#ifdef CONFIG_IXGBE_DCB
+		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+			break;
+#endif
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			j = adapter->rx_ring[i]->reg_idx;
+			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+			vlnctrl &= ~IXGBE_RXDCTL_VME;
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	int i, j;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
+		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+		break;
+	case ixgbe_mac_82599EB:
+		vlnctrl |= IXGBE_VLNCTRL_VFE;
+		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			j = adapter->rx_ring[i]->reg_idx;
+			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+			vlnctrl |= IXGBE_RXDCTL_VME;
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
 static void ixgbe_vlan_rx_register(struct net_device *netdev,
                                    struct vlan_group *grp)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	u32 ctrl;
-	int i, j;
 
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
 		ixgbe_irq_disable(adapter);
@@ -2498,25 +2919,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
 	 * still receive traffic from a DCB-enabled host even if we're
 	 * not in DCB mode.
 	 */
-	ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
-
-	/* Disable CFI check */
-	ctrl &= ~IXGBE_VLNCTRL_CFIEN;
-
-	/* enable VLAN tag stripping */
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-		ctrl |= IXGBE_VLNCTRL_VME;
-	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		for (i = 0; i < adapter->num_rx_queues; i++) {
-			u32 ctrl;
-			j = adapter->rx_ring[i]->reg_idx;
-			ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
-			ctrl |= IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
-		}
-	}
-
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+	ixgbe_vlan_filter_enable(adapter);
 
 	ixgbe_vlan_rx_add_vid(netdev, 0);
 
@@ -2538,21 +2941,6 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
 	}
 }
 
-static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
-{
-	struct dev_mc_list *mc_ptr;
-	u8 *addr = *mc_addr_ptr;
-	*vmdq = 0;
-
-	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
-	if (mc_ptr->next)
-		*mc_addr_ptr = mc_ptr->next->dmi_addr;
-	else
-		*mc_addr_ptr = NULL;
-
-	return addr;
-}
-
2556/** 2944/**
2557 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set 2945 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
2558 * @netdev: network interface device structure 2946 * @netdev: network interface device structure
@@ -2566,42 +2954,36 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 fctrl, vlnctrl;
-	u8 *addr_list = NULL;
-	int addr_count = 0;
+	u32 fctrl;
 
 	/* Check for Promiscuous and All Multicast modes */
 
 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 
 	if (netdev->flags & IFF_PROMISC) {
-		hw->addr_ctrl.user_set_promisc = 1;
+		hw->addr_ctrl.user_set_promisc = true;
 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+		/* don't hardware filter vlans in promisc mode */
+		ixgbe_vlan_filter_disable(adapter);
 	} else {
 		if (netdev->flags & IFF_ALLMULTI) {
 			fctrl |= IXGBE_FCTRL_MPE;
 			fctrl &= ~IXGBE_FCTRL_UPE;
-		} else {
+		} else if (!hw->addr_ctrl.uc_set_promisc) {
 			fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
 		}
-		vlnctrl |= IXGBE_VLNCTRL_VFE;
-		hw->addr_ctrl.user_set_promisc = 0;
+		ixgbe_vlan_filter_enable(adapter);
+		hw->addr_ctrl.user_set_promisc = false;
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 
 	/* reprogram secondary unicast list */
 	hw->mac.ops.update_uc_addr_list(hw, netdev);
 
 	/* reprogram multicast list */
-	addr_count = netdev_mc_count(netdev);
-	if (addr_count)
-		addr_list = netdev->mc_list->dmi_addr;
-	hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
-					ixgbe_addr_list_itr);
+	hw->mac.ops.update_mc_addr_list(hw, netdev);
+
 	if (adapter->num_vfs)
 		ixgbe_restore_vf_multicasts(adapter);
 }
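
With this change update_mc_addr_list() receives the net_device itself instead of a raw address list plus the removed ixgbe_addr_list_itr() walker. A sketch of how such a hook can iterate the list directly; the per-address helper name is hypothetical, and note that kernels of this vintage had netdev_for_each_mc_addr() yield struct dev_mc_list, while later ones yield struct netdev_hw_addr as shown:

	/* Sketch: an update_mc_addr_list() implementation walking the
	 * device's multicast list itself. */
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, netdev)
		example_set_mta(hw, ha->addr);	/* hypothetical helper */
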
@@ -2661,7 +3043,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 txdctl, vlnctrl;
+	u32 txdctl;
 	int i, j;
 
 	ixgbe_dcb_check_config(&adapter->dcb_cfg);
@@ -2679,22 +3061,8 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
 	}
 	/* Enable VLAN tag insert/strip */
-	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
-		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-	} else if (hw->mac.type == ixgbe_mac_82599EB) {
-		vlnctrl |= IXGBE_VLNCTRL_VFE;
-		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-		for (i = 0; i < adapter->num_rx_queues; i++) {
-			j = adapter->rx_ring[i]->reg_idx;
-			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
-			vlnctrl |= IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
-		}
-	}
+	ixgbe_vlan_filter_enable(adapter);
+
 	hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
 }
 
@@ -2750,8 +3118,10 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
 	case ixgbe_phy_sfp_ftl:
 	case ixgbe_phy_sfp_intel:
 	case ixgbe_phy_sfp_unknown:
-	case ixgbe_phy_tw_tyco:
-	case ixgbe_phy_tw_unknown:
+	case ixgbe_phy_sfp_passive_tyco:
+	case ixgbe_phy_sfp_passive_unknown:
+	case ixgbe_phy_sfp_active_unknown:
+	case ixgbe_phy_sfp_ftl_active:
 		return true;
 	default:
 		return false;
@@ -2927,8 +3297,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		j = adapter->tx_ring[i]->reg_idx;
 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
-		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
-		txdctl |= (8 << 16);
+		if (adapter->rx_itr_setting == 0) {
+			/* cannot set wthresh when itr==0 */
+			txdctl &= ~0x007F0000;
+		} else {
+			/* enable WTHRESH=8 descriptors, to encourage burst writeback */
+			txdctl |= (8 << 16);
+		}
 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
 	}
 
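
The mask arithmetic above treats the TXDCTL write-back threshold (WTHRESH) as a 7-bit field at bits 22:16, which is exactly what ~0x007F0000 clears and (8 << 16) sets. A small sketch of the same update with named helpers; the macro names are ours, not the driver's:

	/* Sketch: read-modify-write of the 7-bit WTHRESH field in TXDCTL. */
	#define EXAMPLE_TXDCTL_WTHRESH_MASK	0x007F0000
	#define EXAMPLE_TXDCTL_WTHRESH(n)	(((u32)(n) & 0x7F) << 16)

	txdctl = (txdctl & ~EXAMPLE_TXDCTL_WTHRESH_MASK) |
		 EXAMPLE_TXDCTL_WTHRESH(8);
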
@@ -3131,9 +3506,9 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 
 		rx_buffer_info = &rx_ring->rx_buffer_info[i];
 		if (rx_buffer_info->dma) {
-			pci_unmap_single(pdev, rx_buffer_info->dma,
+			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
 					 rx_ring->rx_buf_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			rx_buffer_info->dma = 0;
 		}
 		if (rx_buffer_info->skb) {
@@ -3141,11 +3516,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 			rx_buffer_info->skb = NULL;
 			do {
 				struct sk_buff *this = skb;
-				if (IXGBE_RSC_CB(this)->dma) {
-					pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
+				if (IXGBE_RSC_CB(this)->delay_unmap) {
+					dma_unmap_single(&pdev->dev,
+							 IXGBE_RSC_CB(this)->dma,
 							 rx_ring->rx_buf_len,
-							 PCI_DMA_FROMDEVICE);
+							 DMA_FROM_DEVICE);
 					IXGBE_RSC_CB(this)->dma = 0;
+					IXGBE_RSC_CB(skb)->delay_unmap = false;
 				}
 				skb = skb->prev;
 				dev_kfree_skb(this);
@@ -3154,8 +3531,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 			if (!rx_buffer_info->page)
 				continue;
 			if (rx_buffer_info->page_dma) {
-				pci_unmap_page(pdev, rx_buffer_info->page_dma,
-					       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+				dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+					       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 				rx_buffer_info->page_dma = 0;
 			}
 			put_page(rx_buffer_info->page);
@@ -3268,22 +3645,23 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
-	netif_tx_disable(netdev);
-
 	IXGBE_WRITE_FLUSH(hw);
 	msleep(10);
 
 	netif_tx_stop_all_queues(netdev);
 
-	ixgbe_irq_disable(adapter);
-
-	ixgbe_napi_disable_all(adapter);
-
 	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
 	del_timer_sync(&adapter->sfp_timer);
 	del_timer_sync(&adapter->watchdog_timer);
 	cancel_work_sync(&adapter->watchdog_task);
 
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
+	ixgbe_irq_disable(adapter);
+
+	ixgbe_napi_disable_all(adapter);
+
 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
 	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
 		cancel_work_sync(&adapter->fdir_reinit_task);
@@ -3301,8 +3679,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
 				 ~IXGBE_DMATXCTL_TE));
 
-	netif_carrier_off(netdev);
-
 	/* clear n-tuple filters that are cached */
 	ethtool_ntuple_flush(netdev);
 
@@ -3379,6 +3755,8 @@ static void ixgbe_reset_task(struct work_struct *work)
 
 	adapter->tx_timeout_count++;
 
+	ixgbe_dump(adapter);
+	netdev_err(adapter->netdev, "Reset adapter\n");
 	ixgbe_reinit_locked(adapter);
 }
 
@@ -3479,12 +3857,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 	adapter->num_tx_queues = 1;
 #ifdef CONFIG_IXGBE_DCB
 	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-		DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n");
+		DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
 		ixgbe_set_dcb_queues(adapter);
 	}
 #endif
 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-		DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n");
+		DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
 		if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
 		    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
 			ixgbe_set_fdir_queues(adapter);
@@ -4095,7 +4473,6 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
 		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
 		pci_disable_msi(adapter->pdev);
 	}
-	return;
 }
 
 /**
@@ -4381,8 +4758,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
-					     &tx_ring->dma);
+	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
 	if (!tx_ring->desc)
 		goto err;
 
@@ -4452,7 +4829,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
+	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
 
 	if (!rx_ring->desc) {
 		DPRINTK(PROBE, ERR,
@@ -4513,7 +4891,8 @@ void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
 	vfree(tx_ring->tx_buffer_info);
 	tx_ring->tx_buffer_info = NULL;
 
-	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -4550,7 +4929,8 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
 
-	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
 
 	rx_ring->desc = NULL;
 }
@@ -5100,7 +5480,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
 					&(adapter->tx_ring[i]->reinit_state));
 	} else {
 		DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
-			"ignored adding FDIR ATR filters \n");
+			"ignored adding FDIR ATR filters\n");
 	}
 	/* Done FDIR Re-initialization, enable transmits */
 	netif_tx_start_all_queues(adapter->netdev);
@@ -5420,10 +5800,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
 		tx_buffer_info->length = size;
 		tx_buffer_info->mapped_as_page = false;
-		tx_buffer_info->dma = pci_map_single(pdev,
+		tx_buffer_info->dma = dma_map_single(&pdev->dev,
 						     skb->data + offset,
-						     size, PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+						     size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 			goto dma_error;
 		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
@@ -5456,12 +5836,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
 			tx_buffer_info->length = size;
-			tx_buffer_info->dma = pci_map_page(adapter->pdev,
+			tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
 							   frag->page,
 							   offset, size,
-							   PCI_DMA_TODEVICE);
+							   DMA_TO_DEVICE);
 			tx_buffer_info->mapped_as_page = true;
-			if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+			if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
 				goto dma_error;
 			tx_buffer_info->time_stamp = jiffies;
 			tx_buffer_info->next_to_watch = i;
@@ -5697,7 +6077,8 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 		}
 		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
 		tx_flags |= IXGBE_TX_FLAGS_VLAN;
-	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
+		   skb->priority != TC_PRIO_CONTROL) {
 		tx_flags |= ((skb->queue_mapping & 0x7) << 13);
 		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
 		tx_flags |= IXGBE_TX_FLAGS_VLAN;
@@ -5942,6 +6323,10 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
 	.ndo_do_ioctl		= ixgbe_ioctl,
+	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
+	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
+	.ndo_set_vf_tx_rate	= ixgbe_ndo_set_vf_bw,
+	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ixgbe_netpoll,
 #endif
@@ -6039,13 +6424,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	if (err)
 		return err;
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
 			if (err) {
 				dev_err(&pdev->dev, "No usable DMA "
 					"configuration, aborting\n");