Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
 -rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 5110
1 file changed, 2937 insertions, 2173 deletions
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e32af434cc9d..08e8e25c159d 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | 3 | Intel 10 Gigabit PCI Express Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -41,6 +41,7 @@ | |||
41 | #include <net/ip6_checksum.h> | 41 | #include <net/ip6_checksum.h> |
42 | #include <linux/ethtool.h> | 42 | #include <linux/ethtool.h> |
43 | #include <linux/if_vlan.h> | 43 | #include <linux/if_vlan.h> |
44 | #include <linux/prefetch.h> | ||
44 | #include <scsi/fc/fc_fcoe.h> | 45 | #include <scsi/fc/fc_fcoe.h> |
45 | 46 | ||
46 | #include "ixgbe.h" | 47 | #include "ixgbe.h" |
@@ -50,15 +51,21 @@ | |||
50 | 51 | ||
51 | char ixgbe_driver_name[] = "ixgbe"; | 52 | char ixgbe_driver_name[] = "ixgbe"; |
52 | static const char ixgbe_driver_string[] = | 53 | static const char ixgbe_driver_string[] = |
53 | "Intel(R) 10 Gigabit PCI Express Network Driver"; | 54 | "Intel(R) 10 Gigabit PCI Express Network Driver"; |
54 | 55 | #define MAJ 3 | |
55 | #define DRV_VERSION "2.0.84-k2" | 56 | #define MIN 3 |
57 | #define BUILD 8 | ||
58 | #define KFIX 2 | ||
59 | #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ | ||
60 | __stringify(BUILD) "-k" __stringify(KFIX) | ||
56 | const char ixgbe_driver_version[] = DRV_VERSION; | 61 | const char ixgbe_driver_version[] = DRV_VERSION; |
57 | static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; | 62 | static const char ixgbe_copyright[] = |
63 | "Copyright (c) 1999-2011 Intel Corporation."; | ||
58 | 64 | ||
59 | static const struct ixgbe_info *ixgbe_info_tbl[] = { | 65 | static const struct ixgbe_info *ixgbe_info_tbl[] = { |
60 | [board_82598] = &ixgbe_82598_info, | 66 | [board_82598] = &ixgbe_82598_info, |
61 | [board_82599] = &ixgbe_82599_info, | 67 | [board_82599] = &ixgbe_82599_info, |
68 | [board_X540] = &ixgbe_X540_info, | ||
62 | }; | 69 | }; |
63 | 70 | ||
64 | /* ixgbe_pci_tbl - PCI Device ID Table | 71 | /* ixgbe_pci_tbl - PCI Device ID Table |
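[Editorial note, not part of the patch: the new DRV_VERSION above is assembled from the numeric MAJ/MIN/BUILD/KFIX macros with the kernel's two-level stringification helper from <linux/stringify.h>. A minimal sketch of how the expansion works, assuming only those definitions:

    #define __stringify_1(x...)    #x
    #define __stringify(x...)      __stringify_1(x)

    #define MAJ 3
    #define MIN 3
    #define BUILD 8
    #define KFIX 2
    #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
                        __stringify(BUILD) "-k" __stringify(KFIX)

    /* the extra __stringify_1 level expands MAJ to 3 before the # operator
     * stringizes it; adjacent string literals then concatenate, so
     * DRV_VERSION is the single literal "3.3.8-k2"
     */
]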
@@ -108,10 +115,20 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { | |||
108 | board_82599 }, | 115 | board_82599 }, |
109 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), | 116 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), |
110 | board_82599 }, | 117 | board_82599 }, |
118 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), | ||
119 | board_82599 }, | ||
120 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), | ||
121 | board_82599 }, | ||
111 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), | 122 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), |
112 | board_82599 }, | 123 | board_82599 }, |
113 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), | 124 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), |
114 | board_82599 }, | 125 | board_82599 }, |
126 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), | ||
127 | board_X540 }, | ||
128 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), | ||
129 | board_82599 }, | ||
130 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), | ||
131 | board_82599 }, | ||
115 | 132 | ||
116 | /* required last entry */ | 133 | /* required last entry */ |
117 | {0, } | 134 | {0, } |
@@ -120,7 +137,7 @@ MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); | |||
120 | 137 | ||
121 | #ifdef CONFIG_IXGBE_DCA | 138 | #ifdef CONFIG_IXGBE_DCA |
122 | static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, | 139 | static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, |
123 | void *p); | 140 | void *p); |
124 | static struct notifier_block dca_notifier = { | 141 | static struct notifier_block dca_notifier = { |
125 | .notifier_call = ixgbe_notify_dca, | 142 | .notifier_call = ixgbe_notify_dca, |
126 | .next = NULL, | 143 | .next = NULL, |
@@ -131,8 +148,8 @@ static struct notifier_block dca_notifier = { | |||
131 | #ifdef CONFIG_PCI_IOV | 148 | #ifdef CONFIG_PCI_IOV |
132 | static unsigned int max_vfs; | 149 | static unsigned int max_vfs; |
133 | module_param(max_vfs, uint, 0); | 150 | module_param(max_vfs, uint, 0); |
134 | MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate " | 151 | MODULE_PARM_DESC(max_vfs, |
135 | "per physical function"); | 152 | "Maximum number of virtual functions to allocate per physical function"); |
136 | #endif /* CONFIG_PCI_IOV */ | 153 | #endif /* CONFIG_PCI_IOV */ |
137 | 154 | ||
138 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); | 155 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); |
@@ -169,14 +186,30 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) | |||
169 | 186 | ||
170 | /* take a breather then clean up driver data */ | 187 | /* take a breather then clean up driver data */ |
171 | msleep(100); | 188 | msleep(100); |
172 | if (adapter->vfinfo) | 189 | |
173 | kfree(adapter->vfinfo); | 190 | kfree(adapter->vfinfo); |
174 | adapter->vfinfo = NULL; | 191 | adapter->vfinfo = NULL; |
175 | 192 | ||
176 | adapter->num_vfs = 0; | 193 | adapter->num_vfs = 0; |
177 | adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; | 194 | adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; |
178 | } | 195 | } |
179 | 196 | ||
197 | static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) | ||
198 | { | ||
199 | if (!test_bit(__IXGBE_DOWN, &adapter->state) && | ||
200 | !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) | ||
201 | schedule_work(&adapter->service_task); | ||
202 | } | ||
203 | |||
204 | static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) | ||
205 | { | ||
206 | BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); | ||
207 | |||
208 | /* flush memory to make sure state is correct before next watchdog */ | ||
209 | smp_mb__before_clear_bit(); | ||
210 | clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); | ||
211 | } | ||
212 | |||
180 | struct ixgbe_reg_info { | 213 | struct ixgbe_reg_info { |
181 | u32 ofs; | 214 | u32 ofs; |
182 | char *name; | 215 | char *name; |
@@ -282,17 +315,17 @@ static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) | |||
282 | regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); | 315 | regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); |
283 | break; | 316 | break; |
284 | default: | 317 | default: |
285 | printk(KERN_INFO "%-15s %08x\n", reginfo->name, | 318 | pr_info("%-15s %08x\n", reginfo->name, |
286 | IXGBE_READ_REG(hw, reginfo->ofs)); | 319 | IXGBE_READ_REG(hw, reginfo->ofs)); |
287 | return; | 320 | return; |
288 | } | 321 | } |
289 | 322 | ||
290 | for (i = 0; i < 8; i++) { | 323 | for (i = 0; i < 8; i++) { |
291 | snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7); | 324 | snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7); |
292 | printk(KERN_ERR "%-15s ", rname); | 325 | pr_err("%-15s", rname); |
293 | for (j = 0; j < 8; j++) | 326 | for (j = 0; j < 8; j++) |
294 | printk(KERN_CONT "%08x ", regs[i*8+j]); | 327 | pr_cont(" %08x", regs[i*8+j]); |
295 | printk(KERN_CONT "\n"); | 328 | pr_cont("\n"); |
296 | } | 329 | } |
297 | 330 | ||
298 | } | 331 | } |
@@ -322,18 +355,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) | |||
322 | /* Print netdevice Info */ | 355 | /* Print netdevice Info */ |
323 | if (netdev) { | 356 | if (netdev) { |
324 | dev_info(&adapter->pdev->dev, "Net device Info\n"); | 357 | dev_info(&adapter->pdev->dev, "Net device Info\n"); |
325 | printk(KERN_INFO "Device Name state " | 358 | pr_info("Device Name state " |
326 | "trans_start last_rx\n"); | 359 | "trans_start last_rx\n"); |
327 | printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", | 360 | pr_info("%-15s %016lX %016lX %016lX\n", |
328 | netdev->name, | 361 | netdev->name, |
329 | netdev->state, | 362 | netdev->state, |
330 | netdev->trans_start, | 363 | netdev->trans_start, |
331 | netdev->last_rx); | 364 | netdev->last_rx); |
332 | } | 365 | } |
333 | 366 | ||
334 | /* Print Registers */ | 367 | /* Print Registers */ |
335 | dev_info(&adapter->pdev->dev, "Register Dump\n"); | 368 | dev_info(&adapter->pdev->dev, "Register Dump\n"); |
336 | printk(KERN_INFO " Register Name Value\n"); | 369 | pr_info(" Register Name Value\n"); |
337 | for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; | 370 | for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; |
338 | reginfo->name; reginfo++) { | 371 | reginfo->name; reginfo++) { |
339 | ixgbe_regdump(hw, reginfo); | 372 | ixgbe_regdump(hw, reginfo); |
@@ -344,13 +377,12 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) | |||
344 | goto exit; | 377 | goto exit; |
345 | 378 | ||
346 | dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); | 379 | dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); |
347 | printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ] " | 380 | pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); |
348 | "leng ntw timestamp\n"); | ||
349 | for (n = 0; n < adapter->num_tx_queues; n++) { | 381 | for (n = 0; n < adapter->num_tx_queues; n++) { |
350 | tx_ring = adapter->tx_ring[n]; | 382 | tx_ring = adapter->tx_ring[n]; |
351 | tx_buffer_info = | 383 | tx_buffer_info = |
352 | &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; | 384 | &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; |
353 | printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", | 385 | pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", |
354 | n, tx_ring->next_to_use, tx_ring->next_to_clean, | 386 | n, tx_ring->next_to_use, tx_ring->next_to_clean, |
355 | (u64)tx_buffer_info->dma, | 387 | (u64)tx_buffer_info->dma, |
356 | tx_buffer_info->length, | 388 | tx_buffer_info->length, |
@@ -377,18 +409,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) | |||
377 | 409 | ||
378 | for (n = 0; n < adapter->num_tx_queues; n++) { | 410 | for (n = 0; n < adapter->num_tx_queues; n++) { |
379 | tx_ring = adapter->tx_ring[n]; | 411 | tx_ring = adapter->tx_ring[n]; |
380 | printk(KERN_INFO "------------------------------------\n"); | 412 | pr_info("------------------------------------\n"); |
381 | printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index); | 413 | pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); |
382 | printk(KERN_INFO "------------------------------------\n"); | 414 | pr_info("------------------------------------\n"); |
383 | printk(KERN_INFO "T [desc] [address 63:0 ] " | 415 | pr_info("T [desc] [address 63:0 ] " |
384 | "[PlPOIdStDDt Ln] [bi->dma ] " | 416 | "[PlPOIdStDDt Ln] [bi->dma ] " |
385 | "leng ntw timestamp bi->skb\n"); | 417 | "leng ntw timestamp bi->skb\n"); |
386 | 418 | ||
387 | for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { | 419 | for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { |
388 | tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); | 420 | tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); |
389 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 421 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
390 | u0 = (struct my_u0 *)tx_desc; | 422 | u0 = (struct my_u0 *)tx_desc; |
391 | printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX" | 423 | pr_info("T [0x%03X] %016llX %016llX %016llX" |
392 | " %04X %3X %016llX %p", i, | 424 | " %04X %3X %016llX %p", i, |
393 | le64_to_cpu(u0->a), | 425 | le64_to_cpu(u0->a), |
394 | le64_to_cpu(u0->b), | 426 | le64_to_cpu(u0->b), |
@@ -399,13 +431,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) | |||
399 | tx_buffer_info->skb); | 431 | tx_buffer_info->skb); |
400 | if (i == tx_ring->next_to_use && | 432 | if (i == tx_ring->next_to_use && |
401 | i == tx_ring->next_to_clean) | 433 | i == tx_ring->next_to_clean) |
402 | printk(KERN_CONT " NTC/U\n"); | 434 | pr_cont(" NTC/U\n"); |
403 | else if (i == tx_ring->next_to_use) | 435 | else if (i == tx_ring->next_to_use) |
404 | printk(KERN_CONT " NTU\n"); | 436 | pr_cont(" NTU\n"); |
405 | else if (i == tx_ring->next_to_clean) | 437 | else if (i == tx_ring->next_to_clean) |
406 | printk(KERN_CONT " NTC\n"); | 438 | pr_cont(" NTC\n"); |
407 | else | 439 | else |
408 | printk(KERN_CONT "\n"); | 440 | pr_cont("\n"); |
409 | 441 | ||
410 | if (netif_msg_pktdata(adapter) && | 442 | if (netif_msg_pktdata(adapter) && |
411 | tx_buffer_info->dma != 0) | 443 | tx_buffer_info->dma != 0) |
@@ -419,11 +451,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) | |||
419 | /* Print RX Rings Summary */ | 451 | /* Print RX Rings Summary */ |
420 | rx_ring_summary: | 452 | rx_ring_summary: |
421 | dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); | 453 | dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); |
422 | printk(KERN_INFO "Queue [NTU] [NTC]\n"); | 454 | pr_info("Queue [NTU] [NTC]\n"); |
423 | for (n = 0; n < adapter->num_rx_queues; n++) { | 455 | for (n = 0; n < adapter->num_rx_queues; n++) { |
424 | rx_ring = adapter->rx_ring[n]; | 456 | rx_ring = adapter->rx_ring[n]; |
425 | printk(KERN_INFO "%5d %5X %5X\n", n, | 457 | pr_info("%5d %5X %5X\n", |
426 | rx_ring->next_to_use, rx_ring->next_to_clean); | 458 | n, rx_ring->next_to_use, rx_ring->next_to_clean); |
427 | } | 459 | } |
428 | 460 | ||
429 | /* Print RX Rings */ | 461 | /* Print RX Rings */ |
@@ -454,30 +486,30 @@ rx_ring_summary: | |||
454 | */ | 486 | */ |
455 | for (n = 0; n < adapter->num_rx_queues; n++) { | 487 | for (n = 0; n < adapter->num_rx_queues; n++) { |
456 | rx_ring = adapter->rx_ring[n]; | 488 | rx_ring = adapter->rx_ring[n]; |
457 | printk(KERN_INFO "------------------------------------\n"); | 489 | pr_info("------------------------------------\n"); |
458 | printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index); | 490 | pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); |
459 | printk(KERN_INFO "------------------------------------\n"); | 491 | pr_info("------------------------------------\n"); |
460 | printk(KERN_INFO "R [desc] [ PktBuf A0] " | 492 | pr_info("R [desc] [ PktBuf A0] " |
461 | "[ HeadBuf DD] [bi->dma ] [bi->skb] " | 493 | "[ HeadBuf DD] [bi->dma ] [bi->skb] " |
462 | "<-- Adv Rx Read format\n"); | 494 | "<-- Adv Rx Read format\n"); |
463 | printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] " | 495 | pr_info("RWB[desc] [PcsmIpSHl PtRs] " |
464 | "[vl er S cks ln] ---------------- [bi->skb] " | 496 | "[vl er S cks ln] ---------------- [bi->skb] " |
465 | "<-- Adv Rx Write-Back format\n"); | 497 | "<-- Adv Rx Write-Back format\n"); |
466 | 498 | ||
467 | for (i = 0; i < rx_ring->count; i++) { | 499 | for (i = 0; i < rx_ring->count; i++) { |
468 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; | 500 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; |
469 | rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); | 501 | rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); |
470 | u0 = (struct my_u0 *)rx_desc; | 502 | u0 = (struct my_u0 *)rx_desc; |
471 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); | 503 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); |
472 | if (staterr & IXGBE_RXD_STAT_DD) { | 504 | if (staterr & IXGBE_RXD_STAT_DD) { |
473 | /* Descriptor Done */ | 505 | /* Descriptor Done */ |
474 | printk(KERN_INFO "RWB[0x%03X] %016llX " | 506 | pr_info("RWB[0x%03X] %016llX " |
475 | "%016llX ---------------- %p", i, | 507 | "%016llX ---------------- %p", i, |
476 | le64_to_cpu(u0->a), | 508 | le64_to_cpu(u0->a), |
477 | le64_to_cpu(u0->b), | 509 | le64_to_cpu(u0->b), |
478 | rx_buffer_info->skb); | 510 | rx_buffer_info->skb); |
479 | } else { | 511 | } else { |
480 | printk(KERN_INFO "R [0x%03X] %016llX " | 512 | pr_info("R [0x%03X] %016llX " |
481 | "%016llX %016llX %p", i, | 513 | "%016llX %016llX %p", i, |
482 | le64_to_cpu(u0->a), | 514 | le64_to_cpu(u0->a), |
483 | le64_to_cpu(u0->b), | 515 | le64_to_cpu(u0->b), |
@@ -503,11 +535,11 @@ rx_ring_summary: | |||
503 | } | 535 | } |
504 | 536 | ||
505 | if (i == rx_ring->next_to_use) | 537 | if (i == rx_ring->next_to_use) |
506 | printk(KERN_CONT " NTU\n"); | 538 | pr_cont(" NTU\n"); |
507 | else if (i == rx_ring->next_to_clean) | 539 | else if (i == rx_ring->next_to_clean) |
508 | printk(KERN_CONT " NTC\n"); | 540 | pr_cont(" NTC\n"); |
509 | else | 541 | else |
510 | printk(KERN_CONT "\n"); | 542 | pr_cont("\n"); |
511 | 543 | ||
512 | } | 544 | } |
513 | } | 545 | } |
@@ -523,7 +555,7 @@ static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) | |||
523 | /* Let firmware take over control of h/w */ | 555 | /* Let firmware take over control of h/w */ |
524 | ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); | 556 | ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); |
525 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, | 557 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, |
526 | ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); | 558 | ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); |
527 | } | 559 | } |
528 | 560 | ||
529 | static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) | 561 | static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) |
@@ -533,7 +565,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) | |||
533 | /* Let firmware know the driver has taken over */ | 565 | /* Let firmware know the driver has taken over */ |
534 | ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); | 566 | ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); |
535 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, | 567 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, |
536 | ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); | 568 | ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); |
537 | } | 569 | } |
538 | 570 | ||
539 | /* | 571 | /* |
@@ -545,7 +577,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) | |||
545 | * | 577 | * |
546 | */ | 578 | */ |
547 | static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, | 579 | static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, |
548 | u8 queue, u8 msix_vector) | 580 | u8 queue, u8 msix_vector) |
549 | { | 581 | { |
550 | u32 ivar, index; | 582 | u32 ivar, index; |
551 | struct ixgbe_hw *hw = &adapter->hw; | 583 | struct ixgbe_hw *hw = &adapter->hw; |
@@ -561,6 +593,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, | |||
561 | IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); | 593 | IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); |
562 | break; | 594 | break; |
563 | case ixgbe_mac_82599EB: | 595 | case ixgbe_mac_82599EB: |
596 | case ixgbe_mac_X540: | ||
564 | if (direction == -1) { | 597 | if (direction == -1) { |
565 | /* other causes */ | 598 | /* other causes */ |
566 | msix_vector |= IXGBE_IVAR_ALLOC_VAL; | 599 | msix_vector |= IXGBE_IVAR_ALLOC_VAL; |
@@ -586,33 +619,38 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, | |||
586 | } | 619 | } |
587 | 620 | ||
588 | static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, | 621 | static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, |
589 | u64 qmask) | 622 | u64 qmask) |
590 | { | 623 | { |
591 | u32 mask; | 624 | u32 mask; |
592 | 625 | ||
593 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 626 | switch (adapter->hw.mac.type) { |
627 | case ixgbe_mac_82598EB: | ||
594 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); | 628 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); |
595 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); | 629 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); |
596 | } else { | 630 | break; |
631 | case ixgbe_mac_82599EB: | ||
632 | case ixgbe_mac_X540: | ||
597 | mask = (qmask & 0xFFFFFFFF); | 633 | mask = (qmask & 0xFFFFFFFF); |
598 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); | 634 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); |
599 | mask = (qmask >> 32); | 635 | mask = (qmask >> 32); |
600 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); | 636 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); |
637 | break; | ||
638 | default: | ||
639 | break; | ||
601 | } | 640 | } |
602 | } | 641 | } |
603 | 642 | ||
604 | static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, | 643 | void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring, |
605 | struct ixgbe_tx_buffer | 644 | struct ixgbe_tx_buffer *tx_buffer_info) |
606 | *tx_buffer_info) | ||
607 | { | 645 | { |
608 | if (tx_buffer_info->dma) { | 646 | if (tx_buffer_info->dma) { |
609 | if (tx_buffer_info->mapped_as_page) | 647 | if (tx_buffer_info->mapped_as_page) |
610 | dma_unmap_page(&adapter->pdev->dev, | 648 | dma_unmap_page(tx_ring->dev, |
611 | tx_buffer_info->dma, | 649 | tx_buffer_info->dma, |
612 | tx_buffer_info->length, | 650 | tx_buffer_info->length, |
613 | DMA_TO_DEVICE); | 651 | DMA_TO_DEVICE); |
614 | else | 652 | else |
615 | dma_unmap_single(&adapter->pdev->dev, | 653 | dma_unmap_single(tx_ring->dev, |
616 | tx_buffer_info->dma, | 654 | tx_buffer_info->dma, |
617 | tx_buffer_info->length, | 655 | tx_buffer_info->length, |
618 | DMA_TO_DEVICE); | 656 | DMA_TO_DEVICE); |
@@ -627,92 +665,166 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, | |||
627 | } | 665 | } |
628 | 666 | ||
629 | /** | 667 | /** |
630 | * ixgbe_tx_xon_state - check the tx ring xon state | 668 | * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class |
631 | * @adapter: the ixgbe adapter | 669 | * @adapter: driver private struct |
632 | * @tx_ring: the corresponding tx_ring | 670 | * @index: reg idx of queue to query (0-127) |
633 | * | 671 | * |
634 | * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the | 672 | * Helper function to determine the traffic index for a particular |
635 | * corresponding TC of this tx_ring when checking TFCS. | 673 | * register index. |
636 | * | 674 | * |
637 | * Returns : true if in xon state (currently not paused) | 675 | * Returns : a tc index for use in range 0-7, or 0-3 |
638 | */ | 676 | */ |
639 | static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter, | 677 | static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx) |
640 | struct ixgbe_ring *tx_ring) | ||
641 | { | 678 | { |
642 | u32 txoff = IXGBE_TFCS_TXOFF; | 679 | int tc = -1; |
680 | int dcb_i = netdev_get_num_tc(adapter->netdev); | ||
643 | 681 | ||
644 | #ifdef CONFIG_IXGBE_DCB | 682 | /* if DCB is not enabled the queues have no TC */ |
645 | if (adapter->dcb_cfg.pfc_mode_enable) { | 683 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) |
646 | int tc; | 684 | return tc; |
647 | int reg_idx = tx_ring->reg_idx; | ||
648 | int dcb_i = adapter->ring_feature[RING_F_DCB].indices; | ||
649 | 685 | ||
650 | switch (adapter->hw.mac.type) { | 686 | /* check valid range */ |
687 | if (reg_idx >= adapter->hw.mac.max_tx_queues) | ||
688 | return tc; | ||
689 | |||
690 | switch (adapter->hw.mac.type) { | ||
691 | case ixgbe_mac_82598EB: | ||
692 | tc = reg_idx >> 2; | ||
693 | break; | ||
694 | default: | ||
695 | if (dcb_i != 4 && dcb_i != 8) | ||
696 | break; | ||
697 | |||
698 | /* if VMDq is enabled the lowest order bits determine TC */ | ||
699 | if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | | ||
700 | IXGBE_FLAG_VMDQ_ENABLED)) { | ||
701 | tc = reg_idx & (dcb_i - 1); | ||
702 | break; | ||
703 | } | ||
704 | |||
705 | /* | ||
706 | * Convert the reg_idx into the correct TC. This bitmask | ||
707 | * targets the last full 32 ring traffic class and assigns | ||
708 | * it a value of 1. From there the rest of the rings are | ||
709 | * based on shifting the mask further up to include the | ||
710 | * reg_idx / 16 and then reg_idx / 8. It assumes dcB_i | ||
711 | * will only ever be 8 or 4 and that reg_idx will never | ||
712 | * be greater then 128. The code without the power of 2 | ||
713 | * optimizations would be: | ||
714 | * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32) | ||
715 | */ | ||
716 | tc = ((reg_idx & 0X1F) + 0x20) * dcb_i; | ||
717 | tc >>= 9 - (reg_idx >> 5); | ||
718 | } | ||
719 | |||
720 | return tc; | ||
721 | } | ||
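[Editorial note, not part of the patch: a worked example of the bitmask math in ixgbe_dcb_txq_to_tc above, assuming the 8-TC layout (dcb_i = 8) and reg_idx = 96:

    tc = ((96 & 0x1F) + 0x20) * 8;   /* (0 + 32) * 8 = 256 */
    tc >>= 9 - (96 >> 5);            /* 256 >> (9 - 3) = 256 >> 6 = 4 */

which agrees with the unoptimized form (((96 % 32) + 32) * 8) >> (9 - 96 / 32) = 4, i.e. queue 96 maps to TC4 under that layout.]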
722 | |||
723 | static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) | ||
724 | { | ||
725 | struct ixgbe_hw *hw = &adapter->hw; | ||
726 | struct ixgbe_hw_stats *hwstats = &adapter->stats; | ||
727 | u32 data = 0; | ||
728 | u32 xoff[8] = {0}; | ||
729 | int i; | ||
730 | |||
731 | if ((hw->fc.current_mode == ixgbe_fc_full) || | ||
732 | (hw->fc.current_mode == ixgbe_fc_rx_pause)) { | ||
733 | switch (hw->mac.type) { | ||
651 | case ixgbe_mac_82598EB: | 734 | case ixgbe_mac_82598EB: |
652 | tc = reg_idx >> 2; | 735 | data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); |
653 | txoff = IXGBE_TFCS_TXOFF0; | ||
654 | break; | 736 | break; |
655 | case ixgbe_mac_82599EB: | 737 | default: |
656 | tc = 0; | 738 | data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); |
657 | txoff = IXGBE_TFCS_TXOFF; | 739 | } |
658 | if (dcb_i == 8) { | 740 | hwstats->lxoffrxc += data; |
659 | /* TC0, TC1 */ | 741 | |
660 | tc = reg_idx >> 5; | 742 | /* refill credits (no tx hang) if we received xoff */ |
661 | if (tc == 2) /* TC2, TC3 */ | 743 | if (!data) |
662 | tc += (reg_idx - 64) >> 4; | 744 | return; |
663 | else if (tc == 3) /* TC4, TC5, TC6, TC7 */ | 745 | |
664 | tc += 1 + ((reg_idx - 96) >> 3); | 746 | for (i = 0; i < adapter->num_tx_queues; i++) |
665 | } else if (dcb_i == 4) { | 747 | clear_bit(__IXGBE_HANG_CHECK_ARMED, |
666 | /* TC0, TC1 */ | 748 | &adapter->tx_ring[i]->state); |
667 | tc = reg_idx >> 6; | 749 | return; |
668 | if (tc == 1) { | 750 | } else if (!(adapter->dcb_cfg.pfc_mode_enable)) |
669 | tc += (reg_idx - 64) >> 5; | 751 | return; |
670 | if (tc == 2) /* TC2, TC3 */ | 752 | |
671 | tc += (reg_idx - 96) >> 4; | 753 | /* update stats for each tc, only valid with PFC enabled */ |
672 | } | 754 | for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { |
673 | } | 755 | switch (hw->mac.type) { |
756 | case ixgbe_mac_82598EB: | ||
757 | xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); | ||
674 | break; | 758 | break; |
675 | default: | 759 | default: |
676 | tc = 0; | 760 | xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); |
677 | } | 761 | } |
678 | txoff <<= tc; | 762 | hwstats->pxoffrxc[i] += xoff[i]; |
763 | } | ||
764 | |||
765 | /* disarm tx queues that have received xoff frames */ | ||
766 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
767 | struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; | ||
768 | u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx); | ||
769 | |||
770 | if (xoff[tc]) | ||
771 | clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); | ||
679 | } | 772 | } |
680 | #endif | ||
681 | return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff; | ||
682 | } | 773 | } |
683 | 774 | ||
684 | static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, | 775 | static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) |
685 | struct ixgbe_ring *tx_ring, | ||
686 | unsigned int eop) | ||
687 | { | 776 | { |
777 | return ring->tx_stats.completed; | ||
778 | } | ||
779 | |||
780 | static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) | ||
781 | { | ||
782 | struct ixgbe_adapter *adapter = netdev_priv(ring->netdev); | ||
688 | struct ixgbe_hw *hw = &adapter->hw; | 783 | struct ixgbe_hw *hw = &adapter->hw; |
689 | 784 | ||
690 | /* Detect a transmit hang in hardware, this serializes the | 785 | u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); |
691 | * check with the clearing of time_stamp and movement of eop */ | 786 | u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); |
692 | adapter->detect_tx_hung = false; | 787 | |
693 | if (tx_ring->tx_buffer_info[eop].time_stamp && | 788 | if (head != tail) |
694 | time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && | 789 | return (head < tail) ? |
695 | ixgbe_tx_xon_state(adapter, tx_ring)) { | 790 | tail - head : (tail + ring->count - head); |
696 | /* detected Tx unit hang */ | 791 | |
697 | union ixgbe_adv_tx_desc *tx_desc; | 792 | return 0; |
698 | tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); | 793 | } |
699 | e_err(drv, "Detected Tx Unit Hang\n" | 794 | |
700 | " Tx Queue <%d>\n" | 795 | static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) |
701 | " TDH, TDT <%x>, <%x>\n" | 796 | { |
702 | " next_to_use <%x>\n" | 797 | u32 tx_done = ixgbe_get_tx_completed(tx_ring); |
703 | " next_to_clean <%x>\n" | 798 | u32 tx_done_old = tx_ring->tx_stats.tx_done_old; |
704 | "tx_buffer_info[next_to_clean]\n" | 799 | u32 tx_pending = ixgbe_get_tx_pending(tx_ring); |
705 | " time_stamp <%lx>\n" | 800 | bool ret = false; |
706 | " jiffies <%lx>\n", | 801 | |
707 | tx_ring->queue_index, | 802 | clear_check_for_tx_hang(tx_ring); |
708 | IXGBE_READ_REG(hw, tx_ring->head), | 803 | |
709 | IXGBE_READ_REG(hw, tx_ring->tail), | 804 | /* |
710 | tx_ring->next_to_use, eop, | 805 | * Check for a hung queue, but be thorough. This verifies |
711 | tx_ring->tx_buffer_info[eop].time_stamp, jiffies); | 806 | * that a transmit has been completed since the previous |
712 | return true; | 807 | * check AND there is at least one packet pending. The |
808 | * ARMED bit is set to indicate a potential hang. The | ||
809 | * bit is cleared if a pause frame is received to remove | ||
810 | * false hang detection due to PFC or 802.3x frames. By | ||
811 | * requiring this to fail twice we avoid races with | ||
812 | * pfc clearing the ARMED bit and conditions where we | ||
813 | * run the check_tx_hang logic with a transmit completion | ||
814 | * pending but without time to complete it yet. | ||
815 | */ | ||
816 | if ((tx_done_old == tx_done) && tx_pending) { | ||
817 | /* make sure it is true for two checks in a row */ | ||
818 | ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, | ||
819 | &tx_ring->state); | ||
820 | } else { | ||
821 | /* update completed stats and continue */ | ||
822 | tx_ring->tx_stats.tx_done_old = tx_done; | ||
823 | /* reset the countdown */ | ||
824 | clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); | ||
713 | } | 825 | } |
714 | 826 | ||
715 | return false; | 827 | return ret; |
716 | } | 828 | } |
717 | 829 | ||
718 | #define IXGBE_MAX_TXD_PWR 14 | 830 | #define IXGBE_MAX_TXD_PWR 14 |
@@ -724,7 +836,19 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, | |||
724 | #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ | 836 | #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ |
725 | MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ | 837 | MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ |
726 | 838 | ||
727 | static void ixgbe_tx_timeout(struct net_device *netdev); | 839 | /** |
840 | * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout | ||
841 | * @adapter: driver private struct | ||
842 | **/ | ||
843 | static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) | ||
844 | { | ||
845 | |||
846 | /* Do the reset outside of interrupt context */ | ||
847 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { | ||
848 | adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; | ||
849 | ixgbe_service_event_schedule(adapter); | ||
850 | } | ||
851 | } | ||
728 | 852 | ||
729 | /** | 853 | /** |
730 | * ixgbe_clean_tx_irq - Reclaim resources after transmit completes | 854 | * ixgbe_clean_tx_irq - Reclaim resources after transmit completes |
@@ -732,163 +856,195 @@ static void ixgbe_tx_timeout(struct net_device *netdev); | |||
732 | * @tx_ring: tx ring to clean | 856 | * @tx_ring: tx ring to clean |
733 | **/ | 857 | **/ |
734 | static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, | 858 | static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, |
735 | struct ixgbe_ring *tx_ring) | 859 | struct ixgbe_ring *tx_ring) |
736 | { | 860 | { |
737 | struct ixgbe_adapter *adapter = q_vector->adapter; | 861 | struct ixgbe_adapter *adapter = q_vector->adapter; |
738 | struct net_device *netdev = adapter->netdev; | ||
739 | union ixgbe_adv_tx_desc *tx_desc, *eop_desc; | 862 | union ixgbe_adv_tx_desc *tx_desc, *eop_desc; |
740 | struct ixgbe_tx_buffer *tx_buffer_info; | 863 | struct ixgbe_tx_buffer *tx_buffer_info; |
741 | unsigned int i, eop, count = 0; | ||
742 | unsigned int total_bytes = 0, total_packets = 0; | 864 | unsigned int total_bytes = 0, total_packets = 0; |
865 | u16 i, eop, count = 0; | ||
743 | 866 | ||
744 | i = tx_ring->next_to_clean; | 867 | i = tx_ring->next_to_clean; |
745 | eop = tx_ring->tx_buffer_info[i].next_to_watch; | 868 | eop = tx_ring->tx_buffer_info[i].next_to_watch; |
746 | eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); | 869 | eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); |
747 | 870 | ||
748 | while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && | 871 | while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && |
749 | (count < tx_ring->work_limit)) { | 872 | (count < tx_ring->work_limit)) { |
750 | bool cleaned = false; | 873 | bool cleaned = false; |
751 | rmb(); /* read buffer_info after eop_desc */ | 874 | rmb(); /* read buffer_info after eop_desc */ |
752 | for ( ; !cleaned; count++) { | 875 | for ( ; !cleaned; count++) { |
753 | struct sk_buff *skb; | 876 | tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); |
754 | tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); | ||
755 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 877 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
756 | cleaned = (i == eop); | ||
757 | skb = tx_buffer_info->skb; | ||
758 | |||
759 | if (cleaned && skb) { | ||
760 | unsigned int segs, bytecount; | ||
761 | unsigned int hlen = skb_headlen(skb); | ||
762 | |||
763 | /* gso_segs is currently only valid for tcp */ | ||
764 | segs = skb_shinfo(skb)->gso_segs ?: 1; | ||
765 | #ifdef IXGBE_FCOE | ||
766 | /* adjust for FCoE Sequence Offload */ | ||
767 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) | ||
768 | && (skb->protocol == htons(ETH_P_FCOE)) && | ||
769 | skb_is_gso(skb)) { | ||
770 | hlen = skb_transport_offset(skb) + | ||
771 | sizeof(struct fc_frame_header) + | ||
772 | sizeof(struct fcoe_crc_eof); | ||
773 | segs = DIV_ROUND_UP(skb->len - hlen, | ||
774 | skb_shinfo(skb)->gso_size); | ||
775 | } | ||
776 | #endif /* IXGBE_FCOE */ | ||
777 | /* multiply data chunks by size of headers */ | ||
778 | bytecount = ((segs - 1) * hlen) + skb->len; | ||
779 | total_packets += segs; | ||
780 | total_bytes += bytecount; | ||
781 | } | ||
782 | |||
783 | ixgbe_unmap_and_free_tx_resource(adapter, | ||
784 | tx_buffer_info); | ||
785 | 878 | ||
786 | tx_desc->wb.status = 0; | 879 | tx_desc->wb.status = 0; |
880 | cleaned = (i == eop); | ||
787 | 881 | ||
788 | i++; | 882 | i++; |
789 | if (i == tx_ring->count) | 883 | if (i == tx_ring->count) |
790 | i = 0; | 884 | i = 0; |
885 | |||
886 | if (cleaned && tx_buffer_info->skb) { | ||
887 | total_bytes += tx_buffer_info->bytecount; | ||
888 | total_packets += tx_buffer_info->gso_segs; | ||
889 | } | ||
890 | |||
891 | ixgbe_unmap_and_free_tx_resource(tx_ring, | ||
892 | tx_buffer_info); | ||
791 | } | 893 | } |
792 | 894 | ||
895 | tx_ring->tx_stats.completed++; | ||
793 | eop = tx_ring->tx_buffer_info[i].next_to_watch; | 896 | eop = tx_ring->tx_buffer_info[i].next_to_watch; |
794 | eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); | 897 | eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); |
795 | } | 898 | } |
796 | 899 | ||
797 | tx_ring->next_to_clean = i; | 900 | tx_ring->next_to_clean = i; |
901 | tx_ring->total_bytes += total_bytes; | ||
902 | tx_ring->total_packets += total_packets; | ||
903 | u64_stats_update_begin(&tx_ring->syncp); | ||
904 | tx_ring->stats.packets += total_packets; | ||
905 | tx_ring->stats.bytes += total_bytes; | ||
906 | u64_stats_update_end(&tx_ring->syncp); | ||
907 | |||
908 | if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { | ||
909 | /* schedule immediate reset if we believe we hung */ | ||
910 | struct ixgbe_hw *hw = &adapter->hw; | ||
911 | tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); | ||
912 | e_err(drv, "Detected Tx Unit Hang\n" | ||
913 | " Tx Queue <%d>\n" | ||
914 | " TDH, TDT <%x>, <%x>\n" | ||
915 | " next_to_use <%x>\n" | ||
916 | " next_to_clean <%x>\n" | ||
917 | "tx_buffer_info[next_to_clean]\n" | ||
918 | " time_stamp <%lx>\n" | ||
919 | " jiffies <%lx>\n", | ||
920 | tx_ring->queue_index, | ||
921 | IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), | ||
922 | IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), | ||
923 | tx_ring->next_to_use, eop, | ||
924 | tx_ring->tx_buffer_info[eop].time_stamp, jiffies); | ||
925 | |||
926 | netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); | ||
927 | |||
928 | e_info(probe, | ||
929 | "tx hang %d detected on queue %d, resetting adapter\n", | ||
930 | adapter->tx_timeout_count + 1, tx_ring->queue_index); | ||
931 | |||
932 | /* schedule immediate reset if we believe we hung */ | ||
933 | ixgbe_tx_timeout_reset(adapter); | ||
934 | |||
935 | /* the adapter is about to reset, no point in enabling stuff */ | ||
936 | return true; | ||
937 | } | ||
798 | 938 | ||
799 | #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) | 939 | #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) |
800 | if (unlikely(count && netif_carrier_ok(netdev) && | 940 | if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && |
801 | (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { | 941 | (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { |
802 | /* Make sure that anybody stopping the queue after this | 942 | /* Make sure that anybody stopping the queue after this |
803 | * sees the new next_to_clean. | 943 | * sees the new next_to_clean. |
804 | */ | 944 | */ |
805 | smp_mb(); | 945 | smp_mb(); |
806 | if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && | 946 | if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && |
807 | !test_bit(__IXGBE_DOWN, &adapter->state)) { | 947 | !test_bit(__IXGBE_DOWN, &adapter->state)) { |
808 | netif_wake_subqueue(netdev, tx_ring->queue_index); | 948 | netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); |
809 | ++tx_ring->restart_queue; | 949 | ++tx_ring->tx_stats.restart_queue; |
810 | } | 950 | } |
811 | } | 951 | } |
812 | 952 | ||
813 | if (adapter->detect_tx_hung) { | 953 | return count < tx_ring->work_limit; |
814 | if (ixgbe_check_tx_hang(adapter, tx_ring, i)) { | ||
815 | /* schedule immediate reset if we believe we hung */ | ||
816 | e_info(probe, "tx hang %d detected, resetting " | ||
817 | "adapter\n", adapter->tx_timeout_count + 1); | ||
818 | ixgbe_tx_timeout(adapter->netdev); | ||
819 | } | ||
820 | } | ||
821 | |||
822 | /* re-arm the interrupt */ | ||
823 | if (count >= tx_ring->work_limit) | ||
824 | ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx)); | ||
825 | |||
826 | tx_ring->total_bytes += total_bytes; | ||
827 | tx_ring->total_packets += total_packets; | ||
828 | tx_ring->stats.packets += total_packets; | ||
829 | tx_ring->stats.bytes += total_bytes; | ||
830 | return (count < tx_ring->work_limit); | ||
831 | } | 954 | } |
832 | 955 | ||
833 | #ifdef CONFIG_IXGBE_DCA | 956 | #ifdef CONFIG_IXGBE_DCA |
834 | static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, | 957 | static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, |
835 | struct ixgbe_ring *rx_ring) | 958 | struct ixgbe_ring *rx_ring, |
959 | int cpu) | ||
836 | { | 960 | { |
961 | struct ixgbe_hw *hw = &adapter->hw; | ||
837 | u32 rxctrl; | 962 | u32 rxctrl; |
838 | int cpu = get_cpu(); | 963 | u8 reg_idx = rx_ring->reg_idx; |
839 | int q = rx_ring->reg_idx; | 964 | |
840 | 965 | rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx)); | |
841 | if (rx_ring->cpu != cpu) { | 966 | switch (hw->mac.type) { |
842 | rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); | 967 | case ixgbe_mac_82598EB: |
843 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 968 | rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; |
844 | rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; | 969 | rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); |
845 | rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); | 970 | break; |
846 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 971 | case ixgbe_mac_82599EB: |
847 | rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; | 972 | case ixgbe_mac_X540: |
848 | rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << | 973 | rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; |
849 | IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); | 974 | rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << |
850 | } | 975 | IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); |
851 | rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; | 976 | break; |
852 | rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; | 977 | default: |
853 | rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); | 978 | break; |
854 | rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | | ||
855 | IXGBE_DCA_RXCTRL_DESC_HSRO_EN); | ||
856 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); | ||
857 | rx_ring->cpu = cpu; | ||
858 | } | 979 | } |
859 | put_cpu(); | 980 | rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; |
981 | rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; | ||
982 | rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); | ||
983 | IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); | ||
860 | } | 984 | } |
861 | 985 | ||
862 | static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, | 986 | static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, |
863 | struct ixgbe_ring *tx_ring) | 987 | struct ixgbe_ring *tx_ring, |
988 | int cpu) | ||
864 | { | 989 | { |
990 | struct ixgbe_hw *hw = &adapter->hw; | ||
865 | u32 txctrl; | 991 | u32 txctrl; |
992 | u8 reg_idx = tx_ring->reg_idx; | ||
993 | |||
994 | switch (hw->mac.type) { | ||
995 | case ixgbe_mac_82598EB: | ||
996 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx)); | ||
997 | txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; | ||
998 | txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); | ||
999 | txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; | ||
1000 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl); | ||
1001 | break; | ||
1002 | case ixgbe_mac_82599EB: | ||
1003 | case ixgbe_mac_X540: | ||
1004 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx)); | ||
1005 | txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; | ||
1006 | txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << | ||
1007 | IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); | ||
1008 | txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; | ||
1009 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl); | ||
1010 | break; | ||
1011 | default: | ||
1012 | break; | ||
1013 | } | ||
1014 | } | ||
1015 | |||
1016 | static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) | ||
1017 | { | ||
1018 | struct ixgbe_adapter *adapter = q_vector->adapter; | ||
866 | int cpu = get_cpu(); | 1019 | int cpu = get_cpu(); |
867 | int q = tx_ring->reg_idx; | 1020 | long r_idx; |
868 | struct ixgbe_hw *hw = &adapter->hw; | 1021 | int i; |
869 | 1022 | ||
870 | if (tx_ring->cpu != cpu) { | 1023 | if (q_vector->cpu == cpu) |
871 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 1024 | goto out_no_update; |
872 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q)); | 1025 | |
873 | txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; | 1026 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
874 | txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); | 1027 | for (i = 0; i < q_vector->txr_count; i++) { |
875 | txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; | 1028 | ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu); |
876 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl); | 1029 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, |
877 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 1030 | r_idx + 1); |
878 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q)); | ||
879 | txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; | ||
880 | txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << | ||
881 | IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); | ||
882 | txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; | ||
883 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl); | ||
884 | } | ||
885 | tx_ring->cpu = cpu; | ||
886 | } | 1031 | } |
1032 | |||
1033 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | ||
1034 | for (i = 0; i < q_vector->rxr_count; i++) { | ||
1035 | ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu); | ||
1036 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, | ||
1037 | r_idx + 1); | ||
1038 | } | ||
1039 | |||
1040 | q_vector->cpu = cpu; | ||
1041 | out_no_update: | ||
887 | put_cpu(); | 1042 | put_cpu(); |
888 | } | 1043 | } |
889 | 1044 | ||
890 | static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) | 1045 | static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) |
891 | { | 1046 | { |
1047 | int num_q_vectors; | ||
892 | int i; | 1048 | int i; |
893 | 1049 | ||
894 | if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) | 1050 | if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) |
@@ -897,22 +1053,25 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) | |||
897 | /* always use CB2 mode, difference is masked in the CB driver */ | 1053 | /* always use CB2 mode, difference is masked in the CB driver */ |
898 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); | 1054 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); |
899 | 1055 | ||
900 | for (i = 0; i < adapter->num_tx_queues; i++) { | 1056 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) |
901 | adapter->tx_ring[i]->cpu = -1; | 1057 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
902 | ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]); | 1058 | else |
903 | } | 1059 | num_q_vectors = 1; |
904 | for (i = 0; i < adapter->num_rx_queues; i++) { | 1060 | |
905 | adapter->rx_ring[i]->cpu = -1; | 1061 | for (i = 0; i < num_q_vectors; i++) { |
906 | ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]); | 1062 | adapter->q_vector[i]->cpu = -1; |
1063 | ixgbe_update_dca(adapter->q_vector[i]); | ||
907 | } | 1064 | } |
908 | } | 1065 | } |
909 | 1066 | ||
910 | static int __ixgbe_notify_dca(struct device *dev, void *data) | 1067 | static int __ixgbe_notify_dca(struct device *dev, void *data) |
911 | { | 1068 | { |
912 | struct net_device *netdev = dev_get_drvdata(dev); | 1069 | struct ixgbe_adapter *adapter = dev_get_drvdata(dev); |
913 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
914 | unsigned long event = *(unsigned long *)data; | 1070 | unsigned long event = *(unsigned long *)data; |
915 | 1071 | ||
1072 | if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) | ||
1073 | return 0; | ||
1074 | |||
916 | switch (event) { | 1075 | switch (event) { |
917 | case DCA_PROVIDER_ADD: | 1076 | case DCA_PROVIDER_ADD: |
918 | /* if we're already enabled, don't do it again */ | 1077 | /* if we're already enabled, don't do it again */ |
@@ -935,8 +1094,14 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) | |||
935 | 1094 | ||
936 | return 0; | 1095 | return 0; |
937 | } | 1096 | } |
938 | |||
939 | #endif /* CONFIG_IXGBE_DCA */ | 1097 | #endif /* CONFIG_IXGBE_DCA */ |
1098 | |||
1099 | static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc, | ||
1100 | struct sk_buff *skb) | ||
1101 | { | ||
1102 | skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); | ||
1103 | } | ||
1104 | |||
940 | /** | 1105 | /** |
941 | * ixgbe_receive_skb - Send a completed packet up the stack | 1106 | * ixgbe_receive_skb - Send a completed packet up the stack |
942 | * @adapter: board private structure | 1107 | * @adapter: board private structure |
@@ -946,27 +1111,22 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) | |||
946 | * @rx_desc: rx descriptor | 1111 | * @rx_desc: rx descriptor |
947 | **/ | 1112 | **/ |
948 | static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, | 1113 | static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, |
949 | struct sk_buff *skb, u8 status, | 1114 | struct sk_buff *skb, u8 status, |
950 | struct ixgbe_ring *ring, | 1115 | struct ixgbe_ring *ring, |
951 | union ixgbe_adv_rx_desc *rx_desc) | 1116 | union ixgbe_adv_rx_desc *rx_desc) |
952 | { | 1117 | { |
953 | struct ixgbe_adapter *adapter = q_vector->adapter; | 1118 | struct ixgbe_adapter *adapter = q_vector->adapter; |
954 | struct napi_struct *napi = &q_vector->napi; | 1119 | struct napi_struct *napi = &q_vector->napi; |
955 | bool is_vlan = (status & IXGBE_RXD_STAT_VP); | 1120 | bool is_vlan = (status & IXGBE_RXD_STAT_VP); |
956 | u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); | 1121 | u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); |
957 | 1122 | ||
958 | skb_record_rx_queue(skb, ring->queue_index); | 1123 | if (is_vlan && (tag & VLAN_VID_MASK)) |
959 | if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { | 1124 | __vlan_hwaccel_put_tag(skb, tag); |
960 | if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK)) | 1125 | |
961 | vlan_gro_receive(napi, adapter->vlgrp, tag, skb); | 1126 | if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) |
962 | else | 1127 | napi_gro_receive(napi, skb); |
963 | napi_gro_receive(napi, skb); | 1128 | else |
964 | } else { | 1129 | netif_rx(skb); |
965 | if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK)) | ||
966 | vlan_hwaccel_rx(skb, adapter->vlgrp, tag); | ||
967 | else | ||
968 | netif_rx(skb); | ||
969 | } | ||
970 | } | 1130 | } |
971 | 1131 | ||
972 | /** | 1132 | /** |
@@ -981,7 +1141,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, | |||
981 | { | 1141 | { |
982 | u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error); | 1142 | u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error); |
983 | 1143 | ||
984 | skb->ip_summed = CHECKSUM_NONE; | 1144 | skb_checksum_none_assert(skb); |
985 | 1145 | ||
986 | /* Rx csum disabled */ | 1146 | /* Rx csum disabled */ |
987 | if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) | 1147 | if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) |
@@ -1016,8 +1176,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, | |||
1016 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1176 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1017 | } | 1177 | } |
1018 | 1178 | ||
1019 | static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw, | 1179 | static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) |
1020 | struct ixgbe_ring *rx_ring, u32 val) | ||
1021 | { | 1180 | { |
1022 | /* | 1181 | /* |
1023 | * Force memory writes to complete before letting h/w | 1182 | * Force memory writes to complete before letting h/w |
@@ -1026,130 +1185,133 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw, | |||
1026 | * such as IA-64). | 1185 | * such as IA-64). |
1027 | */ | 1186 | */ |
1028 | wmb(); | 1187 | wmb(); |
1029 | IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val); | 1188 | writel(val, rx_ring->tail); |
1030 | } | 1189 | } |
1031 | 1190 | ||
1032 | /** | 1191 | /** |
1033 | * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split | 1192 | * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split |
1034 | * @adapter: address of board private structure | 1193 | * @rx_ring: ring to place buffers on |
1194 | * @cleaned_count: number of buffers to replace | ||
1035 | **/ | 1195 | **/ |
1036 | static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | 1196 | void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) |
1037 | struct ixgbe_ring *rx_ring, | ||
1038 | int cleaned_count) | ||
1039 | { | 1197 | { |
1040 | struct pci_dev *pdev = adapter->pdev; | ||
1041 | union ixgbe_adv_rx_desc *rx_desc; | 1198 | union ixgbe_adv_rx_desc *rx_desc; |
1042 | struct ixgbe_rx_buffer *bi; | 1199 | struct ixgbe_rx_buffer *bi; |
1043 | unsigned int i; | 1200 | struct sk_buff *skb; |
1201 | u16 i = rx_ring->next_to_use; | ||
1044 | 1202 | ||
1045 | i = rx_ring->next_to_use; | 1203 | /* do nothing if no valid netdev defined */ |
1046 | bi = &rx_ring->rx_buffer_info[i]; | 1204 | if (!rx_ring->netdev) |
1205 | return; | ||
1047 | 1206 | ||
1048 | while (cleaned_count--) { | 1207 | while (cleaned_count--) { |
1049 | rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); | 1208 | rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); |
1209 | bi = &rx_ring->rx_buffer_info[i]; | ||
1210 | skb = bi->skb; | ||
1211 | |||
1212 | if (!skb) { | ||
1213 | skb = netdev_alloc_skb_ip_align(rx_ring->netdev, | ||
1214 | rx_ring->rx_buf_len); | ||
1215 | if (!skb) { | ||
1216 | rx_ring->rx_stats.alloc_rx_buff_failed++; | ||
1217 | goto no_buffers; | ||
1218 | } | ||
1219 | /* initialize queue mapping */ | ||
1220 | skb_record_rx_queue(skb, rx_ring->queue_index); | ||
1221 | bi->skb = skb; | ||
1222 | } | ||
1050 | 1223 | ||
1051 | if (!bi->page_dma && | 1224 | if (!bi->dma) { |
1052 | (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) { | 1225 | bi->dma = dma_map_single(rx_ring->dev, |
1226 | skb->data, | ||
1227 | rx_ring->rx_buf_len, | ||
1228 | DMA_FROM_DEVICE); | ||
1229 | if (dma_mapping_error(rx_ring->dev, bi->dma)) { | ||
1230 | rx_ring->rx_stats.alloc_rx_buff_failed++; | ||
1231 | bi->dma = 0; | ||
1232 | goto no_buffers; | ||
1233 | } | ||
1234 | } | ||
1235 | |||
1236 | if (ring_is_ps_enabled(rx_ring)) { | ||
1053 | if (!bi->page) { | 1237 | if (!bi->page) { |
1054 | bi->page = alloc_page(GFP_ATOMIC); | 1238 | bi->page = netdev_alloc_page(rx_ring->netdev); |
1055 | if (!bi->page) { | 1239 | if (!bi->page) { |
1056 | adapter->alloc_rx_page_failed++; | 1240 | rx_ring->rx_stats.alloc_rx_page_failed++; |
1057 | goto no_buffers; | 1241 | goto no_buffers; |
1058 | } | 1242 | } |
1059 | bi->page_offset = 0; | ||
1060 | } else { | ||
1061 | /* use a half page if we're re-using */ | ||
1062 | bi->page_offset ^= (PAGE_SIZE / 2); | ||
1063 | } | 1243 | } |
1064 | 1244 | ||
1065 | bi->page_dma = dma_map_page(&pdev->dev, bi->page, | 1245 | if (!bi->page_dma) { |
1066 | bi->page_offset, | 1246 | /* use a half page if we're re-using */ |
1067 | (PAGE_SIZE / 2), | 1247 | bi->page_offset ^= PAGE_SIZE / 2; |
1068 | DMA_FROM_DEVICE); | 1248 | bi->page_dma = dma_map_page(rx_ring->dev, |
1069 | } | 1249 | bi->page, |
1070 | 1250 | bi->page_offset, | |
1071 | if (!bi->skb) { | 1251 | PAGE_SIZE / 2, |
1072 | struct sk_buff *skb; | 1252 | DMA_FROM_DEVICE); |
1073 | /* netdev_alloc_skb reserves 32 bytes up front!! */ | 1253 | if (dma_mapping_error(rx_ring->dev, |
1074 | uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES; | 1254 | bi->page_dma)) { |
1075 | skb = netdev_alloc_skb(adapter->netdev, bufsz); | 1255 | rx_ring->rx_stats.alloc_rx_page_failed++; |
1076 | 1256 | bi->page_dma = 0; | |
1077 | if (!skb) { | 1257 | goto no_buffers; |
1078 | adapter->alloc_rx_buff_failed++; | 1258 | } |
1079 | goto no_buffers; | ||
1080 | } | 1259 | } |
1081 | 1260 | ||
1082 | /* advance the data pointer to the next cache line */ | 1261 | /* Refresh the desc even if buffer_addrs didn't change |
1083 | skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES) | 1262 | * because each write-back erases this info. */ |
1084 | - skb->data)); | ||
1085 | |||
1086 | bi->skb = skb; | ||
1087 | bi->dma = dma_map_single(&pdev->dev, skb->data, | ||
1088 | rx_ring->rx_buf_len, | ||
1089 | DMA_FROM_DEVICE); | ||
1090 | } | ||
1091 | /* Refresh the desc even if buffer_addrs didn't change because | ||
1092 | * each write-back erases this info. */ | ||
1093 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { | ||
1094 | rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); | 1263 | rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); |
1095 | rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); | 1264 | rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); |
1096 | } else { | 1265 | } else { |
1097 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); | 1266 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); |
1267 | rx_desc->read.hdr_addr = 0; | ||
1098 | } | 1268 | } |
1099 | 1269 | ||
1100 | i++; | 1270 | i++; |
1101 | if (i == rx_ring->count) | 1271 | if (i == rx_ring->count) |
1102 | i = 0; | 1272 | i = 0; |
1103 | bi = &rx_ring->rx_buffer_info[i]; | ||
1104 | } | 1273 | } |
1105 | 1274 | ||
1106 | no_buffers: | 1275 | no_buffers: |
1107 | if (rx_ring->next_to_use != i) { | 1276 | if (rx_ring->next_to_use != i) { |
1108 | rx_ring->next_to_use = i; | 1277 | rx_ring->next_to_use = i; |
1109 | if (i-- == 0) | 1278 | ixgbe_release_rx_desc(rx_ring, i); |
1110 | i = (rx_ring->count - 1); | ||
1111 | |||
1112 | ixgbe_release_rx_desc(&adapter->hw, rx_ring, i); | ||
1113 | } | 1279 | } |
1114 | } | 1280 | } |
1115 | 1281 | ||
1116 | static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc) | 1282 | static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc) |
1117 | { | ||
1118 | return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; | ||
1119 | } | ||
1120 | |||
1121 | static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) | ||
1122 | { | 1283 | { |
1123 | return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; | 1284 | /* HW will not DMA in data larger than the given buffer, even if it |
1124 | } | 1285 | * parses the (NFS, of course) header to be larger. In that case, it |
1125 | 1286 | * fills the header buffer and spills the rest into the page. | |
1126 | static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc) | 1287 | */ |
1127 | { | 1288 | u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info); |
1128 | return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & | 1289 | u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> |
1129 | IXGBE_RXDADV_RSCCNT_MASK) >> | 1290 | IXGBE_RXDADV_HDRBUFLEN_SHIFT; |
1130 | IXGBE_RXDADV_RSCCNT_SHIFT; | 1291 | if (hlen > IXGBE_RX_HDR_SIZE) |
1292 | hlen = IXGBE_RX_HDR_SIZE; | ||
1293 | return hlen; | ||
1131 | } | 1294 | } |
1132 | 1295 | ||
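ixgbe_get_hlen pulls the header buffer length out of the descriptor's hdr_info with a mask and shift, then caps it at the header buffer size. A standalone sketch of that extraction; the mask, shift and cap below are illustrative stand-ins rather than values copied from ixgbe_type.h:

#include <stdint.h>
#include <stdio.h>

/* stand-ins for IXGBE_RXDADV_HDRBUFLEN_* and IXGBE_RX_HDR_SIZE */
#define HDRBUFLEN_MASK  0x7FE0u
#define HDRBUFLEN_SHIFT 5
#define RX_HDR_SIZE     256u

static uint16_t get_hlen(uint16_t hdr_info)
{
	uint16_t hlen = (hdr_info & HDRBUFLEN_MASK) >> HDRBUFLEN_SHIFT;

	/* never report more than the header buffer can actually hold */
	if (hlen > RX_HDR_SIZE)
		hlen = RX_HDR_SIZE;
	return hlen;
}

int main(void)
{
	printf("%u\n", get_hlen(0x1234));  /* 145: bits inside the field */
	printf("%u\n", get_hlen(0x7FE0));  /* 256: saturated at the cap */
	return 0;
}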
1133 | /** | 1296 | /** |
1134 | * ixgbe_transform_rsc_queue - change rsc queue into a full packet | 1297 | * ixgbe_transform_rsc_queue - change rsc queue into a full packet |
1135 | * @skb: pointer to the last skb in the rsc queue | 1298 | * @skb: pointer to the last skb in the rsc queue |
1136 | * @count: pointer to number of packets coalesced in this context | ||
1137 | * | 1299 | * |
1138 | * This function changes a queue full of hw rsc buffers into a completed | 1300 | * This function changes a queue full of hw rsc buffers into a completed |
1139 | * packet. It uses the ->prev pointers to find the first packet and then | 1301 | * packet. It uses the ->prev pointers to find the first packet and then |
1140 | * turns it into the frag list owner. | 1302 | * turns it into the frag list owner. |
1141 | **/ | 1303 | **/ |
1142 | static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb, | 1304 | static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb) |
1143 | u64 *count) | ||
1144 | { | 1305 | { |
1145 | unsigned int frag_list_size = 0; | 1306 | unsigned int frag_list_size = 0; |
1307 | unsigned int skb_cnt = 1; | ||
1146 | 1308 | ||
1147 | while (skb->prev) { | 1309 | while (skb->prev) { |
1148 | struct sk_buff *prev = skb->prev; | 1310 | struct sk_buff *prev = skb->prev; |
1149 | frag_list_size += skb->len; | 1311 | frag_list_size += skb->len; |
1150 | skb->prev = NULL; | 1312 | skb->prev = NULL; |
1151 | skb = prev; | 1313 | skb = prev; |
1152 | *count += 1; | 1314 | skb_cnt++; |
1153 | } | 1315 | } |
1154 | 1316 | ||
1155 | skb_shinfo(skb)->frag_list = skb->next; | 1317 | skb_shinfo(skb)->frag_list = skb->next; |
@@ -1157,69 +1319,59 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb, | |||
1157 | skb->len += frag_list_size; | 1319 | skb->len += frag_list_size; |
1158 | skb->data_len += frag_list_size; | 1320 | skb->data_len += frag_list_size; |
1159 | skb->truesize += frag_list_size; | 1321 | skb->truesize += frag_list_size; |
1322 | IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt; | ||
1323 | |||
1160 | return skb; | 1324 | return skb; |
1161 | } | 1325 | } |
1162 | 1326 | ||
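ixgbe_transform_rsc_queue walks the ->prev links back to the first buffer of the coalesced sequence, summing the trailing lengths and counting segments before handing the chain to the head skb. The walk is easy to model in isolation; this sketch uses a tiny stand-in struct in place of sk_buff:

#include <stdio.h>

/* minimal stand-in for the few sk_buff fields the walk touches */
struct buf {
	struct buf *prev;
	unsigned int len;
};

int main(void)
{
	struct buf a = { NULL, 100 }, b = { &a, 200 }, c = { &b, 300 };
	struct buf *skb = &c;           /* start from the last segment */
	unsigned int frag_list_size = 0;
	unsigned int skb_cnt = 1;

	/* walk back to the head, summing lengths of the later segments */
	while (skb->prev) {
		struct buf *prev = skb->prev;

		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
		skb_cnt++;
	}

	/* skb now points at the first segment; it absorbs the rest */
	printf("segments=%u, bytes appended=%u\n", skb_cnt, frag_list_size);
	return 0;
}

With three chained segments this prints segments=3, bytes appended=500, mirroring how the driver stores the count in IXGBE_RSC_CB(skb)->skb_cnt.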
1163 | struct ixgbe_rsc_cb { | 1327 | static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc) |
1164 | dma_addr_t dma; | 1328 | { |
1165 | bool delay_unmap; | 1329 | return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & |
1166 | }; | 1330 | IXGBE_RXDADV_RSCCNT_MASK); |
1167 | 1331 | } | |
1168 | #define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) | ||
1169 | 1332 | ||
1170 | static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | 1333 | static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, |
1171 | struct ixgbe_ring *rx_ring, | 1334 | struct ixgbe_ring *rx_ring, |
1172 | int *work_done, int work_to_do) | 1335 | int *work_done, int work_to_do) |
1173 | { | 1336 | { |
1174 | struct ixgbe_adapter *adapter = q_vector->adapter; | 1337 | struct ixgbe_adapter *adapter = q_vector->adapter; |
1175 | struct net_device *netdev = adapter->netdev; | ||
1176 | struct pci_dev *pdev = adapter->pdev; | ||
1177 | union ixgbe_adv_rx_desc *rx_desc, *next_rxd; | 1338 | union ixgbe_adv_rx_desc *rx_desc, *next_rxd; |
1178 | struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; | 1339 | struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; |
1179 | struct sk_buff *skb; | 1340 | struct sk_buff *skb; |
1180 | unsigned int i, rsc_count = 0; | ||
1181 | u32 len, staterr; | ||
1182 | u16 hdr_info; | ||
1183 | bool cleaned = false; | ||
1184 | int cleaned_count = 0; | ||
1185 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; | 1341 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; |
1342 | const int current_node = numa_node_id(); | ||
1186 | #ifdef IXGBE_FCOE | 1343 | #ifdef IXGBE_FCOE |
1187 | int ddp_bytes = 0; | 1344 | int ddp_bytes = 0; |
1188 | #endif /* IXGBE_FCOE */ | 1345 | #endif /* IXGBE_FCOE */ |
1346 | u32 staterr; | ||
1347 | u16 i; | ||
1348 | u16 cleaned_count = 0; | ||
1349 | bool pkt_is_rsc = false; | ||
1189 | 1350 | ||
1190 | i = rx_ring->next_to_clean; | 1351 | i = rx_ring->next_to_clean; |
1191 | rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); | 1352 | rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); |
1192 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); | 1353 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); |
1193 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; | ||
1194 | 1354 | ||
1195 | while (staterr & IXGBE_RXD_STAT_DD) { | 1355 | while (staterr & IXGBE_RXD_STAT_DD) { |
1196 | u32 upper_len = 0; | 1356 | u32 upper_len = 0; |
1197 | if (*work_done >= work_to_do) | ||
1198 | break; | ||
1199 | (*work_done)++; | ||
1200 | 1357 | ||
1201 | rmb(); /* read descriptor and rx_buffer_info after status DD */ | 1358 | rmb(); /* read descriptor and rx_buffer_info after status DD */ |
1202 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { | ||
1203 | hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); | ||
1204 | len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> | ||
1205 | IXGBE_RXDADV_HDRBUFLEN_SHIFT; | ||
1206 | upper_len = le16_to_cpu(rx_desc->wb.upper.length); | ||
1207 | if ((len > IXGBE_RX_HDR_SIZE) || | ||
1208 | (upper_len && !(hdr_info & IXGBE_RXDADV_SPH))) | ||
1209 | len = IXGBE_RX_HDR_SIZE; | ||
1210 | } else { | ||
1211 | len = le16_to_cpu(rx_desc->wb.upper.length); | ||
1212 | } | ||
1213 | 1359 | ||
1214 | cleaned = true; | 1360 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; |
1361 | |||
1215 | skb = rx_buffer_info->skb; | 1362 | skb = rx_buffer_info->skb; |
1216 | prefetch(skb->data); | ||
1217 | rx_buffer_info->skb = NULL; | 1363 | rx_buffer_info->skb = NULL; |
1364 | prefetch(skb->data); | ||
1365 | |||
1366 | if (ring_is_rsc_enabled(rx_ring)) | ||
1367 | pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); | ||
1218 | 1368 | ||
1369 | /* if this is a skb from previous receive DMA will be 0 */ | ||
1219 | if (rx_buffer_info->dma) { | 1370 | if (rx_buffer_info->dma) { |
1220 | if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && | 1371 | u16 hlen; |
1221 | (!(staterr & IXGBE_RXD_STAT_EOP)) && | 1372 | if (pkt_is_rsc && |
1222 | (!(skb->prev))) { | 1373 | !(staterr & IXGBE_RXD_STAT_EOP) && |
1374 | !skb->prev) { | ||
1223 | /* | 1375 | /* |
1224 | * When HWRSC is enabled, delay unmapping | 1376 | * When HWRSC is enabled, delay unmapping |
1225 | * of the first packet. It carries the | 1377 | * of the first packet. It carries the |
@@ -1230,29 +1382,42 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1230 | IXGBE_RSC_CB(skb)->delay_unmap = true; | 1382 | IXGBE_RSC_CB(skb)->delay_unmap = true; |
1231 | IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; | 1383 | IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; |
1232 | } else { | 1384 | } else { |
1233 | dma_unmap_single(&pdev->dev, | 1385 | dma_unmap_single(rx_ring->dev, |
1234 | rx_buffer_info->dma, | 1386 | rx_buffer_info->dma, |
1235 | rx_ring->rx_buf_len, | 1387 | rx_ring->rx_buf_len, |
1236 | DMA_FROM_DEVICE); | 1388 | DMA_FROM_DEVICE); |
1237 | } | 1389 | } |
1238 | rx_buffer_info->dma = 0; | 1390 | rx_buffer_info->dma = 0; |
1239 | skb_put(skb, len); | 1391 | |
1392 | if (ring_is_ps_enabled(rx_ring)) { | ||
1393 | hlen = ixgbe_get_hlen(rx_desc); | ||
1394 | upper_len = le16_to_cpu(rx_desc->wb.upper.length); | ||
1395 | } else { | ||
1396 | hlen = le16_to_cpu(rx_desc->wb.upper.length); | ||
1397 | } | ||
1398 | |||
1399 | skb_put(skb, hlen); | ||
1400 | } else { | ||
1401 | /* assume packet split since header is unmapped */ | ||
1402 | upper_len = le16_to_cpu(rx_desc->wb.upper.length); | ||
1240 | } | 1403 | } |
1241 | 1404 | ||
1242 | if (upper_len) { | 1405 | if (upper_len) { |
1243 | dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, | 1406 | dma_unmap_page(rx_ring->dev, |
1244 | PAGE_SIZE / 2, DMA_FROM_DEVICE); | 1407 | rx_buffer_info->page_dma, |
1408 | PAGE_SIZE / 2, | ||
1409 | DMA_FROM_DEVICE); | ||
1245 | rx_buffer_info->page_dma = 0; | 1410 | rx_buffer_info->page_dma = 0; |
1246 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, | 1411 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, |
1247 | rx_buffer_info->page, | 1412 | rx_buffer_info->page, |
1248 | rx_buffer_info->page_offset, | 1413 | rx_buffer_info->page_offset, |
1249 | upper_len); | 1414 | upper_len); |
1250 | 1415 | ||
1251 | if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) || | 1416 | if ((page_count(rx_buffer_info->page) == 1) && |
1252 | (page_count(rx_buffer_info->page) != 1)) | 1417 | (page_to_nid(rx_buffer_info->page) == current_node)) |
1253 | rx_buffer_info->page = NULL; | ||
1254 | else | ||
1255 | get_page(rx_buffer_info->page); | 1418 | get_page(rx_buffer_info->page); |
1419 | else | ||
1420 | rx_buffer_info->page = NULL; | ||
1256 | 1421 | ||
1257 | skb->len += upper_len; | 1422 | skb->len += upper_len; |
1258 | skb->data_len += upper_len; | 1423 | skb->data_len += upper_len; |
@@ -1263,14 +1428,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1263 | if (i == rx_ring->count) | 1428 | if (i == rx_ring->count) |
1264 | i = 0; | 1429 | i = 0; |
1265 | 1430 | ||
1266 | next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i); | 1431 | next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i); |
1267 | prefetch(next_rxd); | 1432 | prefetch(next_rxd); |
1268 | cleaned_count++; | 1433 | cleaned_count++; |
1269 | 1434 | ||
1270 | if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) | 1435 | if (pkt_is_rsc) { |
1271 | rsc_count = ixgbe_get_rsc_count(rx_desc); | ||
1272 | |||
1273 | if (rsc_count) { | ||
1274 | u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> | 1436 | u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> |
1275 | IXGBE_RXDADV_NEXTP_SHIFT; | 1437 | IXGBE_RXDADV_NEXTP_SHIFT; |
1276 | next_buffer = &rx_ring->rx_buffer_info[nextp]; | 1438 | next_buffer = &rx_ring->rx_buffer_info[nextp]; |
@@ -1278,28 +1440,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1278 | next_buffer = &rx_ring->rx_buffer_info[i]; | 1440 | next_buffer = &rx_ring->rx_buffer_info[i]; |
1279 | } | 1441 | } |
1280 | 1442 | ||
1281 | if (staterr & IXGBE_RXD_STAT_EOP) { | 1443 | if (!(staterr & IXGBE_RXD_STAT_EOP)) { |
1282 | if (skb->prev) | 1444 | if (ring_is_ps_enabled(rx_ring)) { |
1283 | skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count)); | ||
1284 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { | ||
1285 | if (IXGBE_RSC_CB(skb)->delay_unmap) { | ||
1286 | dma_unmap_single(&pdev->dev, | ||
1287 | IXGBE_RSC_CB(skb)->dma, | ||
1288 | rx_ring->rx_buf_len, | ||
1289 | DMA_FROM_DEVICE); | ||
1290 | IXGBE_RSC_CB(skb)->dma = 0; | ||
1291 | IXGBE_RSC_CB(skb)->delay_unmap = false; | ||
1292 | } | ||
1293 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) | ||
1294 | rx_ring->rsc_count += skb_shinfo(skb)->nr_frags; | ||
1295 | else | ||
1296 | rx_ring->rsc_count++; | ||
1297 | rx_ring->rsc_flush++; | ||
1298 | } | ||
1299 | rx_ring->stats.packets++; | ||
1300 | rx_ring->stats.bytes += skb->len; | ||
1301 | } else { | ||
1302 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { | ||
1303 | rx_buffer_info->skb = next_buffer->skb; | 1445 | rx_buffer_info->skb = next_buffer->skb; |
1304 | rx_buffer_info->dma = next_buffer->dma; | 1446 | rx_buffer_info->dma = next_buffer->dma; |
1305 | next_buffer->skb = skb; | 1447 | next_buffer->skb = skb; |
@@ -1308,22 +1450,57 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1308 | skb->next = next_buffer->skb; | 1450 | skb->next = next_buffer->skb; |
1309 | skb->next->prev = skb; | 1451 | skb->next->prev = skb; |
1310 | } | 1452 | } |
1311 | rx_ring->non_eop_descs++; | 1453 | rx_ring->rx_stats.non_eop_descs++; |
1312 | goto next_desc; | 1454 | goto next_desc; |
1313 | } | 1455 | } |
1314 | 1456 | ||
1457 | if (skb->prev) { | ||
1458 | skb = ixgbe_transform_rsc_queue(skb); | ||
1459 | /* if we got here without RSC the packet is invalid */ | ||
1460 | if (!pkt_is_rsc) { | ||
1461 | __pskb_trim(skb, 0); | ||
1462 | rx_buffer_info->skb = skb; | ||
1463 | goto next_desc; | ||
1464 | } | ||
1465 | } | ||
1466 | |||
1467 | if (ring_is_rsc_enabled(rx_ring)) { | ||
1468 | if (IXGBE_RSC_CB(skb)->delay_unmap) { | ||
1469 | dma_unmap_single(rx_ring->dev, | ||
1470 | IXGBE_RSC_CB(skb)->dma, | ||
1471 | rx_ring->rx_buf_len, | ||
1472 | DMA_FROM_DEVICE); | ||
1473 | IXGBE_RSC_CB(skb)->dma = 0; | ||
1474 | IXGBE_RSC_CB(skb)->delay_unmap = false; | ||
1475 | } | ||
1476 | } | ||
1477 | if (pkt_is_rsc) { | ||
1478 | if (ring_is_ps_enabled(rx_ring)) | ||
1479 | rx_ring->rx_stats.rsc_count += | ||
1480 | skb_shinfo(skb)->nr_frags; | ||
1481 | else | ||
1482 | rx_ring->rx_stats.rsc_count += | ||
1483 | IXGBE_RSC_CB(skb)->skb_cnt; | ||
1484 | rx_ring->rx_stats.rsc_flush++; | ||
1485 | } | ||
1486 | |||
1487 | /* ERR_MASK will only have valid bits if EOP set */ | ||
1315 | if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) { | 1488 | if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) { |
1316 | dev_kfree_skb_irq(skb); | 1489 | /* trim packet back to size 0 and recycle it */ |
1490 | __pskb_trim(skb, 0); | ||
1491 | rx_buffer_info->skb = skb; | ||
1317 | goto next_desc; | 1492 | goto next_desc; |
1318 | } | 1493 | } |
1319 | 1494 | ||
1320 | ixgbe_rx_checksum(adapter, rx_desc, skb); | 1495 | ixgbe_rx_checksum(adapter, rx_desc, skb); |
1496 | if (adapter->netdev->features & NETIF_F_RXHASH) | ||
1497 | ixgbe_rx_hash(rx_desc, skb); | ||
1321 | 1498 | ||
1322 | /* probably a little skewed due to removing CRC */ | 1499 | /* probably a little skewed due to removing CRC */ |
1323 | total_rx_bytes += skb->len; | 1500 | total_rx_bytes += skb->len; |
1324 | total_rx_packets++; | 1501 | total_rx_packets++; |
1325 | 1502 | ||
1326 | skb->protocol = eth_type_trans(skb, adapter->netdev); | 1503 | skb->protocol = eth_type_trans(skb, rx_ring->netdev); |
1327 | #ifdef IXGBE_FCOE | 1504 | #ifdef IXGBE_FCOE |
1328 | /* if ddp, not passing to ULD unless for FCP_RSP or error */ | 1505 | /* if ddp, not passing to ULD unless for FCP_RSP or error */ |
1329 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | 1506 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { |
@@ -1337,16 +1514,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1337 | next_desc: | 1514 | next_desc: |
1338 | rx_desc->wb.upper.status_error = 0; | 1515 | rx_desc->wb.upper.status_error = 0; |
1339 | 1516 | ||
1517 | (*work_done)++; | ||
1518 | if (*work_done >= work_to_do) | ||
1519 | break; | ||
1520 | |||
1340 | /* return some buffers to hardware, one at a time is too slow */ | 1521 | /* return some buffers to hardware, one at a time is too slow */ |
1341 | if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { | 1522 | if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { |
1342 | ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); | 1523 | ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); |
1343 | cleaned_count = 0; | 1524 | cleaned_count = 0; |
1344 | } | 1525 | } |
1345 | 1526 | ||
1346 | /* use prefetched values */ | 1527 | /* use prefetched values */ |
1347 | rx_desc = next_rxd; | 1528 | rx_desc = next_rxd; |
1348 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; | ||
1349 | |||
1350 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); | 1529 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); |
1351 | } | 1530 | } |
1352 | 1531 | ||
@@ -1354,14 +1533,14 @@ next_desc: | |||
1354 | cleaned_count = IXGBE_DESC_UNUSED(rx_ring); | 1533 | cleaned_count = IXGBE_DESC_UNUSED(rx_ring); |
1355 | 1534 | ||
1356 | if (cleaned_count) | 1535 | if (cleaned_count) |
1357 | ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); | 1536 | ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); |
1358 | 1537 | ||
1359 | #ifdef IXGBE_FCOE | 1538 | #ifdef IXGBE_FCOE |
1360 | /* include DDPed FCoE data */ | 1539 | /* include DDPed FCoE data */ |
1361 | if (ddp_bytes > 0) { | 1540 | if (ddp_bytes > 0) { |
1362 | unsigned int mss; | 1541 | unsigned int mss; |
1363 | 1542 | ||
1364 | mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) - | 1543 | mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) - |
1365 | sizeof(struct fc_frame_header) - | 1544 | sizeof(struct fc_frame_header) - |
1366 | sizeof(struct fcoe_crc_eof); | 1545 | sizeof(struct fcoe_crc_eof); |
1367 | if (mss > 512) | 1546 | if (mss > 512) |
@@ -1373,10 +1552,10 @@ next_desc: | |||
1373 | 1552 | ||
1374 | rx_ring->total_packets += total_rx_packets; | 1553 | rx_ring->total_packets += total_rx_packets; |
1375 | rx_ring->total_bytes += total_rx_bytes; | 1554 | rx_ring->total_bytes += total_rx_bytes; |
1376 | netdev->stats.rx_bytes += total_rx_bytes; | 1555 | u64_stats_update_begin(&rx_ring->syncp); |
1377 | netdev->stats.rx_packets += total_rx_packets; | 1556 | rx_ring->stats.packets += total_rx_packets; |
1378 | 1557 | rx_ring->stats.bytes += total_rx_bytes; | |
1379 | return cleaned; | 1558 | u64_stats_update_end(&rx_ring->syncp); |
1380 | } | 1559 | } |
1381 | 1560 | ||
1382 | static int ixgbe_clean_rxonly(struct napi_struct *, int); | 1561 | static int ixgbe_clean_rxonly(struct napi_struct *, int); |
@@ -1390,7 +1569,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int); | |||
1390 | static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | 1569 | static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) |
1391 | { | 1570 | { |
1392 | struct ixgbe_q_vector *q_vector; | 1571 | struct ixgbe_q_vector *q_vector; |
1393 | int i, j, q_vectors, v_idx, r_idx; | 1572 | int i, q_vectors, v_idx, r_idx; |
1394 | u32 mask; | 1573 | u32 mask; |
1395 | 1574 | ||
1396 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 1575 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
@@ -1403,24 +1582,24 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | |||
1403 | q_vector = adapter->q_vector[v_idx]; | 1582 | q_vector = adapter->q_vector[v_idx]; |
1404 | /* XXX for_each_set_bit(...) */ | 1583 | /* XXX for_each_set_bit(...) */ |
1405 | r_idx = find_first_bit(q_vector->rxr_idx, | 1584 | r_idx = find_first_bit(q_vector->rxr_idx, |
1406 | adapter->num_rx_queues); | 1585 | adapter->num_rx_queues); |
1407 | 1586 | ||
1408 | for (i = 0; i < q_vector->rxr_count; i++) { | 1587 | for (i = 0; i < q_vector->rxr_count; i++) { |
1409 | j = adapter->rx_ring[r_idx]->reg_idx; | 1588 | u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx; |
1410 | ixgbe_set_ivar(adapter, 0, j, v_idx); | 1589 | ixgbe_set_ivar(adapter, 0, reg_idx, v_idx); |
1411 | r_idx = find_next_bit(q_vector->rxr_idx, | 1590 | r_idx = find_next_bit(q_vector->rxr_idx, |
1412 | adapter->num_rx_queues, | 1591 | adapter->num_rx_queues, |
1413 | r_idx + 1); | 1592 | r_idx + 1); |
1414 | } | 1593 | } |
1415 | r_idx = find_first_bit(q_vector->txr_idx, | 1594 | r_idx = find_first_bit(q_vector->txr_idx, |
1416 | adapter->num_tx_queues); | 1595 | adapter->num_tx_queues); |
1417 | 1596 | ||
1418 | for (i = 0; i < q_vector->txr_count; i++) { | 1597 | for (i = 0; i < q_vector->txr_count; i++) { |
1419 | j = adapter->tx_ring[r_idx]->reg_idx; | 1598 | u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx; |
1420 | ixgbe_set_ivar(adapter, 1, j, v_idx); | 1599 | ixgbe_set_ivar(adapter, 1, reg_idx, v_idx); |
1421 | r_idx = find_next_bit(q_vector->txr_idx, | 1600 | r_idx = find_next_bit(q_vector->txr_idx, |
1422 | adapter->num_tx_queues, | 1601 | adapter->num_tx_queues, |
1423 | r_idx + 1); | 1602 | r_idx + 1); |
1424 | } | 1603 | } |
1425 | 1604 | ||
1426 | if (q_vector->txr_count && !q_vector->rxr_count) | 1605 | if (q_vector->txr_count && !q_vector->rxr_count) |
@@ -1431,13 +1610,36 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | |||
1431 | q_vector->eitr = adapter->rx_eitr_param; | 1610 | q_vector->eitr = adapter->rx_eitr_param; |
1432 | 1611 | ||
1433 | ixgbe_write_eitr(q_vector); | 1612 | ixgbe_write_eitr(q_vector); |
1613 | /* If Flow Director is enabled, set interrupt affinity */ | ||
1614 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || | ||
1615 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { | ||
1616 | /* | ||
1617 | * Allocate the affinity_hint cpumask, assign the mask | ||
1618 | * for this vector, and set our affinity_hint for | ||
1619 | * this irq. | ||
1620 | */ | ||
1621 | if (!alloc_cpumask_var(&q_vector->affinity_mask, | ||
1622 | GFP_KERNEL)) | ||
1623 | return; | ||
1624 | cpumask_set_cpu(v_idx, q_vector->affinity_mask); | ||
1625 | irq_set_affinity_hint(adapter->msix_entries[v_idx].vector, | ||
1626 | q_vector->affinity_mask); | ||
1627 | } | ||
1434 | } | 1628 | } |
1435 | 1629 | ||
1436 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) | 1630 | switch (adapter->hw.mac.type) { |
1631 | case ixgbe_mac_82598EB: | ||
1437 | ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, | 1632 | ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, |
1438 | v_idx); | 1633 | v_idx); |
1439 | else if (adapter->hw.mac.type == ixgbe_mac_82599EB) | 1634 | break; |
1635 | case ixgbe_mac_82599EB: | ||
1636 | case ixgbe_mac_X540: | ||
1440 | ixgbe_set_ivar(adapter, -1, 1, v_idx); | 1637 | ixgbe_set_ivar(adapter, -1, 1, v_idx); |
1638 | break; | ||
1639 | |||
1640 | default: | ||
1641 | break; | ||
1642 | } | ||
1441 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); | 1643 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); |
1442 | 1644 | ||
1443 | /* set up to autoclear timer, and the vectors */ | 1645 | /* set up to autoclear timer, and the vectors */ |
@@ -1477,8 +1679,8 @@ enum latency_range { | |||
1477 | * parameter (see ixgbe_param.c) | 1679 | * parameter (see ixgbe_param.c) |
1478 | **/ | 1680 | **/ |
1479 | static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, | 1681 | static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, |
1480 | u32 eitr, u8 itr_setting, | 1682 | u32 eitr, u8 itr_setting, |
1481 | int packets, int bytes) | 1683 | int packets, int bytes) |
1482 | { | 1684 | { |
1483 | unsigned int retval = itr_setting; | 1685 | unsigned int retval = itr_setting; |
1484 | u32 timepassed_us; | 1686 | u32 timepassed_us; |
@@ -1533,12 +1735,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) | |||
1533 | int v_idx = q_vector->v_idx; | 1735 | int v_idx = q_vector->v_idx; |
1534 | u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); | 1736 | u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); |
1535 | 1737 | ||
1536 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 1738 | switch (adapter->hw.mac.type) { |
1739 | case ixgbe_mac_82598EB: | ||
1537 | /* must write high and low 16 bits to reset counter */ | 1740 | /* must write high and low 16 bits to reset counter */ |
1538 | itr_reg |= (itr_reg << 16); | 1741 | itr_reg |= (itr_reg << 16); |
1539 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 1742 | break; |
1743 | case ixgbe_mac_82599EB: | ||
1744 | case ixgbe_mac_X540: | ||
1540 | /* | 1745 | /* |
1541 | * 82599 can support a value of zero, so allow it for | 1746 | * 82599 and X540 can support a value of zero, so allow it for |
1542 | * max interrupt rate, but there is an errata where it can | 1747 | * max interrupt rate, but there is an errata where it can |
1543 | * not be zero with RSC | 1748 | * not be zero with RSC |
1544 | */ | 1749 | */ |
@@ -1551,6 +1756,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) | |||
1551 | * immediate assertion of the interrupt | 1756 | * immediate assertion of the interrupt |
1552 | */ | 1757 | */ |
1553 | itr_reg |= IXGBE_EITR_CNT_WDIS; | 1758 | itr_reg |= IXGBE_EITR_CNT_WDIS; |
1759 | break; | ||
1760 | default: | ||
1761 | break; | ||
1554 | } | 1762 | } |
1555 | IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); | 1763 | IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); |
1556 | } | 1764 | } |
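On 82598 the EITR counter only resets when both 16-bit halves of the register are written, which is why the interval value is duplicated into the upper half before the single register write. The bit manipulation on its own, as a quick sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t itr_reg = 0x01F4;       /* example interval value */

	/* duplicate the low 16 bits into the high 16 bits (82598 path) */
	itr_reg |= (itr_reg << 16);

	printf("0x%08X\n", itr_reg);     /* prints 0x01F401F4 */
	return 0;
}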
@@ -1558,39 +1766,38 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) | |||
1558 | static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) | 1766 | static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) |
1559 | { | 1767 | { |
1560 | struct ixgbe_adapter *adapter = q_vector->adapter; | 1768 | struct ixgbe_adapter *adapter = q_vector->adapter; |
1769 | int i, r_idx; | ||
1561 | u32 new_itr; | 1770 | u32 new_itr; |
1562 | u8 current_itr, ret_itr; | 1771 | u8 current_itr, ret_itr; |
1563 | int i, r_idx; | ||
1564 | struct ixgbe_ring *rx_ring, *tx_ring; | ||
1565 | 1772 | ||
1566 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | 1773 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
1567 | for (i = 0; i < q_vector->txr_count; i++) { | 1774 | for (i = 0; i < q_vector->txr_count; i++) { |
1568 | tx_ring = adapter->tx_ring[r_idx]; | 1775 | struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx]; |
1569 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, | 1776 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, |
1570 | q_vector->tx_itr, | 1777 | q_vector->tx_itr, |
1571 | tx_ring->total_packets, | 1778 | tx_ring->total_packets, |
1572 | tx_ring->total_bytes); | 1779 | tx_ring->total_bytes); |
1573 | /* if the result for this queue would decrease interrupt | 1780 | /* if the result for this queue would decrease interrupt |
1574 | * rate for this vector then use that result */ | 1781 | * rate for this vector then use that result */ |
1575 | q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ? | 1782 | q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ? |
1576 | q_vector->tx_itr - 1 : ret_itr); | 1783 | q_vector->tx_itr - 1 : ret_itr); |
1577 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, | 1784 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, |
1578 | r_idx + 1); | 1785 | r_idx + 1); |
1579 | } | 1786 | } |
1580 | 1787 | ||
1581 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | 1788 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
1582 | for (i = 0; i < q_vector->rxr_count; i++) { | 1789 | for (i = 0; i < q_vector->rxr_count; i++) { |
1583 | rx_ring = adapter->rx_ring[r_idx]; | 1790 | struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx]; |
1584 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, | 1791 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, |
1585 | q_vector->rx_itr, | 1792 | q_vector->rx_itr, |
1586 | rx_ring->total_packets, | 1793 | rx_ring->total_packets, |
1587 | rx_ring->total_bytes); | 1794 | rx_ring->total_bytes); |
1588 | /* if the result for this queue would decrease interrupt | 1795 | /* if the result for this queue would decrease interrupt |
1589 | * rate for this vector then use that result */ | 1796 | * rate for this vector then use that result */ |
1590 | q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ? | 1797 | q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ? |
1591 | q_vector->rx_itr - 1 : ret_itr); | 1798 | q_vector->rx_itr - 1 : ret_itr); |
1592 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, | 1799 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, |
1593 | r_idx + 1); | 1800 | r_idx + 1); |
1594 | } | 1801 | } |
1595 | 1802 | ||
1596 | current_itr = max(q_vector->rx_itr, q_vector->tx_itr); | 1803 | current_itr = max(q_vector->rx_itr, q_vector->tx_itr); |
@@ -1611,7 +1818,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) | |||
1611 | 1818 | ||
1612 | if (new_itr != q_vector->eitr) { | 1819 | if (new_itr != q_vector->eitr) { |
1613 | /* do an exponential smoothing */ | 1820 | /* do an exponential smoothing */ |
1614 | new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); | 1821 | new_itr = ((q_vector->eitr * 9) + new_itr)/10; |
1615 | 1822 | ||
1616 | /* save the algorithm value here, not the smoothed one */ | 1823 | /* save the algorithm value here, not the smoothed one */ |
1617 | q_vector->eitr = new_itr; | 1824 | q_vector->eitr = new_itr; |
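The smoothing change replaces ((eitr * 90)/100) + ((new_itr * 10)/100) with ((eitr * 9) + new_itr)/10. Both are 90/10 weighted averages, but the new form divides once, so less precision is lost to separate integer truncations. A quick comparison of the two:

#include <stdio.h>

int main(void)
{
	unsigned int eitr = 8003, new_itr = 17;

	unsigned int old_way = ((eitr * 90) / 100) + ((new_itr * 10) / 100);
	unsigned int new_way = ((eitr * 9) + new_itr) / 10;

	/* the two-division form truncates each term separately */
	printf("old=%u new=%u\n", old_way, new_way);
	return 0;
}

With these inputs the old form yields 7203 and the new form 7204, the difference coming entirely from the per-term truncation.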
@@ -1621,45 +1828,62 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) | |||
1621 | } | 1828 | } |
1622 | 1829 | ||
1623 | /** | 1830 | /** |
1624 | * ixgbe_check_overtemp_task - worker thread to check over temperature | 1831 | * ixgbe_check_overtemp_subtask - check for over temperature |

1625 | * @work: pointer to work_struct containing our data | 1832 | * @adapter: pointer to adapter |
1626 | **/ | 1833 | **/ |
1627 | static void ixgbe_check_overtemp_task(struct work_struct *work) | 1834 | static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) |
1628 | { | 1835 | { |
1629 | struct ixgbe_adapter *adapter = container_of(work, | ||
1630 | struct ixgbe_adapter, | ||
1631 | check_overtemp_task); | ||
1632 | struct ixgbe_hw *hw = &adapter->hw; | 1836 | struct ixgbe_hw *hw = &adapter->hw; |
1633 | u32 eicr = adapter->interrupt_event; | 1837 | u32 eicr = adapter->interrupt_event; |
1634 | 1838 | ||
1635 | if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { | 1839 | if (test_bit(__IXGBE_DOWN, &adapter->state)) |
1636 | switch (hw->device_id) { | 1840 | return; |
1637 | case IXGBE_DEV_ID_82599_T3_LOM: { | 1841 | |
1842 | if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && | ||
1843 | !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) | ||
1844 | return; | ||
1845 | |||
1846 | adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; | ||
1847 | |||
1848 | switch (hw->device_id) { | ||
1849 | case IXGBE_DEV_ID_82599_T3_LOM: | ||
1850 | /* | ||
1851 | * Since the warning interrupt is for both ports | ||
1852 | * we don't have to check if: | ||
1853 | * - This interrupt wasn't for our port. | ||
1854 | * - We may have missed the interrupt so always have to | ||
1855 | * check if we got a LSC | ||
1856 | */ | ||
1857 | if (!(eicr & IXGBE_EICR_GPI_SDP0) && | ||
1858 | !(eicr & IXGBE_EICR_LSC)) | ||
1859 | return; | ||
1860 | |||
1861 | if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { | ||
1638 | u32 autoneg; | 1862 | u32 autoneg; |
1639 | bool link_up = false; | 1863 | bool link_up = false; |
1640 | 1864 | ||
1641 | if (hw->mac.ops.check_link) | 1865 | hw->mac.ops.check_link(hw, &autoneg, &link_up, false); |
1642 | hw->mac.ops.check_link(hw, &autoneg, &link_up, false); | ||
1643 | 1866 | ||
1644 | if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) || | 1867 | if (link_up) |
1645 | (eicr & IXGBE_EICR_LSC)) | ||
1646 | /* Check if this is due to overtemp */ | ||
1647 | if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) | ||
1648 | break; | ||
1649 | } | ||
1650 | return; | ||
1651 | default: | ||
1652 | if (!(eicr & IXGBE_EICR_GPI_SDP0)) | ||
1653 | return; | 1868 | return; |
1654 | break; | ||
1655 | } | 1869 | } |
1656 | e_crit(drv, "Network adapter has been stopped because it has " | 1870 | |
1657 | "over heated. Restart the computer. If the problem " | 1871 | /* Check if this is not due to overtemp */ |
1658 | "persists, power off the system and replace the " | 1872 | if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) |
1659 | "adapter\n"); | 1873 | return; |
1660 | /* write to clear the interrupt */ | 1874 | |
1661 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); | 1875 | break; |
1876 | default: | ||
1877 | if (!(eicr & IXGBE_EICR_GPI_SDP0)) | ||
1878 | return; | ||
1879 | break; | ||
1662 | } | 1880 | } |
1881 | e_crit(drv, | ||
1882 | "Network adapter has been stopped because it has over heated. " | ||
1883 | "Restart the computer. If the problem persists, " | ||
1884 | "power off the system and replace the adapter\n"); | ||
1885 | |||
1886 | adapter->interrupt_event = 0; | ||
1663 | } | 1887 | } |
1664 | 1888 | ||
1665 | static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) | 1889 | static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) |
@@ -1678,17 +1902,22 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) | |||
1678 | { | 1902 | { |
1679 | struct ixgbe_hw *hw = &adapter->hw; | 1903 | struct ixgbe_hw *hw = &adapter->hw; |
1680 | 1904 | ||
1905 | if (eicr & IXGBE_EICR_GPI_SDP2) { | ||
1906 | /* Clear the interrupt */ | ||
1907 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); | ||
1908 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { | ||
1909 | adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; | ||
1910 | ixgbe_service_event_schedule(adapter); | ||
1911 | } | ||
1912 | } | ||
1913 | |||
1681 | if (eicr & IXGBE_EICR_GPI_SDP1) { | 1914 | if (eicr & IXGBE_EICR_GPI_SDP1) { |
1682 | /* Clear the interrupt */ | 1915 | /* Clear the interrupt */ |
1683 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); | 1916 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); |
1684 | schedule_work(&adapter->multispeed_fiber_task); | 1917 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { |
1685 | } else if (eicr & IXGBE_EICR_GPI_SDP2) { | 1918 | adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; |
1686 | /* Clear the interrupt */ | 1919 | ixgbe_service_event_schedule(adapter); |
1687 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); | 1920 | } |
1688 | schedule_work(&adapter->sfp_config_module_task); | ||
1689 | } else { | ||
1690 | /* Interrupt isn't for us... */ | ||
1691 | return; | ||
1692 | } | 1921 | } |
1693 | } | 1922 | } |
1694 | 1923 | ||
@@ -1702,7 +1931,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) | |||
1702 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { | 1931 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { |
1703 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); | 1932 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); |
1704 | IXGBE_WRITE_FLUSH(hw); | 1933 | IXGBE_WRITE_FLUSH(hw); |
1705 | schedule_work(&adapter->watchdog_task); | 1934 | ixgbe_service_event_schedule(adapter); |
1706 | } | 1935 | } |
1707 | } | 1936 | } |
1708 | 1937 | ||
@@ -1728,33 +1957,47 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) | |||
1728 | if (eicr & IXGBE_EICR_MAILBOX) | 1957 | if (eicr & IXGBE_EICR_MAILBOX) |
1729 | ixgbe_msg_task(adapter); | 1958 | ixgbe_msg_task(adapter); |
1730 | 1959 | ||
1731 | if (hw->mac.type == ixgbe_mac_82598EB) | 1960 | switch (hw->mac.type) { |
1732 | ixgbe_check_fan_failure(adapter, eicr); | 1961 | case ixgbe_mac_82599EB: |
1733 | 1962 | case ixgbe_mac_X540: | |
1734 | if (hw->mac.type == ixgbe_mac_82599EB) { | ||
1735 | ixgbe_check_sfp_event(adapter, eicr); | ||
1736 | adapter->interrupt_event = eicr; | ||
1737 | if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && | ||
1738 | ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) | ||
1739 | schedule_work(&adapter->check_overtemp_task); | ||
1740 | |||
1741 | /* Handle Flow Director Full threshold interrupt */ | 1963 | /* Handle Flow Director Full threshold interrupt */ |
1742 | if (eicr & IXGBE_EICR_FLOW_DIR) { | 1964 | if (eicr & IXGBE_EICR_FLOW_DIR) { |
1965 | int reinit_count = 0; | ||
1743 | int i; | 1966 | int i; |
1744 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR); | ||
1745 | /* Disable transmits before FDIR Re-initialization */ | ||
1746 | netif_tx_stop_all_queues(netdev); | ||
1747 | for (i = 0; i < adapter->num_tx_queues; i++) { | 1967 | for (i = 0; i < adapter->num_tx_queues; i++) { |
1748 | struct ixgbe_ring *tx_ring = | 1968 | struct ixgbe_ring *ring = adapter->tx_ring[i]; |
1749 | adapter->tx_ring[i]; | 1969 | if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, |
1750 | if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE, | 1970 | &ring->state)) |
1751 | &tx_ring->reinit_state)) | 1971 | reinit_count++; |
1752 | schedule_work(&adapter->fdir_reinit_task); | 1972 | } |
1973 | if (reinit_count) { | ||
1974 | /* no more flow director interrupts until after init */ | ||
1975 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); | ||
1976 | eicr &= ~IXGBE_EICR_FLOW_DIR; | ||
1977 | adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; | ||
1978 | ixgbe_service_event_schedule(adapter); | ||
1753 | } | 1979 | } |
1754 | } | 1980 | } |
1981 | ixgbe_check_sfp_event(adapter, eicr); | ||
1982 | if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && | ||
1983 | ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { | ||
1984 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { | ||
1985 | adapter->interrupt_event = eicr; | ||
1986 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; | ||
1987 | ixgbe_service_event_schedule(adapter); | ||
1988 | } | ||
1989 | } | ||
1990 | break; | ||
1991 | default: | ||
1992 | break; | ||
1755 | } | 1993 | } |
1994 | |||
1995 | ixgbe_check_fan_failure(adapter, eicr); | ||
1996 | |||
1997 | /* re-enable the original interrupt state, no lsc, no queues */ | ||
1756 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 1998 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
1757 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); | 1999 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr & |
2000 | ~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE)); | ||
1758 | 2001 | ||
1759 | return IRQ_HANDLED; | 2002 | return IRQ_HANDLED; |
1760 | } | 2003 | } |
@@ -1763,32 +2006,50 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, | |||
1763 | u64 qmask) | 2006 | u64 qmask) |
1764 | { | 2007 | { |
1765 | u32 mask; | 2008 | u32 mask; |
2009 | struct ixgbe_hw *hw = &adapter->hw; | ||
1766 | 2010 | ||
1767 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 2011 | switch (hw->mac.type) { |
2012 | case ixgbe_mac_82598EB: | ||
1768 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); | 2013 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); |
1769 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); | 2014 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); |
1770 | } else { | 2015 | break; |
2016 | case ixgbe_mac_82599EB: | ||
2017 | case ixgbe_mac_X540: | ||
1771 | mask = (qmask & 0xFFFFFFFF); | 2018 | mask = (qmask & 0xFFFFFFFF); |
1772 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask); | 2019 | if (mask) |
2020 | IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); | ||
1773 | mask = (qmask >> 32); | 2021 | mask = (qmask >> 32); |
1774 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask); | 2022 | if (mask) |
2023 | IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); | ||
2024 | break; | ||
2025 | default: | ||
2026 | break; | ||
1775 | } | 2027 | } |
1776 | /* skip the flush */ | 2028 | /* skip the flush */ |
1777 | } | 2029 | } |
1778 | 2030 | ||
1779 | static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, | 2031 | static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, |
1780 | u64 qmask) | 2032 | u64 qmask) |
1781 | { | 2033 | { |
1782 | u32 mask; | 2034 | u32 mask; |
2035 | struct ixgbe_hw *hw = &adapter->hw; | ||
1783 | 2036 | ||
1784 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 2037 | switch (hw->mac.type) { |
2038 | case ixgbe_mac_82598EB: | ||
1785 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); | 2039 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); |
1786 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask); | 2040 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); |
1787 | } else { | 2041 | break; |
2042 | case ixgbe_mac_82599EB: | ||
2043 | case ixgbe_mac_X540: | ||
1788 | mask = (qmask & 0xFFFFFFFF); | 2044 | mask = (qmask & 0xFFFFFFFF); |
1789 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask); | 2045 | if (mask) |
2046 | IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); | ||
1790 | mask = (qmask >> 32); | 2047 | mask = (qmask >> 32); |
1791 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask); | 2048 | if (mask) |
2049 | IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); | ||
2050 | break; | ||
2051 | default: | ||
2052 | break; | ||
1792 | } | 2053 | } |
1793 | /* skip the flush */ | 2054 | /* skip the flush */ |
1794 | } | 2055 | } |
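On 82599/X540 the per-queue interrupt mask is 64 bits wide, so the enable/disable helpers above split qmask into a low word written to EIMS/EIMC_EX(0) and a high word written to EIMS/EIMC_EX(1), skipping a write when that half is zero. The split in isolation, with a print standing in for the register write:

#include <stdint.h>
#include <stdio.h>

static void write_reg(int idx, uint32_t val)
{
	/* stand-in for IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(idx), val) */
	printf("EIMS_EX(%d) <= 0x%08X\n", idx, val);
}

int main(void)
{
	uint64_t qmask = ((uint64_t)1 << 40) | (1u << 3); /* queues 3 and 40 */
	uint32_t mask;

	mask = qmask & 0xFFFFFFFF;      /* queues 0..31 */
	if (mask)
		write_reg(0, mask);
	mask = qmask >> 32;             /* queues 32..63 */
	if (mask)
		write_reg(1, mask);
	return 0;
}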
@@ -1809,7 +2070,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) | |||
1809 | tx_ring->total_bytes = 0; | 2070 | tx_ring->total_bytes = 0; |
1810 | tx_ring->total_packets = 0; | 2071 | tx_ring->total_packets = 0; |
1811 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, | 2072 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, |
1812 | r_idx + 1); | 2073 | r_idx + 1); |
1813 | } | 2074 | } |
1814 | 2075 | ||
1815 | /* EIAM disabled interrupts (on this vector) for us */ | 2076 | /* EIAM disabled interrupts (on this vector) for us */ |
@@ -1831,19 +2092,23 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) | |||
1831 | int r_idx; | 2092 | int r_idx; |
1832 | int i; | 2093 | int i; |
1833 | 2094 | ||
2095 | #ifdef CONFIG_IXGBE_DCA | ||
2096 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | ||
2097 | ixgbe_update_dca(q_vector); | ||
2098 | #endif | ||
2099 | |||
1834 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | 2100 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
1835 | for (i = 0; i < q_vector->rxr_count; i++) { | 2101 | for (i = 0; i < q_vector->rxr_count; i++) { |
1836 | rx_ring = adapter->rx_ring[r_idx]; | 2102 | rx_ring = adapter->rx_ring[r_idx]; |
1837 | rx_ring->total_bytes = 0; | 2103 | rx_ring->total_bytes = 0; |
1838 | rx_ring->total_packets = 0; | 2104 | rx_ring->total_packets = 0; |
1839 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, | 2105 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, |
1840 | r_idx + 1); | 2106 | r_idx + 1); |
1841 | } | 2107 | } |
1842 | 2108 | ||
1843 | if (!q_vector->rxr_count) | 2109 | if (!q_vector->rxr_count) |
1844 | return IRQ_HANDLED; | 2110 | return IRQ_HANDLED; |
1845 | 2111 | ||
1846 | /* disable interrupts on this vector only */ | ||
1847 | /* EIAM disabled interrupts (on this vector) for us */ | 2112 | /* EIAM disabled interrupts (on this vector) for us */ |
1848 | napi_schedule(&q_vector->napi); | 2113 | napi_schedule(&q_vector->napi); |
1849 | 2114 | ||
@@ -1867,7 +2132,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) | |||
1867 | ring->total_bytes = 0; | 2132 | ring->total_bytes = 0; |
1868 | ring->total_packets = 0; | 2133 | ring->total_packets = 0; |
1869 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, | 2134 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, |
1870 | r_idx + 1); | 2135 | r_idx + 1); |
1871 | } | 2136 | } |
1872 | 2137 | ||
1873 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | 2138 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
@@ -1876,7 +2141,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) | |||
1876 | ring->total_bytes = 0; | 2141 | ring->total_bytes = 0; |
1877 | ring->total_packets = 0; | 2142 | ring->total_packets = 0; |
1878 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, | 2143 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, |
1879 | r_idx + 1); | 2144 | r_idx + 1); |
1880 | } | 2145 | } |
1881 | 2146 | ||
1882 | /* EIAM disabled interrupts (on this vector) for us */ | 2147 | /* EIAM disabled interrupts (on this vector) for us */ |
@@ -1896,19 +2161,20 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) | |||
1896 | static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) | 2161 | static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) |
1897 | { | 2162 | { |
1898 | struct ixgbe_q_vector *q_vector = | 2163 | struct ixgbe_q_vector *q_vector = |
1899 | container_of(napi, struct ixgbe_q_vector, napi); | 2164 | container_of(napi, struct ixgbe_q_vector, napi); |
1900 | struct ixgbe_adapter *adapter = q_vector->adapter; | 2165 | struct ixgbe_adapter *adapter = q_vector->adapter; |
1901 | struct ixgbe_ring *rx_ring = NULL; | 2166 | struct ixgbe_ring *rx_ring = NULL; |
1902 | int work_done = 0; | 2167 | int work_done = 0; |
1903 | long r_idx; | 2168 | long r_idx; |
1904 | 2169 | ||
1905 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | ||
1906 | rx_ring = adapter->rx_ring[r_idx]; | ||
1907 | #ifdef CONFIG_IXGBE_DCA | 2170 | #ifdef CONFIG_IXGBE_DCA |
1908 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | 2171 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
1909 | ixgbe_update_rx_dca(adapter, rx_ring); | 2172 | ixgbe_update_dca(q_vector); |
1910 | #endif | 2173 | #endif |
1911 | 2174 | ||
2175 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | ||
2176 | rx_ring = adapter->rx_ring[r_idx]; | ||
2177 | |||
1912 | ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); | 2178 | ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); |
1913 | 2179 | ||
1914 | /* If all Rx work done, exit the polling mode */ | 2180 | /* If all Rx work done, exit the polling mode */ |
@@ -1918,7 +2184,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) | |||
1918 | ixgbe_set_itr_msix(q_vector); | 2184 | ixgbe_set_itr_msix(q_vector); |
1919 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 2185 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
1920 | ixgbe_irq_enable_queues(adapter, | 2186 | ixgbe_irq_enable_queues(adapter, |
1921 | ((u64)1 << q_vector->v_idx)); | 2187 | ((u64)1 << q_vector->v_idx)); |
1922 | } | 2188 | } |
1923 | 2189 | ||
1924 | return work_done; | 2190 | return work_done; |
@@ -1935,23 +2201,24 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) | |||
1935 | static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) | 2201 | static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) |
1936 | { | 2202 | { |
1937 | struct ixgbe_q_vector *q_vector = | 2203 | struct ixgbe_q_vector *q_vector = |
1938 | container_of(napi, struct ixgbe_q_vector, napi); | 2204 | container_of(napi, struct ixgbe_q_vector, napi); |
1939 | struct ixgbe_adapter *adapter = q_vector->adapter; | 2205 | struct ixgbe_adapter *adapter = q_vector->adapter; |
1940 | struct ixgbe_ring *ring = NULL; | 2206 | struct ixgbe_ring *ring = NULL; |
1941 | int work_done = 0, i; | 2207 | int work_done = 0, i; |
1942 | long r_idx; | 2208 | long r_idx; |
1943 | bool tx_clean_complete = true; | 2209 | bool tx_clean_complete = true; |
1944 | 2210 | ||
2211 | #ifdef CONFIG_IXGBE_DCA | ||
2212 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | ||
2213 | ixgbe_update_dca(q_vector); | ||
2214 | #endif | ||
2215 | |||
1945 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | 2216 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
1946 | for (i = 0; i < q_vector->txr_count; i++) { | 2217 | for (i = 0; i < q_vector->txr_count; i++) { |
1947 | ring = adapter->tx_ring[r_idx]; | 2218 | ring = adapter->tx_ring[r_idx]; |
1948 | #ifdef CONFIG_IXGBE_DCA | ||
1949 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | ||
1950 | ixgbe_update_tx_dca(adapter, ring); | ||
1951 | #endif | ||
1952 | tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); | 2219 | tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); |
1953 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, | 2220 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, |
1954 | r_idx + 1); | 2221 | r_idx + 1); |
1955 | } | 2222 | } |
1956 | 2223 | ||
1957 | /* attempt to distribute budget to each queue fairly, but don't allow | 2224 | /* attempt to distribute budget to each queue fairly, but don't allow |
@@ -1961,13 +2228,9 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) | |||
1961 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | 2228 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
1962 | for (i = 0; i < q_vector->rxr_count; i++) { | 2229 | for (i = 0; i < q_vector->rxr_count; i++) { |
1963 | ring = adapter->rx_ring[r_idx]; | 2230 | ring = adapter->rx_ring[r_idx]; |
1964 | #ifdef CONFIG_IXGBE_DCA | ||
1965 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | ||
1966 | ixgbe_update_rx_dca(adapter, ring); | ||
1967 | #endif | ||
1968 | ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); | 2231 | ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); |
1969 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, | 2232 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, |
1970 | r_idx + 1); | 2233 | r_idx + 1); |
1971 | } | 2234 | } |
1972 | 2235 | ||
1973 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | 2236 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
@@ -1979,7 +2242,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) | |||
1979 | ixgbe_set_itr_msix(q_vector); | 2242 | ixgbe_set_itr_msix(q_vector); |
1980 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 2243 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
1981 | ixgbe_irq_enable_queues(adapter, | 2244 | ixgbe_irq_enable_queues(adapter, |
1982 | ((u64)1 << q_vector->v_idx)); | 2245 | ((u64)1 << q_vector->v_idx)); |
1983 | return 0; | 2246 | return 0; |
1984 | } | 2247 | } |
1985 | 2248 | ||
@@ -1997,19 +2260,20 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) | |||
1997 | static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) | 2260 | static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) |
1998 | { | 2261 | { |
1999 | struct ixgbe_q_vector *q_vector = | 2262 | struct ixgbe_q_vector *q_vector = |
2000 | container_of(napi, struct ixgbe_q_vector, napi); | 2263 | container_of(napi, struct ixgbe_q_vector, napi); |
2001 | struct ixgbe_adapter *adapter = q_vector->adapter; | 2264 | struct ixgbe_adapter *adapter = q_vector->adapter; |
2002 | struct ixgbe_ring *tx_ring = NULL; | 2265 | struct ixgbe_ring *tx_ring = NULL; |
2003 | int work_done = 0; | 2266 | int work_done = 0; |
2004 | long r_idx; | 2267 | long r_idx; |
2005 | 2268 | ||
2006 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | ||
2007 | tx_ring = adapter->tx_ring[r_idx]; | ||
2008 | #ifdef CONFIG_IXGBE_DCA | 2269 | #ifdef CONFIG_IXGBE_DCA |
2009 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | 2270 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
2010 | ixgbe_update_tx_dca(adapter, tx_ring); | 2271 | ixgbe_update_dca(q_vector); |
2011 | #endif | 2272 | #endif |
2012 | 2273 | ||
2274 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | ||
2275 | tx_ring = adapter->tx_ring[r_idx]; | ||
2276 | |||
2013 | if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) | 2277 | if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) |
2014 | work_done = budget; | 2278 | work_done = budget; |
2015 | 2279 | ||
@@ -2019,34 +2283,38 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) | |||
2019 | if (adapter->tx_itr_setting & 1) | 2283 | if (adapter->tx_itr_setting & 1) |
2020 | ixgbe_set_itr_msix(q_vector); | 2284 | ixgbe_set_itr_msix(q_vector); |
2021 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 2285 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
2022 | ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); | 2286 | ixgbe_irq_enable_queues(adapter, |
2287 | ((u64)1 << q_vector->v_idx)); | ||
2023 | } | 2288 | } |
2024 | 2289 | ||
2025 | return work_done; | 2290 | return work_done; |
2026 | } | 2291 | } |
2027 | 2292 | ||
2028 | static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, | 2293 | static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, |
2029 | int r_idx) | 2294 | int r_idx) |
2030 | { | 2295 | { |
2031 | struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; | 2296 | struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; |
2297 | struct ixgbe_ring *rx_ring = a->rx_ring[r_idx]; | ||
2032 | 2298 | ||
2033 | set_bit(r_idx, q_vector->rxr_idx); | 2299 | set_bit(r_idx, q_vector->rxr_idx); |
2034 | q_vector->rxr_count++; | 2300 | q_vector->rxr_count++; |
2301 | rx_ring->q_vector = q_vector; | ||
2035 | } | 2302 | } |
2036 | 2303 | ||
2037 | static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, | 2304 | static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, |
2038 | int t_idx) | 2305 | int t_idx) |
2039 | { | 2306 | { |
2040 | struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; | 2307 | struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; |
2308 | struct ixgbe_ring *tx_ring = a->tx_ring[t_idx]; | ||
2041 | 2309 | ||
2042 | set_bit(t_idx, q_vector->txr_idx); | 2310 | set_bit(t_idx, q_vector->txr_idx); |
2043 | q_vector->txr_count++; | 2311 | q_vector->txr_count++; |
2312 | tx_ring->q_vector = q_vector; | ||
2044 | } | 2313 | } |
2045 | 2314 | ||
2046 | /** | 2315 | /** |
2047 | * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors | 2316 | * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors |
2048 | * @adapter: board private structure to initialize | 2317 | * @adapter: board private structure to initialize |
2049 | * @vectors: allotted vector count for descriptor rings | ||
2050 | * | 2318 | * |
2051 | * This function maps descriptor rings to the queue-specific vectors | 2319 | * This function maps descriptor rings to the queue-specific vectors |
2052 | * we were allotted through the MSI-X enabling code. Ideally, we'd have | 2320 | * we were allotted through the MSI-X enabling code. Ideally, we'd have |
@@ -2054,9 +2322,9 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, | |||
2054 | * group the rings as "efficiently" as possible. You would add new | 2322 | * group the rings as "efficiently" as possible. You would add new |
2055 | * mapping configurations in here. | 2323 | * mapping configurations in here. |
2056 | **/ | 2324 | **/ |
2057 | static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, | 2325 | static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter) |
2058 | int vectors) | ||
2059 | { | 2326 | { |
2327 | int q_vectors; | ||
2060 | int v_start = 0; | 2328 | int v_start = 0; |
2061 | int rxr_idx = 0, txr_idx = 0; | 2329 | int rxr_idx = 0, txr_idx = 0; |
2062 | int rxr_remaining = adapter->num_rx_queues; | 2330 | int rxr_remaining = adapter->num_rx_queues; |
@@ -2069,11 +2337,13 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, | |||
2069 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) | 2337 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) |
2070 | goto out; | 2338 | goto out; |
2071 | 2339 | ||
2340 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
2341 | |||
2072 | /* | 2342 | /* |
2073 | * The ideal configuration... | 2343 | * The ideal configuration... |
2074 | * We have enough vectors to map one per queue. | 2344 | * We have enough vectors to map one per queue. |
2075 | */ | 2345 | */ |
2076 | if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) { | 2346 | if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { |
2077 | for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) | 2347 | for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) |
2078 | map_vector_to_rxq(adapter, v_start, rxr_idx); | 2348 | map_vector_to_rxq(adapter, v_start, rxr_idx); |
2079 | 2349 | ||
@@ -2089,23 +2359,20 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, | |||
2089 | * multiple queues per vector. | 2359 | * multiple queues per vector. |
2090 | */ | 2360 | */ |
2091 | /* Re-adjusting *qpv takes care of the remainder. */ | 2361 | /* Re-adjusting *qpv takes care of the remainder. */ |
2092 | for (i = v_start; i < vectors; i++) { | 2362 | for (i = v_start; i < q_vectors; i++) { |
2093 | rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i); | 2363 | rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); |
2094 | for (j = 0; j < rqpv; j++) { | 2364 | for (j = 0; j < rqpv; j++) { |
2095 | map_vector_to_rxq(adapter, i, rxr_idx); | 2365 | map_vector_to_rxq(adapter, i, rxr_idx); |
2096 | rxr_idx++; | 2366 | rxr_idx++; |
2097 | rxr_remaining--; | 2367 | rxr_remaining--; |
2098 | } | 2368 | } |
2099 | } | 2369 | tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); |
2100 | for (i = v_start; i < vectors; i++) { | ||
2101 | tqpv = DIV_ROUND_UP(txr_remaining, vectors - i); | ||
2102 | for (j = 0; j < tqpv; j++) { | 2370 | for (j = 0; j < tqpv; j++) { |
2103 | map_vector_to_txq(adapter, i, txr_idx); | 2371 | map_vector_to_txq(adapter, i, txr_idx); |
2104 | txr_idx++; | 2372 | txr_idx++; |
2105 | txr_remaining--; | 2373 | txr_remaining--; |
2106 | } | 2374 | } |
2107 | } | 2375 | } |
2108 | |||
2109 | out: | 2376 | out: |
2110 | return err; | 2377 | return err; |
2111 | } | 2378 | } |
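The DIV_ROUND_UP loops above spread the remaining rings as evenly as possible over the vectors that are left. A standalone sketch of just that arithmetic, with assumed counts (10 rings, 4 vectors) rather than anything read from the adapter:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int q_vectors = 4, rings = 10;      /* assumed example counts */
        int remaining = rings, ring_idx = 0;
        int v, per_vector;

        for (v = 0; v < q_vectors; v++) {
            /* same math as the driver: ceil(rings left / vectors left) */
            per_vector = DIV_ROUND_UP(remaining, q_vectors - v);
            printf("vector %d handles rings %d..%d\n",
                   v, ring_idx, ring_idx + per_vector - 1);
            ring_idx += per_vector;
            remaining -= per_vector;
        }
        return 0;   /* 10 rings over 4 vectors come out as 3, 3, 2, 2 */
    }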
@@ -2122,37 +2389,41 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) | |||
2122 | struct net_device *netdev = adapter->netdev; | 2389 | struct net_device *netdev = adapter->netdev; |
2123 | irqreturn_t (*handler)(int, void *); | 2390 | irqreturn_t (*handler)(int, void *); |
2124 | int i, vector, q_vectors, err; | 2391 | int i, vector, q_vectors, err; |
2125 | int ri=0, ti=0; | 2392 | int ri = 0, ti = 0; |
2126 | 2393 | ||
2127 | /* Decrement for Other and TCP Timer vectors */ | 2394 | /* Decrement for Other and TCP Timer vectors */ |
2128 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 2395 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
2129 | 2396 | ||
2130 | /* Map the Tx/Rx rings to the vectors we were allotted. */ | 2397 | err = ixgbe_map_rings_to_vectors(adapter); |
2131 | err = ixgbe_map_rings_to_vectors(adapter, q_vectors); | ||
2132 | if (err) | 2398 | if (err) |
2133 | goto out; | 2399 | return err; |
2134 | 2400 | ||
2135 | #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ | 2401 | #define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \ |
2136 | (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ | 2402 | ? &ixgbe_msix_clean_many : \ |
2137 | &ixgbe_msix_clean_many) | 2403 | (_v)->rxr_count ? &ixgbe_msix_clean_rx : \ |
2404 | (_v)->txr_count ? &ixgbe_msix_clean_tx : \ | ||
2405 | NULL) | ||
2138 | for (vector = 0; vector < q_vectors; vector++) { | 2406 | for (vector = 0; vector < q_vectors; vector++) { |
2139 | handler = SET_HANDLER(adapter->q_vector[vector]); | 2407 | struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; |
2140 | 2408 | handler = SET_HANDLER(q_vector); | |
2141 | if(handler == &ixgbe_msix_clean_rx) { | 2409 | |
2142 | sprintf(adapter->name[vector], "%s-%s-%d", | 2410 | if (handler == &ixgbe_msix_clean_rx) { |
2143 | netdev->name, "rx", ri++); | 2411 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, |
2144 | } | 2412 | "%s-%s-%d", netdev->name, "rx", ri++); |
2145 | else if(handler == &ixgbe_msix_clean_tx) { | 2413 | } else if (handler == &ixgbe_msix_clean_tx) { |
2146 | sprintf(adapter->name[vector], "%s-%s-%d", | 2414 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, |
2147 | netdev->name, "tx", ti++); | 2415 | "%s-%s-%d", netdev->name, "tx", ti++); |
2416 | } else if (handler == &ixgbe_msix_clean_many) { | ||
2417 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, | ||
2418 | "%s-%s-%d", netdev->name, "TxRx", ri++); | ||
2419 | ti++; | ||
2420 | } else { | ||
2421 | /* skip this unused q_vector */ | ||
2422 | continue; | ||
2148 | } | 2423 | } |
2149 | else | ||
2150 | sprintf(adapter->name[vector], "%s-%s-%d", | ||
2151 | netdev->name, "TxRx", vector); | ||
2152 | |||
2153 | err = request_irq(adapter->msix_entries[vector].vector, | 2424 | err = request_irq(adapter->msix_entries[vector].vector, |
2154 | handler, 0, adapter->name[vector], | 2425 | handler, 0, q_vector->name, |
2155 | adapter->q_vector[vector]); | 2426 | q_vector); |
2156 | if (err) { | 2427 | if (err) { |
2157 | e_err(probe, "request_irq failed for MSIX interrupt " | 2428 | e_err(probe, "request_irq failed for MSIX interrupt " |
2158 | "Error: %d\n", err); | 2429 | "Error: %d\n", err); |
@@ -2160,9 +2431,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) | |||
2160 | } | 2431 | } |
2161 | } | 2432 | } |
2162 | 2433 | ||
2163 | sprintf(adapter->name[vector], "%s:lsc", netdev->name); | 2434 | sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name); |
2164 | err = request_irq(adapter->msix_entries[vector].vector, | 2435 | err = request_irq(adapter->msix_entries[vector].vector, |
2165 | ixgbe_msix_lsc, 0, adapter->name[vector], netdev); | 2436 | ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev); |
2166 | if (err) { | 2437 | if (err) { |
2167 | e_err(probe, "request_irq for msix_lsc failed: %d\n", err); | 2438 | e_err(probe, "request_irq for msix_lsc failed: %d\n", err); |
2168 | goto free_queue_irqs; | 2439 | goto free_queue_irqs; |
@@ -2173,31 +2444,30 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) | |||
2173 | free_queue_irqs: | 2444 | free_queue_irqs: |
2174 | for (i = vector - 1; i >= 0; i--) | 2445 | for (i = vector - 1; i >= 0; i--) |
2175 | free_irq(adapter->msix_entries[--vector].vector, | 2446 | free_irq(adapter->msix_entries[--vector].vector, |
2176 | adapter->q_vector[i]); | 2447 | adapter->q_vector[i]); |
2177 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | 2448 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; |
2178 | pci_disable_msix(adapter->pdev); | 2449 | pci_disable_msix(adapter->pdev); |
2179 | kfree(adapter->msix_entries); | 2450 | kfree(adapter->msix_entries); |
2180 | adapter->msix_entries = NULL; | 2451 | adapter->msix_entries = NULL; |
2181 | out: | ||
2182 | return err; | 2452 | return err; |
2183 | } | 2453 | } |
2184 | 2454 | ||
2185 | static void ixgbe_set_itr(struct ixgbe_adapter *adapter) | 2455 | static void ixgbe_set_itr(struct ixgbe_adapter *adapter) |
2186 | { | 2456 | { |
2187 | struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; | 2457 | struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; |
2188 | u8 current_itr; | ||
2189 | u32 new_itr = q_vector->eitr; | ||
2190 | struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; | 2458 | struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; |
2191 | struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; | 2459 | struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; |
2460 | u32 new_itr = q_vector->eitr; | ||
2461 | u8 current_itr; | ||
2192 | 2462 | ||
2193 | q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, | 2463 | q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, |
2194 | q_vector->tx_itr, | 2464 | q_vector->tx_itr, |
2195 | tx_ring->total_packets, | 2465 | tx_ring->total_packets, |
2196 | tx_ring->total_bytes); | 2466 | tx_ring->total_bytes); |
2197 | q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr, | 2467 | q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr, |
2198 | q_vector->rx_itr, | 2468 | q_vector->rx_itr, |
2199 | rx_ring->total_packets, | 2469 | rx_ring->total_packets, |
2200 | rx_ring->total_bytes); | 2470 | rx_ring->total_bytes); |
2201 | 2471 | ||
2202 | current_itr = max(q_vector->rx_itr, q_vector->tx_itr); | 2472 | current_itr = max(q_vector->rx_itr, q_vector->tx_itr); |
2203 | 2473 | ||
@@ -2218,9 +2488,9 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter) | |||
2218 | 2488 | ||
2219 | if (new_itr != q_vector->eitr) { | 2489 | if (new_itr != q_vector->eitr) { |
2220 | /* do an exponential smoothing */ | 2490 | /* do an exponential smoothing */ |
2221 | new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); | 2491 | new_itr = ((q_vector->eitr * 9) + new_itr)/10; |
2222 | 2492 | ||
2223 | /* save the algorithm value here, not the smoothed one */ | 2493 | /* save the algorithm value here */ |
2224 | q_vector->eitr = new_itr; | 2494 | q_vector->eitr = new_itr; |
2225 | 2495 | ||
2226 | ixgbe_write_eitr(q_vector); | 2496 | ixgbe_write_eitr(q_vector); |
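The reworked smoothing step above is a 90/10 exponential moving average done in integer math; a minimal sketch of that one calculation, with illustrative values:

    /* keep 90% of the old interrupt rate, blend in 10% of the new sample */
    static unsigned int smooth_itr(unsigned int old_itr, unsigned int new_itr)
    {
        return ((old_itr * 9) + new_itr) / 10;
    }
    /* e.g. smooth_itr(8000, 20000) == 9200 interrupts/sec */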
@@ -2231,7 +2501,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter) | |||
2231 | * ixgbe_irq_enable - Enable default interrupt generation settings | 2501 | * ixgbe_irq_enable - Enable default interrupt generation settings |
2232 | * @adapter: board private structure | 2502 | * @adapter: board private structure |
2233 | **/ | 2503 | **/ |
2234 | static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) | 2504 | static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, |
2505 | bool flush) | ||
2235 | { | 2506 | { |
2236 | u32 mask; | 2507 | u32 mask; |
2237 | 2508 | ||
@@ -2240,20 +2511,27 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) | |||
2240 | mask |= IXGBE_EIMS_GPI_SDP0; | 2511 | mask |= IXGBE_EIMS_GPI_SDP0; |
2241 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) | 2512 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) |
2242 | mask |= IXGBE_EIMS_GPI_SDP1; | 2513 | mask |= IXGBE_EIMS_GPI_SDP1; |
2243 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 2514 | switch (adapter->hw.mac.type) { |
2515 | case ixgbe_mac_82599EB: | ||
2516 | case ixgbe_mac_X540: | ||
2244 | mask |= IXGBE_EIMS_ECC; | 2517 | mask |= IXGBE_EIMS_ECC; |
2245 | mask |= IXGBE_EIMS_GPI_SDP1; | 2518 | mask |= IXGBE_EIMS_GPI_SDP1; |
2246 | mask |= IXGBE_EIMS_GPI_SDP2; | 2519 | mask |= IXGBE_EIMS_GPI_SDP2; |
2247 | if (adapter->num_vfs) | 2520 | if (adapter->num_vfs) |
2248 | mask |= IXGBE_EIMS_MAILBOX; | 2521 | mask |= IXGBE_EIMS_MAILBOX; |
2522 | break; | ||
2523 | default: | ||
2524 | break; | ||
2249 | } | 2525 | } |
2250 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | 2526 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || |
2251 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | 2527 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) |
2252 | mask |= IXGBE_EIMS_FLOW_DIR; | 2528 | mask |= IXGBE_EIMS_FLOW_DIR; |
2253 | 2529 | ||
2254 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); | 2530 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); |
2255 | ixgbe_irq_enable_queues(adapter, ~0); | 2531 | if (queues) |
2256 | IXGBE_WRITE_FLUSH(&adapter->hw); | 2532 | ixgbe_irq_enable_queues(adapter, ~0); |
2533 | if (flush) | ||
2534 | IXGBE_WRITE_FLUSH(&adapter->hw); | ||
2257 | 2535 | ||
2258 | if (adapter->num_vfs > 32) { | 2536 | if (adapter->num_vfs > 32) { |
2259 | u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; | 2537 | u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; |
@@ -2275,7 +2553,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) | |||
2275 | u32 eicr; | 2553 | u32 eicr; |
2276 | 2554 | ||
2277 | /* | 2555 | /* |
2278 | * Workaround for silicon errata. Mask the interrupts | 2556 | * Workaround for silicon errata on 82598. Mask the interrupts |
2279 | * before the read of EICR. | 2557 | * before the read of EICR. |
2280 | */ | 2558 | */ |
2281 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); | 2559 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); |
@@ -2284,23 +2562,38 @@ static irqreturn_t ixgbe_intr(int irq, void *data) | |||
2284 | * therefore no explict interrupt disable is necessary */ | 2562 | * therefore no explict interrupt disable is necessary */ |
2285 | eicr = IXGBE_READ_REG(hw, IXGBE_EICR); | 2563 | eicr = IXGBE_READ_REG(hw, IXGBE_EICR); |
2286 | if (!eicr) { | 2564 | if (!eicr) { |
2287 | /* shared interrupt alert! | 2565 | /* |
2566 | * shared interrupt alert! | ||
2288 | * make sure interrupts are enabled because the read will | 2567 | * make sure interrupts are enabled because the read will |
2289 | * have disabled interrupts due to EIAM */ | 2568 | * have disabled interrupts due to EIAM |
2290 | ixgbe_irq_enable(adapter); | 2569 | * finish the workaround of silicon errata on 82598. Unmask |
2570 | * the interrupt that we masked before the EICR read. | ||
2571 | */ | ||
2572 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | ||
2573 | ixgbe_irq_enable(adapter, true, true); | ||
2291 | return IRQ_NONE; /* Not our interrupt */ | 2574 | return IRQ_NONE; /* Not our interrupt */ |
2292 | } | 2575 | } |
2293 | 2576 | ||
2294 | if (eicr & IXGBE_EICR_LSC) | 2577 | if (eicr & IXGBE_EICR_LSC) |
2295 | ixgbe_check_lsc(adapter); | 2578 | ixgbe_check_lsc(adapter); |
2296 | 2579 | ||
2297 | if (hw->mac.type == ixgbe_mac_82599EB) | 2580 | switch (hw->mac.type) { |
2581 | case ixgbe_mac_82599EB: | ||
2298 | ixgbe_check_sfp_event(adapter, eicr); | 2582 | ixgbe_check_sfp_event(adapter, eicr); |
2583 | if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && | ||
2584 | ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { | ||
2585 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { | ||
2586 | adapter->interrupt_event = eicr; | ||
2587 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; | ||
2588 | ixgbe_service_event_schedule(adapter); | ||
2589 | } | ||
2590 | } | ||
2591 | break; | ||
2592 | default: | ||
2593 | break; | ||
2594 | } | ||
2299 | 2595 | ||
2300 | ixgbe_check_fan_failure(adapter, eicr); | 2596 | ixgbe_check_fan_failure(adapter, eicr); |
2301 | if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && | ||
2302 | ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) | ||
2303 | schedule_work(&adapter->check_overtemp_task); | ||
2304 | 2597 | ||
2305 | if (napi_schedule_prep(&(q_vector->napi))) { | 2598 | if (napi_schedule_prep(&(q_vector->napi))) { |
2306 | adapter->tx_ring[0]->total_packets = 0; | 2599 | adapter->tx_ring[0]->total_packets = 0; |
@@ -2311,6 +2604,14 @@ static irqreturn_t ixgbe_intr(int irq, void *data) | |||
2311 | __napi_schedule(&(q_vector->napi)); | 2604 | __napi_schedule(&(q_vector->napi)); |
2312 | } | 2605 | } |
2313 | 2606 | ||
2607 | /* | ||
2608 | * re-enable link(maybe) and non-queue interrupts, no flush. | ||
2609 | * ixgbe_poll will re-enable the queue interrupts | ||
2610 | */ | ||
2611 | |||
2612 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | ||
2613 | ixgbe_irq_enable(adapter, false, false); | ||
2614 | |||
2314 | return IRQ_HANDLED; | 2615 | return IRQ_HANDLED; |
2315 | } | 2616 | } |
2316 | 2617 | ||
@@ -2343,10 +2644,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter) | |||
2343 | err = ixgbe_request_msix_irqs(adapter); | 2644 | err = ixgbe_request_msix_irqs(adapter); |
2344 | } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { | 2645 | } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { |
2345 | err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, | 2646 | err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, |
2346 | netdev->name, netdev); | 2647 | netdev->name, netdev); |
2347 | } else { | 2648 | } else { |
2348 | err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, | 2649 | err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, |
2349 | netdev->name, netdev); | 2650 | netdev->name, netdev); |
2350 | } | 2651 | } |
2351 | 2652 | ||
2352 | if (err) | 2653 | if (err) |
@@ -2369,8 +2670,13 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) | |||
2369 | 2670 | ||
2370 | i--; | 2671 | i--; |
2371 | for (; i >= 0; i--) { | 2672 | for (; i >= 0; i--) { |
2673 | /* free only the irqs that were actually requested */ | ||
2674 | if (!adapter->q_vector[i]->rxr_count && | ||
2675 | !adapter->q_vector[i]->txr_count) | ||
2676 | continue; | ||
2677 | |||
2372 | free_irq(adapter->msix_entries[i].vector, | 2678 | free_irq(adapter->msix_entries[i].vector, |
2373 | adapter->q_vector[i]); | 2679 | adapter->q_vector[i]); |
2374 | } | 2680 | } |
2375 | 2681 | ||
2376 | ixgbe_reset_q_vectors(adapter); | 2682 | ixgbe_reset_q_vectors(adapter); |
@@ -2385,14 +2691,20 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) | |||
2385 | **/ | 2691 | **/ |
2386 | static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) | 2692 | static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) |
2387 | { | 2693 | { |
2388 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 2694 | switch (adapter->hw.mac.type) { |
2695 | case ixgbe_mac_82598EB: | ||
2389 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); | 2696 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); |
2390 | } else { | 2697 | break; |
2698 | case ixgbe_mac_82599EB: | ||
2699 | case ixgbe_mac_X540: | ||
2391 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); | 2700 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); |
2392 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); | 2701 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); |
2393 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); | 2702 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); |
2394 | if (adapter->num_vfs > 32) | 2703 | if (adapter->num_vfs > 32) |
2395 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); | 2704 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); |
2705 | break; | ||
2706 | default: | ||
2707 | break; | ||
2396 | } | 2708 | } |
2397 | IXGBE_WRITE_FLUSH(&adapter->hw); | 2709 | IXGBE_WRITE_FLUSH(&adapter->hw); |
2398 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 2710 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
@@ -2413,7 +2725,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) | |||
2413 | struct ixgbe_hw *hw = &adapter->hw; | 2725 | struct ixgbe_hw *hw = &adapter->hw; |
2414 | 2726 | ||
2415 | IXGBE_WRITE_REG(hw, IXGBE_EITR(0), | 2727 | IXGBE_WRITE_REG(hw, IXGBE_EITR(0), |
2416 | EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param)); | 2728 | EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param)); |
2417 | 2729 | ||
2418 | ixgbe_set_ivar(adapter, 0, 0, 0); | 2730 | ixgbe_set_ivar(adapter, 0, 0, 0); |
2419 | ixgbe_set_ivar(adapter, 1, 0, 0); | 2731 | ixgbe_set_ivar(adapter, 1, 0, 0); |
@@ -2425,115 +2737,176 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) | |||
2425 | } | 2737 | } |
2426 | 2738 | ||
2427 | /** | 2739 | /** |
2428 | * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset | 2740 | * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset |
2429 | * @adapter: board private structure | 2741 | * @adapter: board private structure |
2742 | * @ring: structure containing ring specific data | ||
2430 | * | 2743 | * |
2431 | * Configure the Tx unit of the MAC after a reset. | 2744 | * Configure the Tx descriptor ring after a reset. |
2432 | **/ | 2745 | **/ |
2433 | static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) | 2746 | void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, |
2747 | struct ixgbe_ring *ring) | ||
2434 | { | 2748 | { |
2435 | u64 tdba; | ||
2436 | struct ixgbe_hw *hw = &adapter->hw; | 2749 | struct ixgbe_hw *hw = &adapter->hw; |
2437 | u32 i, j, tdlen, txctrl; | 2750 | u64 tdba = ring->dma; |
2751 | int wait_loop = 10; | ||
2752 | u32 txdctl; | ||
2753 | u8 reg_idx = ring->reg_idx; | ||
2438 | 2754 | ||
2439 | /* Setup the HW Tx Head and Tail descriptor pointers */ | 2755 | /* disable queue to avoid issues while updating state */ |
2440 | for (i = 0; i < adapter->num_tx_queues; i++) { | 2756 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); |
2441 | struct ixgbe_ring *ring = adapter->tx_ring[i]; | 2757 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), |
2442 | j = ring->reg_idx; | 2758 | txdctl & ~IXGBE_TXDCTL_ENABLE); |
2443 | tdba = ring->dma; | 2759 | IXGBE_WRITE_FLUSH(hw); |
2444 | tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); | 2760 | |
2445 | IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), | 2761 | IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), |
2446 | (tdba & DMA_BIT_MASK(32))); | 2762 | (tdba & DMA_BIT_MASK(32))); |
2447 | IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); | 2763 | IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); |
2448 | IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); | 2764 | IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), |
2449 | IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); | 2765 | ring->count * sizeof(union ixgbe_adv_tx_desc)); |
2450 | IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); | 2766 | IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); |
2451 | adapter->tx_ring[i]->head = IXGBE_TDH(j); | 2767 | IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); |
2452 | adapter->tx_ring[i]->tail = IXGBE_TDT(j); | 2768 | ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx); |
2453 | /* | 2769 | |
2454 | * Disable Tx Head Writeback RO bit, since this hoses | 2770 | /* configure fetching thresholds */ |
2455 | * bookkeeping if things aren't delivered in order. | 2771 | if (adapter->rx_itr_setting == 0) { |
2456 | */ | 2772 | /* cannot set wthresh when itr==0 */ |
2457 | switch (hw->mac.type) { | 2773 | txdctl &= ~0x007F0000; |
2458 | case ixgbe_mac_82598EB: | 2774 | } else { |
2459 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); | 2775 | /* enable WTHRESH=8 descriptors, to encourage burst writeback */ |
2460 | break; | 2776 | txdctl |= (8 << 16); |
2461 | case ixgbe_mac_82599EB: | 2777 | } |
2462 | default: | 2778 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
2463 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); | 2779 | /* PThresh workaround for Tx hang with DFP enabled. */ |
2464 | break; | 2780 | txdctl |= 32; |
2465 | } | ||
2466 | txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; | ||
2467 | switch (hw->mac.type) { | ||
2468 | case ixgbe_mac_82598EB: | ||
2469 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); | ||
2470 | break; | ||
2471 | case ixgbe_mac_82599EB: | ||
2472 | default: | ||
2473 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); | ||
2474 | break; | ||
2475 | } | ||
2476 | } | 2781 | } |
2477 | 2782 | ||
2478 | if (hw->mac.type == ixgbe_mac_82599EB) { | 2783 | /* reinitialize flowdirector state */ |
2479 | u32 rttdcs; | 2784 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && |
2480 | u32 mask; | 2785 | adapter->atr_sample_rate) { |
2786 | ring->atr_sample_rate = adapter->atr_sample_rate; | ||
2787 | ring->atr_count = 0; | ||
2788 | set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); | ||
2789 | } else { | ||
2790 | ring->atr_sample_rate = 0; | ||
2791 | } | ||
2481 | 2792 | ||
2482 | /* disable the arbiter while setting MTQC */ | 2793 | clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); |
2483 | rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); | ||
2484 | rttdcs |= IXGBE_RTTDCS_ARBDIS; | ||
2485 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); | ||
2486 | 2794 | ||
2487 | /* set transmit pool layout */ | 2795 | /* enable queue */ |
2488 | mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED); | 2796 | txdctl |= IXGBE_TXDCTL_ENABLE; |
2489 | switch (adapter->flags & mask) { | 2797 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); |
2490 | 2798 | ||
2491 | case (IXGBE_FLAG_SRIOV_ENABLED): | 2799 | /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ |
2492 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, | 2800 | if (hw->mac.type == ixgbe_mac_82598EB && |
2493 | (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); | 2801 | !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) |
2494 | break; | 2802 | return; |
2495 | 2803 | ||
2496 | case (IXGBE_FLAG_DCB_ENABLED): | 2804 | /* poll to verify queue is enabled */ |
2497 | /* We enable 8 traffic classes, DCB only */ | 2805 | do { |
2498 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, | 2806 | usleep_range(1000, 2000); |
2499 | (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ)); | 2807 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); |
2500 | break; | 2808 | } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); |
2809 | if (!wait_loop) | ||
2810 | e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); | ||
2811 | } | ||
2501 | 2812 | ||
2502 | default: | 2813 | static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) |
2503 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); | 2814 | { |
2504 | break; | 2815 | struct ixgbe_hw *hw = &adapter->hw; |
2505 | } | 2816 | u32 rttdcs; |
2817 | u32 mask; | ||
2818 | |||
2819 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
2820 | return; | ||
2506 | 2821 | ||
2507 | /* re-enable the arbiter */ | 2822 | /* disable the arbiter while setting MTQC */ |
2508 | rttdcs &= ~IXGBE_RTTDCS_ARBDIS; | 2823 | rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); |
2509 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); | 2824 | rttdcs |= IXGBE_RTTDCS_ARBDIS; |
2825 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); | ||
2826 | |||
2827 | /* set transmit pool layout */ | ||
2828 | mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED); | ||
2829 | switch (adapter->flags & mask) { | ||
2830 | |||
2831 | case (IXGBE_FLAG_SRIOV_ENABLED): | ||
2832 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, | ||
2833 | (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); | ||
2834 | break; | ||
2835 | |||
2836 | case (IXGBE_FLAG_DCB_ENABLED): | ||
2837 | /* We enable 8 traffic classes, DCB only */ | ||
2838 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, | ||
2839 | (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ)); | ||
2840 | break; | ||
2841 | |||
2842 | default: | ||
2843 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); | ||
2844 | break; | ||
2845 | } | ||
2846 | |||
2847 | /* re-enable the arbiter */ | ||
2848 | rttdcs &= ~IXGBE_RTTDCS_ARBDIS; | ||
2849 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); | ||
2850 | } | ||
2851 | |||
2852 | /** | ||
2853 | * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset | ||
2854 | * @adapter: board private structure | ||
2855 | * | ||
2856 | * Configure the Tx unit of the MAC after a reset. | ||
2857 | **/ | ||
2858 | static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) | ||
2859 | { | ||
2860 | struct ixgbe_hw *hw = &adapter->hw; | ||
2861 | u32 dmatxctl; | ||
2862 | u32 i; | ||
2863 | |||
2864 | ixgbe_setup_mtqc(adapter); | ||
2865 | |||
2866 | if (hw->mac.type != ixgbe_mac_82598EB) { | ||
2867 | /* DMATXCTL.EN must be before Tx queues are enabled */ | ||
2868 | dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); | ||
2869 | dmatxctl |= IXGBE_DMATXCTL_TE; | ||
2870 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); | ||
2510 | } | 2871 | } |
2872 | |||
2873 | /* Setup the HW Tx Head and Tail descriptor pointers */ | ||
2874 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
2875 | ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); | ||
2511 | } | 2876 | } |
2512 | 2877 | ||
2513 | #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 | 2878 | #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 |
2514 | 2879 | ||
2515 | static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, | 2880 | static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, |
2516 | struct ixgbe_ring *rx_ring) | 2881 | struct ixgbe_ring *rx_ring) |
2517 | { | 2882 | { |
2518 | u32 srrctl; | 2883 | u32 srrctl; |
2519 | int index; | 2884 | u8 reg_idx = rx_ring->reg_idx; |
2520 | struct ixgbe_ring_feature *feature = adapter->ring_feature; | ||
2521 | 2885 | ||
2522 | index = rx_ring->reg_idx; | 2886 | switch (adapter->hw.mac.type) { |
2523 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 2887 | case ixgbe_mac_82598EB: { |
2524 | unsigned long mask; | 2888 | struct ixgbe_ring_feature *feature = adapter->ring_feature; |
2525 | mask = (unsigned long) feature[RING_F_RSS].mask; | 2889 | const int mask = feature[RING_F_RSS].mask; |
2526 | index = index & mask; | 2890 | reg_idx = reg_idx & mask; |
2891 | } | ||
2892 | break; | ||
2893 | case ixgbe_mac_82599EB: | ||
2894 | case ixgbe_mac_X540: | ||
2895 | default: | ||
2896 | break; | ||
2527 | } | 2897 | } |
2528 | srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); | 2898 | |
2899 | srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx)); | ||
2529 | 2900 | ||
2530 | srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; | 2901 | srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; |
2531 | srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; | 2902 | srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; |
2903 | if (adapter->num_vfs) | ||
2904 | srrctl |= IXGBE_SRRCTL_DROP_EN; | ||
2532 | 2905 | ||
2533 | srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & | 2906 | srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & |
2534 | IXGBE_SRRCTL_BSIZEHDR_MASK; | 2907 | IXGBE_SRRCTL_BSIZEHDR_MASK; |
2535 | 2908 | ||
2536 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { | 2909 | if (ring_is_ps_enabled(rx_ring)) { |
2537 | #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER | 2910 | #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER |
2538 | srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; | 2911 | srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
2539 | #else | 2912 | #else |
@@ -2546,41 +2919,93 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, | |||
2546 | srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; | 2919 | srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; |
2547 | } | 2920 | } |
2548 | 2921 | ||
2549 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); | 2922 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); |
2550 | } | 2923 | } |
2551 | 2924 | ||
2552 | static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) | 2925 | static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) |
2553 | { | 2926 | { |
2554 | u32 mrqc = 0; | 2927 | struct ixgbe_hw *hw = &adapter->hw; |
2928 | static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, | ||
2929 | 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, | ||
2930 | 0x6A3E67EA, 0x14364D17, 0x3BED200D}; | ||
2931 | u32 mrqc = 0, reta = 0; | ||
2932 | u32 rxcsum; | ||
2933 | int i, j; | ||
2555 | int mask; | 2934 | int mask; |
2556 | 2935 | ||
2557 | if (!(adapter->hw.mac.type == ixgbe_mac_82599EB)) | 2936 | /* Fill out hash function seeds */ |
2558 | return mrqc; | 2937 | for (i = 0; i < 10; i++) |
2938 | IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); | ||
2939 | |||
2940 | /* Fill out redirection table */ | ||
2941 | for (i = 0, j = 0; i < 128; i++, j++) { | ||
2942 | if (j == adapter->ring_feature[RING_F_RSS].indices) | ||
2943 | j = 0; | ||
2944 | /* reta = 4-byte sliding window of | ||
2945 | * 0x00..(indices-1)(indices-1)00..etc. */ | ||
2946 | reta = (reta << 8) | (j * 0x11); | ||
2947 | if ((i & 3) == 3) | ||
2948 | IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); | ||
2949 | } | ||
2559 | 2950 | ||
2560 | mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED | 2951 | /* Disable indicating checksum in descriptor, enables RSS hash */ |
2952 | rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); | ||
2953 | rxcsum |= IXGBE_RXCSUM_PCSD; | ||
2954 | IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); | ||
2955 | |||
2956 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) | ||
2957 | mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED; | ||
2958 | else | ||
2959 | mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED | ||
2561 | #ifdef CONFIG_IXGBE_DCB | 2960 | #ifdef CONFIG_IXGBE_DCB |
2562 | | IXGBE_FLAG_DCB_ENABLED | 2961 | | IXGBE_FLAG_DCB_ENABLED |
2563 | #endif | 2962 | #endif |
2564 | | IXGBE_FLAG_SRIOV_ENABLED | 2963 | | IXGBE_FLAG_SRIOV_ENABLED |
2565 | ); | 2964 | ); |
2566 | 2965 | ||
2567 | switch (mask) { | 2966 | switch (mask) { |
2967 | #ifdef CONFIG_IXGBE_DCB | ||
2968 | case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_RSS_ENABLED): | ||
2969 | mrqc = IXGBE_MRQC_RTRSS8TCEN; | ||
2970 | break; | ||
2971 | case (IXGBE_FLAG_DCB_ENABLED): | ||
2972 | mrqc = IXGBE_MRQC_RT8TCEN; | ||
2973 | break; | ||
2974 | #endif /* CONFIG_IXGBE_DCB */ | ||
2568 | case (IXGBE_FLAG_RSS_ENABLED): | 2975 | case (IXGBE_FLAG_RSS_ENABLED): |
2569 | mrqc = IXGBE_MRQC_RSSEN; | 2976 | mrqc = IXGBE_MRQC_RSSEN; |
2570 | break; | 2977 | break; |
2571 | case (IXGBE_FLAG_SRIOV_ENABLED): | 2978 | case (IXGBE_FLAG_SRIOV_ENABLED): |
2572 | mrqc = IXGBE_MRQC_VMDQEN; | 2979 | mrqc = IXGBE_MRQC_VMDQEN; |
2573 | break; | 2980 | break; |
2574 | #ifdef CONFIG_IXGBE_DCB | ||
2575 | case (IXGBE_FLAG_DCB_ENABLED): | ||
2576 | mrqc = IXGBE_MRQC_RT8TCEN; | ||
2577 | break; | ||
2578 | #endif /* CONFIG_IXGBE_DCB */ | ||
2579 | default: | 2981 | default: |
2580 | break; | 2982 | break; |
2581 | } | 2983 | } |
2582 | 2984 | ||
2583 | return mrqc; | 2985 | /* Perform hash on these packet types */ |
2986 | mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | ||
2987 | | IXGBE_MRQC_RSS_FIELD_IPV4_TCP | ||
2988 | | IXGBE_MRQC_RSS_FIELD_IPV6 | ||
2989 | | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; | ||
2990 | |||
2991 | IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); | ||
2992 | } | ||
2993 | |||
2994 | /** | ||
2995 | * ixgbe_clear_rscctl - disable RSC for the indicated ring | ||
2996 | * @adapter: address of board private structure | ||
2997 | * @ring: structure containing ring specific data | ||
2998 | **/ | ||
2999 | void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, | ||
3000 | struct ixgbe_ring *ring) | ||
3001 | { | ||
3002 | struct ixgbe_hw *hw = &adapter->hw; | ||
3003 | u32 rscctrl; | ||
3004 | u8 reg_idx = ring->reg_idx; | ||
3005 | |||
3006 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); | ||
3007 | rscctrl &= ~IXGBE_RSCCTL_RSCEN; | ||
3008 | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); | ||
2584 | } | 3009 | } |
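The redirection-table loop in ixgbe_setup_mrqc() above builds each RETA byte as j * 0x11 so the queue index lands in both nibbles, wrapping j at the RSS queue count. A standalone sketch of the same loop, assuming 4 RSS queues:

    #include <stdio.h>

    int main(void)
    {
        unsigned int indices = 4;   /* assumed RSS queue count */
        unsigned int reta = 0;
        unsigned int i, j;

        for (i = 0, j = 0; i < 128; i++, j++) {
            if (j == indices)
                j = 0;
            reta = (reta << 8) | (j * 0x11);    /* same sliding window as above */
            if ((i & 3) == 3 && i < 8)          /* show the first two registers */
                printf("RETA[%u] = 0x%08x\n", i >> 2, reta);
        }
        return 0;   /* with 4 queues every 32-bit entry reads 0x00112233 */
    }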
2585 | 3010 | ||
2586 | /** | 3011 | /** |
@@ -2588,25 +3013,26 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) | |||
2588 | * @adapter: address of board private structure | 3013 | * @adapter: address of board private structure |
2589 | * @index: index of ring to set | 3014 | * @index: index of ring to set |
2590 | **/ | 3015 | **/ |
2591 | static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index) | 3016 | void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, |
3017 | struct ixgbe_ring *ring) | ||
2592 | { | 3018 | { |
2593 | struct ixgbe_ring *rx_ring; | ||
2594 | struct ixgbe_hw *hw = &adapter->hw; | 3019 | struct ixgbe_hw *hw = &adapter->hw; |
2595 | int j; | ||
2596 | u32 rscctrl; | 3020 | u32 rscctrl; |
2597 | int rx_buf_len; | 3021 | int rx_buf_len; |
3022 | u8 reg_idx = ring->reg_idx; | ||
3023 | |||
3024 | if (!ring_is_rsc_enabled(ring)) | ||
3025 | return; | ||
2598 | 3026 | ||
2599 | rx_ring = adapter->rx_ring[index]; | 3027 | rx_buf_len = ring->rx_buf_len; |
2600 | j = rx_ring->reg_idx; | 3028 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); |
2601 | rx_buf_len = rx_ring->rx_buf_len; | ||
2602 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); | ||
2603 | rscctrl |= IXGBE_RSCCTL_RSCEN; | 3029 | rscctrl |= IXGBE_RSCCTL_RSCEN; |
2604 | /* | 3030 | /* |
2605 | * we must limit the number of descriptors so that the | 3031 | * we must limit the number of descriptors so that the |
2606 | * total size of max desc * buf_len is not greater | 3032 | * total size of max desc * buf_len is not greater |
2607 | * than 65535 | 3033 | * than 65535 |
2608 | */ | 3034 | */ |
2609 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { | 3035 | if (ring_is_ps_enabled(ring)) { |
2610 | #if (MAX_SKB_FRAGS > 16) | 3036 | #if (MAX_SKB_FRAGS > 16) |
2611 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; | 3037 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; |
2612 | #elif (MAX_SKB_FRAGS > 8) | 3038 | #elif (MAX_SKB_FRAGS > 8) |
@@ -2624,120 +3050,309 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index) | |||
2624 | else | 3050 | else |
2625 | rscctrl |= IXGBE_RSCCTL_MAXDESC_4; | 3051 | rscctrl |= IXGBE_RSCCTL_MAXDESC_4; |
2626 | } | 3052 | } |
2627 | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl); | 3053 | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); |
2628 | } | 3054 | } |
2629 | 3055 | ||
2630 | /** | 3056 | /** |
2631 | * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset | 3057 | * ixgbe_set_uta - Set unicast filter table address |
2632 | * @adapter: board private structure | 3058 | * @adapter: board private structure |
2633 | * | 3059 | * |
2634 | * Configure the Rx unit of the MAC after a reset. | 3060 | * The unicast table address is a register array of 32-bit registers. |
3061 | * The table is meant to be used in a way similar to how the MTA is used | ||
3062 | * however due to certain limitations in the hardware it is necessary to | ||
3063 | * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous | ||
3064 | * enable bit to allow vlan tag stripping when promiscuous mode is enabled | ||
2635 | **/ | 3065 | **/ |
2636 | static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | 3066 | static void ixgbe_set_uta(struct ixgbe_adapter *adapter) |
3067 | { | ||
3068 | struct ixgbe_hw *hw = &adapter->hw; | ||
3069 | int i; | ||
3070 | |||
3071 | /* The UTA table only exists on 82599 hardware and newer */ | ||
3072 | if (hw->mac.type < ixgbe_mac_82599EB) | ||
3073 | return; | ||
3074 | |||
3075 | /* we only need to do this if VMDq is enabled */ | ||
3076 | if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) | ||
3077 | return; | ||
3078 | |||
3079 | for (i = 0; i < 128; i++) | ||
3080 | IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); | ||
3081 | } | ||
3082 | |||
3083 | #define IXGBE_MAX_RX_DESC_POLL 10 | ||
3084 | static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, | ||
3085 | struct ixgbe_ring *ring) | ||
3086 | { | ||
3087 | struct ixgbe_hw *hw = &adapter->hw; | ||
3088 | int wait_loop = IXGBE_MAX_RX_DESC_POLL; | ||
3089 | u32 rxdctl; | ||
3090 | u8 reg_idx = ring->reg_idx; | ||
3091 | |||
3092 | /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ | ||
3093 | if (hw->mac.type == ixgbe_mac_82598EB && | ||
3094 | !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) | ||
3095 | return; | ||
3096 | |||
3097 | do { | ||
3098 | usleep_range(1000, 2000); | ||
3099 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); | ||
3100 | } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); | ||
3101 | |||
3102 | if (!wait_loop) { | ||
3103 | e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " | ||
3104 | "the polling period\n", reg_idx); | ||
3105 | } | ||
3106 | } | ||
3107 | |||
3108 | void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, | ||
3109 | struct ixgbe_ring *ring) | ||
3110 | { | ||
3111 | struct ixgbe_hw *hw = &adapter->hw; | ||
3112 | int wait_loop = IXGBE_MAX_RX_DESC_POLL; | ||
3113 | u32 rxdctl; | ||
3114 | u8 reg_idx = ring->reg_idx; | ||
3115 | |||
3116 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); | ||
3117 | rxdctl &= ~IXGBE_RXDCTL_ENABLE; | ||
3118 | |||
3119 | /* write value back with RXDCTL.ENABLE bit cleared */ | ||
3120 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); | ||
3121 | |||
3122 | if (hw->mac.type == ixgbe_mac_82598EB && | ||
3123 | !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) | ||
3124 | return; | ||
3125 | |||
3126 | /* the hardware may take up to 100us to really disable the rx queue */ | ||
3127 | do { | ||
3128 | udelay(10); | ||
3129 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); | ||
3130 | } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); | ||
3131 | |||
3132 | if (!wait_loop) { | ||
3133 | e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " | ||
3134 | "the polling period\n", reg_idx); | ||
3135 | } | ||
3136 | } | ||
3137 | |||
3138 | void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, | ||
3139 | struct ixgbe_ring *ring) | ||
3140 | { | ||
3141 | struct ixgbe_hw *hw = &adapter->hw; | ||
3142 | u64 rdba = ring->dma; | ||
3143 | u32 rxdctl; | ||
3144 | u8 reg_idx = ring->reg_idx; | ||
3145 | |||
3146 | /* disable queue to avoid issues while updating state */ | ||
3147 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); | ||
3148 | ixgbe_disable_rx_queue(adapter, ring); | ||
3149 | |||
3150 | IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); | ||
3151 | IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); | ||
3152 | IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), | ||
3153 | ring->count * sizeof(union ixgbe_adv_rx_desc)); | ||
3154 | IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); | ||
3155 | IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); | ||
3156 | ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx); | ||
3157 | |||
3158 | ixgbe_configure_srrctl(adapter, ring); | ||
3159 | ixgbe_configure_rscctl(adapter, ring); | ||
3160 | |||
3161 | /* If operating in IOV mode set RLPML for X540 */ | ||
3162 | if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && | ||
3163 | hw->mac.type == ixgbe_mac_X540) { | ||
3164 | rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; | ||
3165 | rxdctl |= ((ring->netdev->mtu + ETH_HLEN + | ||
3166 | ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN); | ||
3167 | } | ||
3168 | |||
3169 | if (hw->mac.type == ixgbe_mac_82598EB) { | ||
3170 | /* | ||
3171 | * enable cache line friendly hardware writes: | ||
3172 | * PTHRESH=32 descriptors (half the internal cache), | ||
3173 | * this also removes ugly rx_no_buffer_count increment | ||
3174 | * HTHRESH=4 descriptors (to minimize latency on fetch) | ||
3175 | * WTHRESH=8 burst writeback up to two cache lines | ||
3176 | */ | ||
3177 | rxdctl &= ~0x3FFFFF; | ||
3178 | rxdctl |= 0x080420; | ||
3179 | } | ||
3180 | |||
3181 | /* enable receive descriptor ring */ | ||
3182 | rxdctl |= IXGBE_RXDCTL_ENABLE; | ||
3183 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); | ||
3184 | |||
3185 | ixgbe_rx_desc_queue_enable(adapter, ring); | ||
3186 | ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring)); | ||
3187 | } | ||
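The 82598 threshold write above (rxdctl |= 0x080420) packs the three values named in the comment. Decoding it with the customary RXDCTL field positions, which are assumed here rather than taken from this patch (PTHRESH bits 6:0, HTHRESH bits 13:8, WTHRESH bits 22:16):

    #include <stdio.h>

    int main(void)
    {
        unsigned int rxdctl = 0x080420;

        printf("PTHRESH = %u\n", rxdctl & 0x7f);          /* 32 descriptors */
        printf("HTHRESH = %u\n", (rxdctl >> 8) & 0x3f);   /* 4 descriptors  */
        printf("WTHRESH = %u\n", (rxdctl >> 16) & 0x7f);  /* 8 descriptors  */
        return 0;
    }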
3188 | |||
3189 | static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) | ||
3190 | { | ||
3191 | struct ixgbe_hw *hw = &adapter->hw; | ||
3192 | int p; | ||
3193 | |||
3194 | /* PSRTYPE must be initialized in non 82598 adapters */ | ||
3195 | u32 psrtype = IXGBE_PSRTYPE_TCPHDR | | ||
3196 | IXGBE_PSRTYPE_UDPHDR | | ||
3197 | IXGBE_PSRTYPE_IPV4HDR | | ||
3198 | IXGBE_PSRTYPE_L2HDR | | ||
3199 | IXGBE_PSRTYPE_IPV6HDR; | ||
3200 | |||
3201 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
3202 | return; | ||
3203 | |||
3204 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) | ||
3205 | psrtype |= (adapter->num_rx_queues_per_pool << 29); | ||
3206 | |||
3207 | for (p = 0; p < adapter->num_rx_pools; p++) | ||
3208 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p), | ||
3209 | psrtype); | ||
3210 | } | ||
3211 | |||
3212 | static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) | ||
3213 | { | ||
3214 | struct ixgbe_hw *hw = &adapter->hw; | ||
3215 | u32 gcr_ext; | ||
3216 | u32 vt_reg_bits; | ||
3217 | u32 reg_offset, vf_shift; | ||
3218 | u32 vmdctl; | ||
3219 | |||
3220 | if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) | ||
3221 | return; | ||
3222 | |||
3223 | vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); | ||
3224 | vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN; | ||
3225 | vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT); | ||
3226 | IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); | ||
3227 | |||
3228 | vf_shift = adapter->num_vfs % 32; | ||
3229 | reg_offset = (adapter->num_vfs > 32) ? 1 : 0; | ||
3230 | |||
3231 | /* Enable only the PF's pool for Tx/Rx */ | ||
3232 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); | ||
3233 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0); | ||
3234 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); | ||
3235 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0); | ||
3236 | IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); | ||
3237 | |||
3238 | /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ | ||
3239 | hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); | ||
3240 | |||
3241 | /* | ||
3242 | * Set up VF register offsets for selected VT Mode, | ||
3243 | * i.e. 32 or 64 VFs for SR-IOV | ||
3244 | */ | ||
3245 | gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); | ||
3246 | gcr_ext |= IXGBE_GCR_EXT_MSIX_EN; | ||
3247 | gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; | ||
3248 | IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); | ||
3249 | |||
3250 | /* enable Tx loopback for VF/PF communication */ | ||
3251 | IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); | ||
3252 | /* Enable MAC Anti-Spoofing */ | ||
3253 | hw->mac.ops.set_mac_anti_spoofing(hw, | ||
3254 | (adapter->antispoofing_enabled = | ||
3255 | (adapter->num_vfs != 0)), | ||
3256 | adapter->num_vfs); | ||
3257 | } | ||
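In ixgbe_configure_virtualization() above, the PF's own pool index equals num_vfs, and the VFRE/VFTE enables are split across two 32-bit registers; a small sketch of that index math with an assumed VF count:

    #include <stdio.h>

    int main(void)
    {
        unsigned int num_vfs = 7;   /* assumed VF count */
        unsigned int vf_shift = num_vfs % 32;
        unsigned int reg_offset = (num_vfs > 32) ? 1 : 0;

        /* only the PF pool's bit is set; all VF pools stay disabled here */
        printf("PF pool enable: VFRE(%u) |= 0x%08x (bit %u)\n",
               reg_offset, 1u << vf_shift, vf_shift);
        return 0;   /* 7 VFs -> VFRE(0) bit 7; 40 VFs -> VFRE(1) bit 8 */
    }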
3258 | |||
3259 | static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) | ||
2637 | { | 3260 | { |
2638 | u64 rdba; | ||
2639 | struct ixgbe_hw *hw = &adapter->hw; | 3261 | struct ixgbe_hw *hw = &adapter->hw; |
2640 | struct ixgbe_ring *rx_ring; | ||
2641 | struct net_device *netdev = adapter->netdev; | 3262 | struct net_device *netdev = adapter->netdev; |
2642 | int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | 3263 | int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
2643 | int i, j; | ||
2644 | u32 rdlen, rxctrl, rxcsum; | ||
2645 | static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, | ||
2646 | 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, | ||
2647 | 0x6A3E67EA, 0x14364D17, 0x3BED200D}; | ||
2648 | u32 fctrl, hlreg0; | ||
2649 | u32 reta = 0, mrqc = 0; | ||
2650 | u32 rdrxctl; | ||
2651 | int rx_buf_len; | 3264 | int rx_buf_len; |
3265 | struct ixgbe_ring *rx_ring; | ||
3266 | int i; | ||
3267 | u32 mhadd, hlreg0; | ||
2652 | 3268 | ||
2653 | /* Decide whether to use packet split mode or not */ | 3269 | /* Decide whether to use packet split mode or not */ |
3270 | /* On by default */ | ||
3271 | adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; | ||
3272 | |||
2654 | /* Do not use packet split if we're in SR-IOV Mode */ | 3273 | /* Do not use packet split if we're in SR-IOV Mode */ |
2655 | if (!adapter->num_vfs) | 3274 | if (adapter->num_vfs) |
2656 | adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; | 3275 | adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; |
3276 | |||
3277 | /* Disable packet split due to 82599 erratum #45 */ | ||
3278 | if (hw->mac.type == ixgbe_mac_82599EB) | ||
3279 | adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; | ||
2657 | 3280 | ||
2658 | /* Set the RX buffer length according to the mode */ | 3281 | /* Set the RX buffer length according to the mode */ |
2659 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | 3282 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { |
2660 | rx_buf_len = IXGBE_RX_HDR_SIZE; | 3283 | rx_buf_len = IXGBE_RX_HDR_SIZE; |
2661 | if (hw->mac.type == ixgbe_mac_82599EB) { | ||
2662 | /* PSRTYPE must be initialized in 82599 */ | ||
2663 | u32 psrtype = IXGBE_PSRTYPE_TCPHDR | | ||
2664 | IXGBE_PSRTYPE_UDPHDR | | ||
2665 | IXGBE_PSRTYPE_IPV4HDR | | ||
2666 | IXGBE_PSRTYPE_IPV6HDR | | ||
2667 | IXGBE_PSRTYPE_L2HDR; | ||
2668 | IXGBE_WRITE_REG(hw, | ||
2669 | IXGBE_PSRTYPE(adapter->num_vfs), | ||
2670 | psrtype); | ||
2671 | } | ||
2672 | } else { | 3284 | } else { |
2673 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && | 3285 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && |
2674 | (netdev->mtu <= ETH_DATA_LEN)) | 3286 | (netdev->mtu <= ETH_DATA_LEN)) |
2675 | rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; | 3287 | rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; |
2676 | else | 3288 | else |
2677 | rx_buf_len = ALIGN(max_frame, 1024); | 3289 | rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024); |
2678 | } | 3290 | } |
2679 | 3291 | ||
2680 | fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); | 3292 | #ifdef IXGBE_FCOE |
2681 | fctrl |= IXGBE_FCTRL_BAM; | 3293 | /* adjust max frame to be able to do baby jumbo for FCoE */ |
2682 | fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ | 3294 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && |
2683 | fctrl |= IXGBE_FCTRL_PMCF; | 3295 | (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) |
2684 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); | 3296 | max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; |
3297 | |||
3298 | #endif /* IXGBE_FCOE */ | ||
3299 | mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); | ||
3300 | if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { | ||
3301 | mhadd &= ~IXGBE_MHADD_MFS_MASK; | ||
3302 | mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; | ||
3303 | |||
3304 | IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); | ||
3305 | } | ||
2685 | 3306 | ||
2686 | hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); | 3307 | hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); |
2687 | if (adapter->netdev->mtu <= ETH_DATA_LEN) | 3308 | /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ |
2688 | hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; | 3309 | hlreg0 |= IXGBE_HLREG0_JUMBOEN; |
2689 | else | ||
2690 | hlreg0 |= IXGBE_HLREG0_JUMBOEN; | ||
2691 | #ifdef IXGBE_FCOE | ||
2692 | if (netdev->features & NETIF_F_FCOE_MTU) | ||
2693 | hlreg0 |= IXGBE_HLREG0_JUMBOEN; | ||
2694 | #endif | ||
2695 | IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); | 3310 | IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); |
2696 | 3311 | ||
2697 | rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc); | ||
2698 | /* disable receives while setting up the descriptors */ | ||
2699 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); | ||
2700 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); | ||
2701 | |||
2702 | /* | 3312 | /* |
2703 | * Setup the HW Rx Head and Tail Descriptor Pointers and | 3313 | * Setup the HW Rx Head and Tail Descriptor Pointers and |
2704 | * the Base and Length of the Rx Descriptor Ring | 3314 | * the Base and Length of the Rx Descriptor Ring |
2705 | */ | 3315 | */ |
2706 | for (i = 0; i < adapter->num_rx_queues; i++) { | 3316 | for (i = 0; i < adapter->num_rx_queues; i++) { |
2707 | rx_ring = adapter->rx_ring[i]; | 3317 | rx_ring = adapter->rx_ring[i]; |
2708 | rdba = rx_ring->dma; | ||
2709 | j = rx_ring->reg_idx; | ||
2710 | IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32))); | ||
2711 | IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); | ||
2712 | IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen); | ||
2713 | IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); | ||
2714 | IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); | ||
2715 | rx_ring->head = IXGBE_RDH(j); | ||
2716 | rx_ring->tail = IXGBE_RDT(j); | ||
2717 | rx_ring->rx_buf_len = rx_buf_len; | 3318 | rx_ring->rx_buf_len = rx_buf_len; |
2718 | 3319 | ||
2719 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) | 3320 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) |
2720 | rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED; | 3321 | set_ring_ps_enabled(rx_ring); |
3322 | else | ||
3323 | clear_ring_ps_enabled(rx_ring); | ||
3324 | |||
3325 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) | ||
3326 | set_ring_rsc_enabled(rx_ring); | ||
2721 | else | 3327 | else |
2722 | rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; | 3328 | clear_ring_rsc_enabled(rx_ring); |
2723 | 3329 | ||
2724 | #ifdef IXGBE_FCOE | 3330 | #ifdef IXGBE_FCOE |
2725 | if (netdev->features & NETIF_F_FCOE_MTU) { | 3331 | if (netdev->features & NETIF_F_FCOE_MTU) { |
2726 | struct ixgbe_ring_feature *f; | 3332 | struct ixgbe_ring_feature *f; |
2727 | f = &adapter->ring_feature[RING_F_FCOE]; | 3333 | f = &adapter->ring_feature[RING_F_FCOE]; |
2728 | if ((i >= f->mask) && (i < f->mask + f->indices)) { | 3334 | if ((i >= f->mask) && (i < f->mask + f->indices)) { |
2729 | rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; | 3335 | clear_ring_ps_enabled(rx_ring); |
2730 | if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) | 3336 | if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) |
2731 | rx_ring->rx_buf_len = | 3337 | rx_ring->rx_buf_len = |
2732 | IXGBE_FCOE_JUMBO_FRAME_SIZE; | 3338 | IXGBE_FCOE_JUMBO_FRAME_SIZE; |
3339 | } else if (!ring_is_rsc_enabled(rx_ring) && | ||
3340 | !ring_is_ps_enabled(rx_ring)) { | ||
3341 | rx_ring->rx_buf_len = | ||
3342 | IXGBE_FCOE_JUMBO_FRAME_SIZE; | ||
2733 | } | 3343 | } |
2734 | } | 3344 | } |
2735 | |||
2736 | #endif /* IXGBE_FCOE */ | 3345 | #endif /* IXGBE_FCOE */ |
2737 | ixgbe_configure_srrctl(adapter, rx_ring); | ||
2738 | } | 3346 | } |
3347 | } | ||
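The non-packet-split branch above rounds the worst-case frame up to the next 1 KB boundary. A worked example assuming the standard header lengths (ETH_HLEN 14, ETH_FCS_LEN 4, VLAN_HLEN 4):

    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))   /* a must be a power of two */

    int main(void)
    {
        unsigned int mtu = 9000;                /* jumbo frame example */
        unsigned int max_frame = mtu + 14 + 4;  /* mtu + ETH_HLEN + ETH_FCS_LEN */
        unsigned int rx_buf_len = ALIGN_UP(max_frame + 4 /* VLAN_HLEN */, 1024);

        printf("mtu %u -> rx_buf_len %u\n", mtu, rx_buf_len);  /* 9000 -> 9216 */
        return 0;
    }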
2739 | 3348 | ||
2740 | if (hw->mac.type == ixgbe_mac_82598EB) { | 3349 | static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) |
3350 | { | ||
3351 | struct ixgbe_hw *hw = &adapter->hw; | ||
3352 | u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); | ||
3353 | |||
3354 | switch (hw->mac.type) { | ||
3355 | case ixgbe_mac_82598EB: | ||
2741 | /* | 3356 | /* |
2742 | * For VMDq support of different descriptor types or | 3357 | * For VMDq support of different descriptor types or |
2743 | * buffer sizes through the use of multiple SRRCTL | 3358 | * buffer sizes through the use of multiple SRRCTL |
@@ -2748,110 +3363,67 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2748 | * effects of setting this bit are only that SRRCTL must be | 3363 | * effects of setting this bit are only that SRRCTL must be |
2749 | * fully programmed [0..15] | 3364 | * fully programmed [0..15] |
2750 | */ | 3365 | */ |
2751 | rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); | ||
2752 | rdrxctl |= IXGBE_RDRXCTL_MVMEN; | 3366 | rdrxctl |= IXGBE_RDRXCTL_MVMEN; |
2753 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); | 3367 | break; |
3368 | case ixgbe_mac_82599EB: | ||
3369 | case ixgbe_mac_X540: | ||
3370 | /* Disable RSC for ACK packets */ | ||
3371 | IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, | ||
3372 | (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); | ||
3373 | rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; | ||
3374 | /* hardware requires some bits to be set by default */ | ||
3375 | rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); | ||
3376 | rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; | ||
3377 | break; | ||
3378 | default: | ||
3379 | /* We should do nothing since we don't know this hardware */ | ||
3380 | return; | ||
2754 | } | 3381 | } |
2755 | 3382 | ||
2756 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { | 3383 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); |
2757 | u32 vt_reg_bits; | 3384 | } |
2758 | u32 reg_offset, vf_shift; | ||
2759 | u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); | ||
2760 | vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | ||
2761 | | IXGBE_VT_CTL_REPLEN; | ||
2762 | vt_reg_bits |= (adapter->num_vfs << | ||
2763 | IXGBE_VT_CTL_POOL_SHIFT); | ||
2764 | IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); | ||
2765 | IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0); | ||
2766 | |||
2767 | vf_shift = adapter->num_vfs % 32; | ||
2768 | reg_offset = adapter->num_vfs / 32; | ||
2769 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); | ||
2770 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); | ||
2771 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); | ||
2772 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); | ||
2773 | /* Enable only the PF's pool for Tx/Rx */ | ||
2774 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); | ||
2775 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); | ||
2776 | IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); | ||
2777 | ixgbe_set_vmolr(hw, adapter->num_vfs, true); | ||
2778 | } | ||
2779 | |||
2780 | /* Program MRQC for the distribution of queues */ | ||
2781 | mrqc = ixgbe_setup_mrqc(adapter); | ||
2782 | |||
2783 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
2784 | /* Fill out redirection table */ | ||
2785 | for (i = 0, j = 0; i < 128; i++, j++) { | ||
2786 | if (j == adapter->ring_feature[RING_F_RSS].indices) | ||
2787 | j = 0; | ||
2788 | /* reta = 4-byte sliding window of | ||
2789 | * 0x00..(indices-1)(indices-1)00..etc. */ | ||
2790 | reta = (reta << 8) | (j * 0x11); | ||
2791 | if ((i & 3) == 3) | ||
2792 | IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); | ||
2793 | } | ||
2794 | |||
2795 | /* Fill out hash function seeds */ | ||
2796 | for (i = 0; i < 10; i++) | ||
2797 | IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); | ||
2798 | |||
2799 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
2800 | mrqc |= IXGBE_MRQC_RSSEN; | ||
2801 | /* Perform hash on these packet types */ | ||
2802 | mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | ||
2803 | | IXGBE_MRQC_RSS_FIELD_IPV4_TCP | ||
2804 | | IXGBE_MRQC_RSS_FIELD_IPV6 | ||
2805 | | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; | ||
2806 | } | ||
2807 | IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); | ||
2808 | 3385 | ||
2809 | if (adapter->num_vfs) { | 3386 | /** |
2810 | u32 reg; | 3387 | * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset |
3388 | * @adapter: board private structure | ||
3389 | * | ||
3390 | * Configure the Rx unit of the MAC after a reset. | ||
3391 | **/ | ||
3392 | static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | ||
3393 | { | ||
3394 | struct ixgbe_hw *hw = &adapter->hw; | ||
3395 | int i; | ||
3396 | u32 rxctrl; | ||
2811 | 3397 | ||
2812 | /* Map PF MAC address in RAR Entry 0 to first pool | 3398 | /* disable receives while setting up the descriptors */ |
2813 | * following VFs */ | 3399 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); |
2814 | hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); | 3400 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); |
2815 | 3401 | ||
2816 | /* Set up VF register offsets for selected VT Mode, i.e. | 3402 | ixgbe_setup_psrtype(adapter); |
2817 | * 64 VFs for SR-IOV */ | 3403 | ixgbe_setup_rdrxctl(adapter); |
2818 | reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); | ||
2819 | reg |= IXGBE_GCR_EXT_SRIOV; | ||
2820 | IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg); | ||
2821 | } | ||
2822 | 3404 | ||
2823 | rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); | 3405 | /* Program registers for the distribution of queues */ |
3406 | ixgbe_setup_mrqc(adapter); | ||
2824 | 3407 | ||
2825 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED || | 3408 | ixgbe_set_uta(adapter); |
2826 | adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) { | ||
2827 | /* Disable indicating checksum in descriptor, enables | ||
2828 | * RSS hash */ | ||
2829 | rxcsum |= IXGBE_RXCSUM_PCSD; | ||
2830 | } | ||
2831 | if (!(rxcsum & IXGBE_RXCSUM_PCSD)) { | ||
2832 | /* Enable IPv4 payload checksum for UDP fragments | ||
2833 | * if PCSD is not set */ | ||
2834 | rxcsum |= IXGBE_RXCSUM_IPPCSE; | ||
2835 | } | ||
2836 | 3409 | ||
2837 | IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); | 3410 | /* set_rx_buffer_len must be called before ring initialization */ |
3411 | ixgbe_set_rx_buffer_len(adapter); | ||
2838 | 3412 | ||
2839 | if (hw->mac.type == ixgbe_mac_82599EB) { | 3413 | /* |
2840 | rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); | 3414 | * Setup the HW Rx Head and Tail Descriptor Pointers and |
2841 | rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; | 3415 | * the Base and Length of the Rx Descriptor Ring |
2842 | rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; | 3416 | */ |
2843 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); | 3417 | for (i = 0; i < adapter->num_rx_queues; i++) |
2844 | } | 3418 | ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); |
2845 | 3419 | ||
2846 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { | 3420 | /* disable drop enable for 82598 parts */ |
2847 | /* Enable 82599 HW-RSC */ | 3421 | if (hw->mac.type == ixgbe_mac_82598EB) |
2848 | for (i = 0; i < adapter->num_rx_queues; i++) | 3422 | rxctrl |= IXGBE_RXCTRL_DMBYPS; |
2849 | ixgbe_configure_rscctl(adapter, i); | ||
2850 | 3423 | ||
2851 | /* Disable RSC for ACK packets */ | 3424 | /* enable all receives */ |
2852 | IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, | 3425 | rxctrl |= IXGBE_RXCTRL_RXEN; |
2853 | (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); | 3426 | hw->mac.ops.enable_rx_dma(hw, rxctrl); |
2854 | } | ||
2855 | } | 3427 | } |
2856 | 3428 | ||
2857 | static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | 3429 | static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) |
@@ -2862,6 +3434,7 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | |||
2862 | 3434 | ||
2863 | /* add VID to filter table */ | 3435 | /* add VID to filter table */ |
2864 | hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); | 3436 | hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); |
3437 | set_bit(vid, adapter->active_vlans); | ||
2865 | } | 3438 | } |
2866 | 3439 | ||
2867 | static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | 3440 | static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) |
@@ -2870,16 +3443,9 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
2870 | struct ixgbe_hw *hw = &adapter->hw; | 3443 | struct ixgbe_hw *hw = &adapter->hw; |
2871 | int pool_ndx = adapter->num_vfs; | 3444 | int pool_ndx = adapter->num_vfs; |
2872 | 3445 | ||
2873 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | ||
2874 | ixgbe_irq_disable(adapter); | ||
2875 | |||
2876 | vlan_group_set_device(adapter->vlgrp, vid, NULL); | ||
2877 | |||
2878 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | ||
2879 | ixgbe_irq_enable(adapter); | ||
2880 | |||
2881 | /* remove VID from filter table */ | 3446 | /* remove VID from filter table */ |
2882 | hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); | 3447 | hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); |
3448 | clear_bit(vid, adapter->active_vlans); | ||
2883 | } | 3449 | } |
2884 | 3450 | ||
2885 | /** | 3451 | /** |
@@ -2889,27 +3455,46 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
2889 | static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) | 3455 | static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) |
2890 | { | 3456 | { |
2891 | struct ixgbe_hw *hw = &adapter->hw; | 3457 | struct ixgbe_hw *hw = &adapter->hw; |
2892 | u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | 3458 | u32 vlnctrl; |
3459 | |||
3460 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | ||
3461 | vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); | ||
3462 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | ||
3463 | } | ||
3464 | |||
3465 | /** | ||
3466 | * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering | ||
3467 | * @adapter: driver data | ||
3468 | */ | ||
3469 | static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) | ||
3470 | { | ||
3471 | struct ixgbe_hw *hw = &adapter->hw; | ||
3472 | u32 vlnctrl; | ||
3473 | |||
3474 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | ||
3475 | vlnctrl |= IXGBE_VLNCTRL_VFE; | ||
3476 | vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; | ||
3477 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | ||
3478 | } | ||
3479 | |||
3480 | /** | ||
3481 | * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping | ||
3482 | * @adapter: driver data | ||
3483 | */ | ||
3484 | static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) | ||
3485 | { | ||
3486 | struct ixgbe_hw *hw = &adapter->hw; | ||
3487 | u32 vlnctrl; | ||
2893 | int i, j; | 3488 | int i, j; |
2894 | 3489 | ||
2895 | switch (hw->mac.type) { | 3490 | switch (hw->mac.type) { |
2896 | case ixgbe_mac_82598EB: | 3491 | case ixgbe_mac_82598EB: |
2897 | vlnctrl &= ~IXGBE_VLNCTRL_VFE; | 3492 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); |
2898 | #ifdef CONFIG_IXGBE_DCB | 3493 | vlnctrl &= ~IXGBE_VLNCTRL_VME; |
2899 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) | ||
2900 | vlnctrl &= ~IXGBE_VLNCTRL_VME; | ||
2901 | #endif | ||
2902 | vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; | ||
2903 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | 3494 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
2904 | break; | 3495 | break; |
2905 | case ixgbe_mac_82599EB: | 3496 | case ixgbe_mac_82599EB: |
2906 | vlnctrl &= ~IXGBE_VLNCTRL_VFE; | 3497 | case ixgbe_mac_X540: |
2907 | vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; | ||
2908 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | ||
2909 | #ifdef CONFIG_IXGBE_DCB | ||
2910 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) | ||
2911 | break; | ||
2912 | #endif | ||
2913 | for (i = 0; i < adapter->num_rx_queues; i++) { | 3498 | for (i = 0; i < adapter->num_rx_queues; i++) { |
2914 | j = adapter->rx_ring[i]->reg_idx; | 3499 | j = adapter->rx_ring[i]->reg_idx; |
2915 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); | 3500 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); |
@@ -2923,25 +3508,23 @@ static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) | |||
2923 | } | 3508 | } |
2924 | 3509 | ||
2925 | /** | 3510 | /** |
2926 | * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering | 3511 | * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping |
2927 | * @adapter: driver data | 3512 | * @adapter: driver data |
2928 | */ | 3513 | */ |
2929 | static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) | 3514 | static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) |
2930 | { | 3515 | { |
2931 | struct ixgbe_hw *hw = &adapter->hw; | 3516 | struct ixgbe_hw *hw = &adapter->hw; |
2932 | u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | 3517 | u32 vlnctrl; |
2933 | int i, j; | 3518 | int i, j; |
2934 | 3519 | ||
2935 | switch (hw->mac.type) { | 3520 | switch (hw->mac.type) { |
2936 | case ixgbe_mac_82598EB: | 3521 | case ixgbe_mac_82598EB: |
2937 | vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; | 3522 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); |
2938 | vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; | 3523 | vlnctrl |= IXGBE_VLNCTRL_VME; |
2939 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | 3524 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
2940 | break; | 3525 | break; |
2941 | case ixgbe_mac_82599EB: | 3526 | case ixgbe_mac_82599EB: |
2942 | vlnctrl |= IXGBE_VLNCTRL_VFE; | 3527 | case ixgbe_mac_X540: |
2943 | vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; | ||
2944 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | ||
2945 | for (i = 0; i < adapter->num_rx_queues; i++) { | 3528 | for (i = 0; i < adapter->num_rx_queues; i++) { |
2946 | j = adapter->rx_ring[i]->reg_idx; | 3529 | j = adapter->rx_ring[i]->reg_idx; |
2947 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); | 3530 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); |
@@ -2954,40 +3537,14 @@ static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) | |||
2954 | } | 3537 | } |
2955 | } | 3538 | } |
2956 | 3539 | ||
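The strip helpers above split along MAC generations: 82598 keeps one global VME bit in VLNCTRL, while 82599 and X540 carry a VME bit in each ring's RXDCTL, so tag stripping is toggled per receive queue. A hedged sketch of that split, with the register images reduced to plain arrays and an assumed bit value:

    /* vlan_strip_model.c - model of global vs per-ring VLAN stripping.
     * The VME bit position is an assumption for the demo.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define VME_BIT 0x40000000u
    #define NUM_RX_QUEUES 4

    enum mac_type { MAC_82598EB, MAC_82599_OR_X540 };

    static uint32_t vlnctrl;                 /* one global register image */
    static uint32_t rxdctl[NUM_RX_QUEUES];   /* per-queue register images */

    static void set_vlan_stripping(enum mac_type mac, bool enable)
    {
            int i;

            if (mac == MAC_82598EB) {
                    vlnctrl = enable ? (vlnctrl | VME_BIT)
                                     : (vlnctrl & ~VME_BIT);
                    return;
            }
            for (i = 0; i < NUM_RX_QUEUES; i++)
                    rxdctl[i] = enable ? (rxdctl[i] | VME_BIT)
                                       : (rxdctl[i] & ~VME_BIT);
    }

    int main(void)
    {
            set_vlan_stripping(MAC_82598EB, true);
            set_vlan_stripping(MAC_82599_OR_X540, true);
            printf("VLNCTRL   = 0x%08x\n", (unsigned)vlnctrl);
            printf("RXDCTL[0] = 0x%08x\n", (unsigned)rxdctl[0]);
            return 0;
    }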
2957 | static void ixgbe_vlan_rx_register(struct net_device *netdev, | ||
2958 | struct vlan_group *grp) | ||
2959 | { | ||
2960 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
2961 | |||
2962 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | ||
2963 | ixgbe_irq_disable(adapter); | ||
2964 | adapter->vlgrp = grp; | ||
2965 | |||
2966 | /* | ||
2967 | * For a DCB driver, always enable VLAN tag stripping so we can | ||
2968 | * still receive traffic from a DCB-enabled host even if we're | ||
2969 | * not in DCB mode. | ||
2970 | */ | ||
2971 | ixgbe_vlan_filter_enable(adapter); | ||
2972 | |||
2973 | ixgbe_vlan_rx_add_vid(netdev, 0); | ||
2974 | |||
2975 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | ||
2976 | ixgbe_irq_enable(adapter); | ||
2977 | } | ||
2978 | |||
2979 | static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) | 3540 | static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) |
2980 | { | 3541 | { |
2981 | ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp); | 3542 | u16 vid; |
2982 | 3543 | ||
2983 | if (adapter->vlgrp) { | 3544 | ixgbe_vlan_rx_add_vid(adapter->netdev, 0); |
2984 | u16 vid; | 3545 | |
2985 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | 3546 | for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) |
2986 | if (!vlan_group_get_device(adapter->vlgrp, vid)) | 3547 | ixgbe_vlan_rx_add_vid(adapter->netdev, vid); |
2987 | continue; | ||
2988 | ixgbe_vlan_rx_add_vid(adapter->netdev, vid); | ||
2989 | } | ||
2990 | } | ||
2991 | } | 3548 | } |
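restore_vlan() above no longer walks a vlan_group; the driver now remembers VLAN membership in the active_vlans bitmap, so after a reset it only has to replay every set bit back into the VFTA. A standalone model of that bookkeeping, which is all for_each_set_bit() amounts to here:

    /* active_vlans_model.c - model of the VLAN bitmap bookkeeping: add
     * and kill set or clear one bit per VLAN ID, and restore replays
     * every set bit after a reset.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define VLAN_N_VID 4096
    static uint64_t active_vlans[VLAN_N_VID / 64];

    static void vid_set(unsigned int vid)
    {
            active_vlans[vid / 64] |= (1ULL << (vid % 64));
    }

    static void vid_clear(unsigned int vid)
    {
            active_vlans[vid / 64] &= ~(1ULL << (vid % 64));
    }

    static void restore_vlans(void)
    {
            unsigned int vid;

            for (vid = 0; vid < VLAN_N_VID; vid++)
                    if (active_vlans[vid / 64] & (1ULL << (vid % 64)))
                            printf("re-adding VID %u to the VFTA\n", vid);
    }

    int main(void)
    {
            vid_set(100);
            vid_set(200);
            vid_clear(200);
            restore_vlans();        /* prints only VID 100 */
            return 0;
    }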
2992 | 3549 | ||
2993 | /** | 3550 | /** |
@@ -3004,7 +3561,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev) | |||
3004 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 3561 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
3005 | struct ixgbe_hw *hw = &adapter->hw; | 3562 | struct ixgbe_hw *hw = &adapter->hw; |
3006 | unsigned int vfn = adapter->num_vfs; | 3563 | unsigned int vfn = adapter->num_vfs; |
3007 | unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1); | 3564 | unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS; |
3008 | int count = 0; | 3565 | int count = 0; |
3009 | 3566 | ||
3010 | /* return ENOMEM indicating insufficient memory for addresses */ | 3567 | /* return ENOMEM indicating insufficient memory for addresses */ |
@@ -3052,6 +3609,11 @@ void ixgbe_set_rx_mode(struct net_device *netdev) | |||
3052 | 3609 | ||
3053 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); | 3610 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); |
3054 | 3611 | ||
3612 | /* set all bits that we expect to always be set */ | ||
3613 | fctrl |= IXGBE_FCTRL_BAM; | ||
3614 | fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ | ||
3615 | fctrl |= IXGBE_FCTRL_PMCF; | ||
3616 | |||
3055 | /* clear the bits we are changing the status of */ | 3617 | /* clear the bits we are changing the status of */ |
3056 | fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); | 3618 | fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); |
3057 | 3619 | ||
@@ -3068,7 +3630,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev) | |||
3068 | } else { | 3630 | } else { |
3069 | /* | 3631 | /* |
3070 | * Write addresses to the MTA, if the attempt fails | 3632 | * Write addresses to the MTA, if the attempt fails |
3071 | * then we should just turn on promiscous mode so | 3633 | * then we should just turn on promiscuous mode so |
3072 | * that we can at least receive multicast traffic | 3634 | * that we can at least receive multicast traffic |
3073 | */ | 3635 | */ |
3074 | hw->mac.ops.update_mc_addr_list(hw, netdev); | 3636 | hw->mac.ops.update_mc_addr_list(hw, netdev); |
@@ -3079,7 +3641,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev) | |||
3079 | /* | 3641 | /* |
3080 | * Write addresses to available RAR registers, if there is not | 3642 | * Write addresses to available RAR registers, if there is not |
3081 | * sufficient space to store all the addresses then enable | 3643 | * sufficient space to store all the addresses then enable |
3082 | * unicast promiscous mode | 3644 | * unicast promiscuous mode |
3083 | */ | 3645 | */ |
3084 | count = ixgbe_write_uc_addr_list(netdev); | 3646 | count = ixgbe_write_uc_addr_list(netdev); |
3085 | if (count < 0) { | 3647 | if (count < 0) { |
@@ -3097,6 +3659,11 @@ void ixgbe_set_rx_mode(struct net_device *netdev) | |||
3097 | } | 3659 | } |
3098 | 3660 | ||
3099 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); | 3661 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); |
3662 | |||
3663 | if (netdev->features & NETIF_F_HW_VLAN_RX) | ||
3664 | ixgbe_vlan_strip_enable(adapter); | ||
3665 | else | ||
3666 | ixgbe_vlan_strip_disable(adapter); | ||
3100 | } | 3667 | } |
3101 | 3668 | ||
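ixgbe_set_rx_mode() above now rebuilds FCTRL around three always-on bits (broadcast accept, discard pause frames, pass MAC control) and lets only the promiscuous/allmulti state decide the UPE/MPE pair. A small model of that word; the bit positions are assumptions for the demo:

    /* fctrl_model.c - model of the receive filter control word built in
     * set_rx_mode: BAM/DPF/PMCF are unconditional, UPE/MPE track the
     * interface mode.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define FCTRL_MPE  0x00000100u
    #define FCTRL_UPE  0x00000200u
    #define FCTRL_BAM  0x00000400u
    #define FCTRL_PMCF 0x00001000u
    #define FCTRL_DPF  0x00002000u

    static uint32_t fctrl_for_mode(uint32_t fctrl, bool promisc, bool allmulti)
    {
            fctrl |= FCTRL_BAM | FCTRL_DPF | FCTRL_PMCF; /* always-on bits */
            fctrl &= ~(FCTRL_UPE | FCTRL_MPE);           /* start filtered */
            if (promisc)
                    fctrl |= FCTRL_UPE | FCTRL_MPE;
            else if (allmulti)
                    fctrl |= FCTRL_MPE;
            return fctrl;
    }

    int main(void)
    {
            printf("filtered:    0x%08x\n",
                   (unsigned)fctrl_for_mode(0, false, false));
            printf("promiscuous: 0x%08x\n",
                   (unsigned)fctrl_for_mode(0, true, false));
            return 0;
    }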
3102 | static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) | 3669 | static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) |
@@ -3154,27 +3721,61 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) | |||
3154 | static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) | 3721 | static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) |
3155 | { | 3722 | { |
3156 | struct ixgbe_hw *hw = &adapter->hw; | 3723 | struct ixgbe_hw *hw = &adapter->hw; |
3157 | u32 txdctl; | 3724 | int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
3158 | int i, j; | ||
3159 | 3725 | ||
3160 | ixgbe_dcb_check_config(&adapter->dcb_cfg); | 3726 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { |
3161 | ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG); | 3727 | if (hw->mac.type == ixgbe_mac_82598EB) |
3162 | ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG); | 3728 | netif_set_gso_max_size(adapter->netdev, 65536); |
3729 | return; | ||
3730 | } | ||
3731 | |||
3732 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
3733 | netif_set_gso_max_size(adapter->netdev, 32768); | ||
3163 | 3734 | ||
3164 | /* reconfigure the hardware */ | ||
3165 | ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg); | ||
3166 | 3735 | ||
3167 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
3168 | j = adapter->tx_ring[i]->reg_idx; | ||
3169 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); | ||
3170 | /* PThresh workaround for Tx hang with DFP enabled. */ | ||
3171 | txdctl |= 32; | ||
3172 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); | ||
3173 | } | ||
3174 | /* Enable VLAN tag insert/strip */ | 3736 | /* Enable VLAN tag insert/strip */ |
3175 | ixgbe_vlan_filter_enable(adapter); | 3737 | adapter->netdev->features |= NETIF_F_HW_VLAN_RX; |
3176 | 3738 | ||
3177 | hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); | 3739 | hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); |
3740 | |||
3741 | /* reconfigure the hardware */ | ||
3742 | if (adapter->dcbx_cap & (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE)) { | ||
3743 | #ifdef CONFIG_FCOE | ||
3744 | if (adapter->netdev->features & NETIF_F_FCOE_MTU) | ||
3745 | max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); | ||
3746 | #endif | ||
3747 | ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, | ||
3748 | DCB_TX_CONFIG); | ||
3749 | ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, | ||
3750 | DCB_RX_CONFIG); | ||
3751 | ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); | ||
3752 | } else { | ||
3753 | struct net_device *dev = adapter->netdev; | ||
3754 | |||
3755 | if (adapter->ixgbe_ieee_ets) | ||
3756 | dev->dcbnl_ops->ieee_setets(dev, | ||
3757 | adapter->ixgbe_ieee_ets); | ||
3758 | if (adapter->ixgbe_ieee_pfc) | ||
3759 | dev->dcbnl_ops->ieee_setpfc(dev, | ||
3760 | adapter->ixgbe_ieee_pfc); | ||
3761 | } | ||
3762 | |||
3763 | /* Enable RSS Hash per TC */ | ||
3764 | if (hw->mac.type != ixgbe_mac_82598EB) { | ||
3765 | int i; | ||
3766 | u32 reg = 0; | ||
3767 | |||
3768 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { | ||
3769 | u8 msb = 0; | ||
3770 | u8 cnt = adapter->netdev->tc_to_txq[i].count; | ||
3771 | |||
3772 | while (cnt >>= 1) | ||
3773 | msb++; | ||
3774 | |||
3775 | reg |= msb << IXGBE_RQTC_SHIFT_TC(i); | ||
3776 | } | ||
3777 | IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg); | ||
3778 | } | ||
3178 | } | 3779 | } |
3179 | 3780 | ||
3180 | #endif | 3781 | #endif |
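The per-traffic-class loop above reduces each TC's queue count to its most-significant-bit position (the "while (cnt >>= 1) msb++;" idiom is floor(log2)) and packs one small field per TC into RQTC. A standalone model, assuming a 4-bit shift per TC and a sample queue layout:

    /* rqtc_model.c - model of the per-TC RSS sizing: queue counts become
     * log2 field values packed into one RQTC word.  The 4-bit per-TC
     * shift and the queue layout are assumptions for the demo.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define MAX_TRAFFIC_CLASS 8

    int main(void)
    {
            unsigned int tc_queue_count[MAX_TRAFFIC_CLASS] =
                    { 4, 4, 2, 2, 1, 1, 1, 1 };
            uint32_t reg = 0;
            unsigned int i;

            for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                    unsigned int cnt = tc_queue_count[i];
                    unsigned int msb = 0;

                    while (cnt >>= 1)
                            msb++;                  /* floor(log2(count)) */
                    reg |= msb << (4 * i);
            }
            printf("RQTC = 0x%08x\n", (unsigned)reg);  /* 0x00001122 here */
            return 0;
    }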
@@ -3184,23 +3785,13 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) | |||
3184 | struct ixgbe_hw *hw = &adapter->hw; | 3785 | struct ixgbe_hw *hw = &adapter->hw; |
3185 | int i; | 3786 | int i; |
3186 | 3787 | ||
3187 | ixgbe_set_rx_mode(netdev); | ||
3188 | |||
3189 | ixgbe_restore_vlan(adapter); | ||
3190 | #ifdef CONFIG_IXGBE_DCB | 3788 | #ifdef CONFIG_IXGBE_DCB |
3191 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 3789 | ixgbe_configure_dcb(adapter); |
3192 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
3193 | netif_set_gso_max_size(netdev, 32768); | ||
3194 | else | ||
3195 | netif_set_gso_max_size(netdev, 65536); | ||
3196 | ixgbe_configure_dcb(adapter); | ||
3197 | } else { | ||
3198 | netif_set_gso_max_size(netdev, 65536); | ||
3199 | } | ||
3200 | #else | ||
3201 | netif_set_gso_max_size(netdev, 65536); | ||
3202 | #endif | 3790 | #endif |
3203 | 3791 | ||
3792 | ixgbe_set_rx_mode(netdev); | ||
3793 | ixgbe_restore_vlan(adapter); | ||
3794 | |||
3204 | #ifdef IXGBE_FCOE | 3795 | #ifdef IXGBE_FCOE |
3205 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) | 3796 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) |
3206 | ixgbe_configure_fcoe(adapter); | 3797 | ixgbe_configure_fcoe(adapter); |
@@ -3209,17 +3800,15 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) | |||
3209 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { | 3800 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { |
3210 | for (i = 0; i < adapter->num_tx_queues; i++) | 3801 | for (i = 0; i < adapter->num_tx_queues; i++) |
3211 | adapter->tx_ring[i]->atr_sample_rate = | 3802 | adapter->tx_ring[i]->atr_sample_rate = |
3212 | adapter->atr_sample_rate; | 3803 | adapter->atr_sample_rate; |
3213 | ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); | 3804 | ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); |
3214 | } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { | 3805 | } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { |
3215 | ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc); | 3806 | ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc); |
3216 | } | 3807 | } |
3808 | ixgbe_configure_virtualization(adapter); | ||
3217 | 3809 | ||
3218 | ixgbe_configure_tx(adapter); | 3810 | ixgbe_configure_tx(adapter); |
3219 | ixgbe_configure_rx(adapter); | 3811 | ixgbe_configure_rx(adapter); |
3220 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
3221 | ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i], | ||
3222 | (adapter->rx_ring[i]->count - 1)); | ||
3223 | } | 3812 | } |
3224 | 3813 | ||
3225 | static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) | 3814 | static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) |
@@ -3245,30 +3834,16 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) | |||
3245 | **/ | 3834 | **/ |
3246 | static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) | 3835 | static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) |
3247 | { | 3836 | { |
3248 | struct ixgbe_hw *hw = &adapter->hw; | 3837 | /* |
3838 | * We are assuming the worst case scenario here, and that | ||
3839 | * is that an SFP was inserted/removed after the reset | ||
3840 | * but before SFP detection was enabled. As such the best | ||
3841 | * solution is to just start searching as soon as we start | ||
3842 | */ | ||
3843 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) | ||
3844 | adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; | ||
3249 | 3845 | ||
3250 | if (hw->phy.multispeed_fiber) { | 3846 | adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; |
3251 | /* | ||
3252 | * In multispeed fiber setups, the device may not have | ||
3253 | * had a physical connection when the driver loaded. | ||
3254 | * If that's the case, the initial link configuration | ||
3255 | * couldn't get the MAC into 10G or 1G mode, so we'll | ||
3256 | * never have a link status change interrupt fire. | ||
3257 | * We need to try and force an autonegotiation | ||
3258 | * session, then bring up link. | ||
3259 | */ | ||
3260 | hw->mac.ops.setup_sfp(hw); | ||
3261 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) | ||
3262 | schedule_work(&adapter->multispeed_fiber_task); | ||
3263 | } else { | ||
3264 | /* | ||
3265 | * Direct Attach Cu and non-multispeed fiber modules | ||
3266 | * still need to be configured properly prior to | ||
3267 | * attempting link. | ||
3268 | */ | ||
3269 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK)) | ||
3270 | schedule_work(&adapter->sfp_config_module_task); | ||
3271 | } | ||
3272 | } | 3847 | } |
3273 | 3848 | ||
3274 | /** | 3849 | /** |
@@ -3289,8 +3864,10 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) | |||
3289 | if (ret) | 3864 | if (ret) |
3290 | goto link_cfg_out; | 3865 | goto link_cfg_out; |
3291 | 3866 | ||
3292 | if (hw->mac.ops.get_link_capabilities) | 3867 | autoneg = hw->phy.autoneg_advertised; |
3293 | ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); | 3868 | if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) |
3869 | ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, | ||
3870 | &negotiation); | ||
3294 | if (ret) | 3871 | if (ret) |
3295 | goto link_cfg_out; | 3872 | goto link_cfg_out; |
3296 | 3873 | ||
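The change above makes a user-restricted advertised mask win: the MAC's full capability list is queried only when nothing was advertised, so a speed limit set through ethtool survives link setup. A tiny model of that preference, with placeholder speed bits:

    /* autoneg_model.c - model of the advertised-speed preference.  The
     * speed bit values and the capability stand-in are placeholders.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define LINK_SPEED_1GB  0x0020u
    #define LINK_SPEED_10GB 0x0080u

    static uint32_t link_capabilities(void)
    {
            return LINK_SPEED_1GB | LINK_SPEED_10GB;  /* MAC op stand-in */
    }

    int main(void)
    {
            uint32_t advertised = LINK_SPEED_1GB;     /* e.g. forced by user */
            uint32_t autoneg = advertised;

            if (!autoneg)
                    autoneg = link_capabilities();
            printf("speeds used for link setup: 0x%04x\n", (unsigned)autoneg);
            return 0;
    }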
@@ -3300,62 +3877,15 @@ link_cfg_out: | |||
3300 | return ret; | 3877 | return ret; |
3301 | } | 3878 | } |
3302 | 3879 | ||
3303 | #define IXGBE_MAX_RX_DESC_POLL 10 | 3880 | static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) |
3304 | static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, | ||
3305 | int rxr) | ||
3306 | { | ||
3307 | int j = adapter->rx_ring[rxr]->reg_idx; | ||
3308 | int k; | ||
3309 | |||
3310 | for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { | ||
3311 | if (IXGBE_READ_REG(&adapter->hw, | ||
3312 | IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE) | ||
3313 | break; | ||
3314 | else | ||
3315 | msleep(1); | ||
3316 | } | ||
3317 | if (k >= IXGBE_MAX_RX_DESC_POLL) { | ||
3318 | e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " | ||
3319 | "the polling period\n", rxr); | ||
3320 | } | ||
3321 | ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr], | ||
3322 | (adapter->rx_ring[rxr]->count - 1)); | ||
3323 | } | ||
3324 | |||
3325 | static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | ||
3326 | { | 3881 | { |
3327 | struct net_device *netdev = adapter->netdev; | ||
3328 | struct ixgbe_hw *hw = &adapter->hw; | 3882 | struct ixgbe_hw *hw = &adapter->hw; |
3329 | int i, j = 0; | 3883 | u32 gpie = 0; |
3330 | int num_rx_rings = adapter->num_rx_queues; | ||
3331 | int err; | ||
3332 | int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | ||
3333 | u32 txdctl, rxdctl, mhadd; | ||
3334 | u32 dmatxctl; | ||
3335 | u32 gpie; | ||
3336 | u32 ctrl_ext; | ||
3337 | |||
3338 | ixgbe_get_hw_control(adapter); | ||
3339 | |||
3340 | if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) || | ||
3341 | (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) { | ||
3342 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | ||
3343 | gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME | | ||
3344 | IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); | ||
3345 | } else { | ||
3346 | /* MSI only */ | ||
3347 | gpie = 0; | ||
3348 | } | ||
3349 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { | ||
3350 | gpie &= ~IXGBE_GPIE_VTMODE_MASK; | ||
3351 | gpie |= IXGBE_GPIE_VTMODE_64; | ||
3352 | } | ||
3353 | /* XXX: to interrupt immediately for EICS writes, enable this */ | ||
3354 | /* gpie |= IXGBE_GPIE_EIMEN; */ | ||
3355 | IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); | ||
3356 | } | ||
3357 | 3884 | ||
3358 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 3885 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
3886 | gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | | ||
3887 | IXGBE_GPIE_OCD; | ||
3888 | gpie |= IXGBE_GPIE_EIAME; | ||
3359 | /* | 3889 | /* |
3360 | * use EIAM to auto-mask when MSI-X interrupt is asserted | 3890 | * use EIAM to auto-mask when MSI-X interrupt is asserted |
3361 | * this saves a register write for every interrupt | 3891 | * this saves a register write for every interrupt |
@@ -3364,8 +3894,9 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
3364 | case ixgbe_mac_82598EB: | 3894 | case ixgbe_mac_82598EB: |
3365 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); | 3895 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); |
3366 | break; | 3896 | break; |
3367 | default: | ||
3368 | case ixgbe_mac_82599EB: | 3897 | case ixgbe_mac_82599EB: |
3898 | case ixgbe_mac_X540: | ||
3899 | default: | ||
3369 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); | 3900 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); |
3370 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); | 3901 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); |
3371 | break; | 3902 | break; |
@@ -3376,115 +3907,61 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
3376 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); | 3907 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); |
3377 | } | 3908 | } |
3378 | 3909 | ||
3379 | /* Enable Thermal over heat sensor interrupt */ | 3910 | /* XXX: to interrupt immediately for EICS writes, enable this */ |
3380 | if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { | 3911 | /* gpie |= IXGBE_GPIE_EIMEN; */ |
3381 | gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); | 3912 | |
3382 | gpie |= IXGBE_SDP0_GPIEN; | 3913 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { |
3383 | IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); | 3914 | gpie &= ~IXGBE_GPIE_VTMODE_MASK; |
3915 | gpie |= IXGBE_GPIE_VTMODE_64; | ||
3384 | } | 3916 | } |
3385 | 3917 | ||
3386 | /* Enable fan failure interrupt if media type is copper */ | 3918 | /* Enable fan failure interrupt */ |
3387 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { | 3919 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) |
3388 | gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); | ||
3389 | gpie |= IXGBE_SDP1_GPIEN; | 3920 | gpie |= IXGBE_SDP1_GPIEN; |
3390 | IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); | ||
3391 | } | ||
3392 | 3921 | ||
3393 | if (hw->mac.type == ixgbe_mac_82599EB) { | 3922 | if (hw->mac.type == ixgbe_mac_82599EB) { |
3394 | gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); | ||
3395 | gpie |= IXGBE_SDP1_GPIEN; | 3923 | gpie |= IXGBE_SDP1_GPIEN; |
3396 | gpie |= IXGBE_SDP2_GPIEN; | 3924 | gpie |= IXGBE_SDP2_GPIEN; |
3397 | IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); | ||
3398 | } | ||
3399 | |||
3400 | #ifdef IXGBE_FCOE | ||
3401 | /* adjust max frame to be able to do baby jumbo for FCoE */ | ||
3402 | if ((netdev->features & NETIF_F_FCOE_MTU) && | ||
3403 | (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) | ||
3404 | max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; | ||
3405 | |||
3406 | #endif /* IXGBE_FCOE */ | ||
3407 | mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); | ||
3408 | if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { | ||
3409 | mhadd &= ~IXGBE_MHADD_MFS_MASK; | ||
3410 | mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; | ||
3411 | |||
3412 | IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); | ||
3413 | } | 3925 | } |
3414 | 3926 | ||
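The removed block above (the logic now lives with the Rx setup helpers) sizes the maximum receive frame: MTU plus Ethernet header and FCS, bumped to the FCoE baby-jumbo size when FCoE MTU support is active, then written into the upper 16 bits of MHADD. A standalone model; the jumbo size is an assumed value:

    /* mhadd_model.c - model of the maximum-frame-size programming. */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define ETH_HLEN 14
    #define ETH_FCS_LEN 4
    #define FCOE_JUMBO_FRAME_SIZE 3072     /* assumed value for the demo */
    #define MHADD_MFS_SHIFT 16
    #define MHADD_MFS_MASK  0xFFFF0000u

    int main(void)
    {
            unsigned int mtu = 1500;
            bool fcoe_mtu = true;
            unsigned int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;
            uint32_t mhadd = 0;

            if (fcoe_mtu && max_frame < FCOE_JUMBO_FRAME_SIZE)
                    max_frame = FCOE_JUMBO_FRAME_SIZE;

            if (max_frame != (mhadd >> MHADD_MFS_SHIFT)) {
                    mhadd &= ~MHADD_MFS_MASK;
                    mhadd |= (uint32_t)max_frame << MHADD_MFS_SHIFT;
            }
            printf("MHADD = 0x%08x (max frame %u)\n",
                   (unsigned)mhadd, max_frame);
            return 0;
    }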
3415 | for (i = 0; i < adapter->num_tx_queues; i++) { | 3927 | IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); |
3416 | j = adapter->tx_ring[i]->reg_idx; | 3928 | } |
3417 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); | ||
3418 | if (adapter->rx_itr_setting == 0) { | ||
3419 | /* cannot set wthresh when itr==0 */ | ||
3420 | txdctl &= ~0x007F0000; | ||
3421 | } else { | ||
3422 | /* enable WTHRESH=8 descriptors, to encourage burst writeback */ | ||
3423 | txdctl |= (8 << 16); | ||
3424 | } | ||
3425 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); | ||
3426 | } | ||
3427 | 3929 | ||
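ixgbe_setup_gpie() above replaces four scattered read-modify-writes of GPIE with one word built from scratch at bring-up: MSI-X mode with auto-masking, the SR-IOV VT mode, and the SDP pins for fan failure and SFP module/link events. A hedged model of that composition; all bit values here are assumptions for the demo:

    /* gpie_model.c - model of building the GPIE word in one place. */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define GPIE_MSIX_MODE   0x00000010u
    #define GPIE_OCD         0x00000020u
    #define GPIE_EIAME       0x40000000u
    #define GPIE_PBA_SUPPORT 0x80000000u
    #define GPIE_VTMODE_MASK 0x0000C000u
    #define GPIE_VTMODE_64   0x0000C000u
    #define SDP1_GPIEN       0x00000002u
    #define SDP2_GPIEN       0x00000004u

    static uint32_t build_gpie(bool msix, bool sriov, bool fan_fail,
                               bool is_82599)
    {
            uint32_t gpie = 0;

            if (msix)
                    gpie = GPIE_MSIX_MODE | GPIE_PBA_SUPPORT |
                           GPIE_OCD | GPIE_EIAME;
            if (sriov) {
                    gpie &= ~GPIE_VTMODE_MASK;
                    gpie |= GPIE_VTMODE_64;
            }
            if (fan_fail)
                    gpie |= SDP1_GPIEN;
            if (is_82599)
                    gpie |= SDP1_GPIEN | SDP2_GPIEN; /* SFP module/link pins */
            return gpie;
    }

    int main(void)
    {
            printf("GPIE = 0x%08x\n",
                   (unsigned)build_gpie(true, false, false, true));
            return 0;
    }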
3428 | if (hw->mac.type == ixgbe_mac_82599EB) { | 3930 | static int ixgbe_up_complete(struct ixgbe_adapter *adapter) |
3429 | /* DMATXCTL.EN must be set after all Tx queue config is done */ | 3931 | { |
3430 | dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); | 3932 | struct ixgbe_hw *hw = &adapter->hw; |
3431 | dmatxctl |= IXGBE_DMATXCTL_TE; | 3933 | int err; |
3432 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); | 3934 | u32 ctrl_ext; |
3433 | } | ||
3434 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
3435 | j = adapter->tx_ring[i]->reg_idx; | ||
3436 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); | ||
3437 | txdctl |= IXGBE_TXDCTL_ENABLE; | ||
3438 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); | ||
3439 | if (hw->mac.type == ixgbe_mac_82599EB) { | ||
3440 | int wait_loop = 10; | ||
3441 | /* poll for Tx Enable ready */ | ||
3442 | do { | ||
3443 | msleep(1); | ||
3444 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); | ||
3445 | } while (--wait_loop && | ||
3446 | !(txdctl & IXGBE_TXDCTL_ENABLE)); | ||
3447 | if (!wait_loop) | ||
3448 | e_err(drv, "Could not enable Tx Queue %d\n", j); | ||
3449 | } | ||
3450 | } | ||
3451 | 3935 | ||
3452 | for (i = 0; i < num_rx_rings; i++) { | 3936 | ixgbe_get_hw_control(adapter); |
3453 | j = adapter->rx_ring[i]->reg_idx; | 3937 | ixgbe_setup_gpie(adapter); |
3454 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); | ||
3455 | /* enable PTHRESH=32 descriptors (half the internal cache) | ||
3456 | * and HTHRESH=0 descriptors (to minimize latency on fetch), | ||
3457 | * this also removes a pesky rx_no_buffer_count increment */ | ||
3458 | rxdctl |= 0x0020; | ||
3459 | rxdctl |= IXGBE_RXDCTL_ENABLE; | ||
3460 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl); | ||
3461 | if (hw->mac.type == ixgbe_mac_82599EB) | ||
3462 | ixgbe_rx_desc_queue_enable(adapter, i); | ||
3463 | } | ||
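The two removed loops above program the descriptor threshold fields that now move into the per-ring configuration helpers: WTHRESH=8 in bits 22:16 of TXDCTL to batch writebacks (cleared when interrupt throttling is off), and PTHRESH=32 in the low bits of RXDCTL. A small model of just that field packing:

    /* txrxdctl_model.c - model of the prefetch/writeback threshold
     * packing used by the old loops.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    static uint32_t tx_thresholds(uint32_t txdctl, bool itr_enabled)
    {
            txdctl &= ~0x007F0000u;      /* clear the WTHRESH field */
            if (itr_enabled)
                    txdctl |= 8u << 16;  /* WTHRESH = 8 descriptors */
            return txdctl;
    }

    static uint32_t rx_thresholds(uint32_t rxdctl)
    {
            return rxdctl | 0x0020u;     /* PTHRESH = 32 descriptors */
    }

    int main(void)
    {
            printf("TXDCTL = 0x%08x\n", (unsigned)tx_thresholds(0, true));
            printf("RXDCTL = 0x%08x\n", (unsigned)rx_thresholds(0));
            return 0;
    }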
3464 | /* enable all receives */ | ||
3465 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); | ||
3466 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
3467 | rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN); | ||
3468 | else | ||
3469 | rxdctl |= IXGBE_RXCTRL_RXEN; | ||
3470 | hw->mac.ops.enable_rx_dma(hw, rxdctl); | ||
3471 | 3938 | ||
3472 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | 3939 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) |
3473 | ixgbe_configure_msix(adapter); | 3940 | ixgbe_configure_msix(adapter); |
3474 | else | 3941 | else |
3475 | ixgbe_configure_msi_and_legacy(adapter); | 3942 | ixgbe_configure_msi_and_legacy(adapter); |
3476 | 3943 | ||
3477 | /* enable the optics */ | 3944 | /* enable the optics for both multi-speed fiber and 82599 SFP+ fiber */ |
3478 | if (hw->phy.multispeed_fiber) | 3945 | if (hw->mac.ops.enable_tx_laser && |
3946 | ((hw->phy.multispeed_fiber) || | ||
3947 | ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && | ||
3948 | (hw->mac.type == ixgbe_mac_82599EB)))) | ||
3479 | hw->mac.ops.enable_tx_laser(hw); | 3949 | hw->mac.ops.enable_tx_laser(hw); |
3480 | 3950 | ||
3481 | clear_bit(__IXGBE_DOWN, &adapter->state); | 3951 | clear_bit(__IXGBE_DOWN, &adapter->state); |
3482 | ixgbe_napi_enable_all(adapter); | 3952 | ixgbe_napi_enable_all(adapter); |
3483 | 3953 | ||
3954 | if (ixgbe_is_sfp(hw)) { | ||
3955 | ixgbe_sfp_link_config(adapter); | ||
3956 | } else { | ||
3957 | err = ixgbe_non_sfp_link_config(hw); | ||
3958 | if (err) | ||
3959 | e_err(probe, "link_config FAILED %d\n", err); | ||
3960 | } | ||
3961 | |||
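The optics handling above (mirrored by the disable path in ixgbe_down) only touches the laser when the MAC actually provides the hook, and only for multispeed fiber modules or plain 82599 SFP+ fiber. A sketch of that predicate as a standalone boolean helper:

    /* tx_laser_model.c - model of the optics gating condition. */
    #include <stdio.h>
    #include <stdbool.h>

    enum media_type { MEDIA_COPPER, MEDIA_FIBER };
    enum mac_type   { MAC_82598EB, MAC_82599EB, MAC_X540 };

    static bool should_toggle_tx_laser(bool has_laser_op,
                                       bool multispeed_fiber,
                                       enum media_type media,
                                       enum mac_type mac)
    {
            return has_laser_op &&
                   (multispeed_fiber ||
                    (media == MEDIA_FIBER && mac == MAC_82599EB));
    }

    int main(void)
    {
            printf("toggle laser: %d\n",
                   should_toggle_tx_laser(true, false, MEDIA_FIBER,
                                          MAC_82599EB));
            return 0;
    }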
3484 | /* clear any pending interrupts, may auto mask */ | 3962 | /* clear any pending interrupts, may auto mask */ |
3485 | IXGBE_READ_REG(hw, IXGBE_EICR); | 3963 | IXGBE_READ_REG(hw, IXGBE_EICR); |
3486 | 3964 | ixgbe_irq_enable(adapter, true, true); | |
3487 | ixgbe_irq_enable(adapter); | ||
3488 | 3965 | ||
3489 | /* | 3966 | /* |
3490 | * If this adapter has a fan, check to see if we had a failure | 3967 | * If this adapter has a fan, check to see if we had a failure |
@@ -3496,47 +3973,14 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
3496 | e_crit(drv, "Fan has stopped, replace the adapter\n"); | 3973 | e_crit(drv, "Fan has stopped, replace the adapter\n"); |
3497 | } | 3974 | } |
3498 | 3975 | ||
3499 | /* | ||
3500 | * For hot-pluggable SFP+ devices, a new SFP+ module may have | ||
3501 | * arrived before interrupts were enabled but after probe. Such | ||
3502 | * devices wouldn't have their type identified yet. We need to | ||
3503 | * kick off the SFP+ module setup first, then try to bring up link. | ||
3504 | * If we're not hot-pluggable SFP+, we just need to configure link | ||
3505 | * and bring it up. | ||
3506 | */ | ||
3507 | if (hw->phy.type == ixgbe_phy_unknown) { | ||
3508 | err = hw->phy.ops.identify(hw); | ||
3509 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | ||
3510 | /* | ||
3511 | * Take the device down and schedule the sfp tasklet | ||
3512 | * which will unregister_netdev and log it. | ||
3513 | */ | ||
3514 | ixgbe_down(adapter); | ||
3515 | schedule_work(&adapter->sfp_config_module_task); | ||
3516 | return err; | ||
3517 | } | ||
3518 | } | ||
3519 | |||
3520 | if (ixgbe_is_sfp(hw)) { | ||
3521 | ixgbe_sfp_link_config(adapter); | ||
3522 | } else { | ||
3523 | err = ixgbe_non_sfp_link_config(hw); | ||
3524 | if (err) | ||
3525 | e_err(probe, "link_config FAILED %d\n", err); | ||
3526 | } | ||
3527 | |||
3528 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
3529 | set_bit(__IXGBE_FDIR_INIT_DONE, | ||
3530 | &(adapter->tx_ring[i]->reinit_state)); | ||
3531 | |||
3532 | /* enable transmits */ | 3976 | /* enable transmits */ |
3533 | netif_tx_start_all_queues(netdev); | 3977 | netif_tx_start_all_queues(adapter->netdev); |
3534 | 3978 | ||
3535 | /* bring the link up in the watchdog, this could race with our first | 3979 | /* bring the link up in the watchdog, this could race with our first |
3536 | * link up interrupt but shouldn't be a problem */ | 3980 | * link up interrupt but shouldn't be a problem */ |
3537 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; | 3981 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; |
3538 | adapter->link_check_timeout = jiffies; | 3982 | adapter->link_check_timeout = jiffies; |
3539 | mod_timer(&adapter->watchdog_timer, jiffies); | 3983 | mod_timer(&adapter->service_timer, jiffies); |
3540 | 3984 | ||
3541 | /* Set PF Reset Done bit so PF/VF Mail Ops can work */ | 3985 | /* Set PF Reset Done bit so PF/VF Mail Ops can work */ |
3542 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); | 3986 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); |
@@ -3549,8 +3993,11 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
3549 | void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) | 3993 | void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) |
3550 | { | 3994 | { |
3551 | WARN_ON(in_interrupt()); | 3995 | WARN_ON(in_interrupt()); |
3996 | /* put off any impending NetWatchDogTimeout */ | ||
3997 | adapter->netdev->trans_start = jiffies; | ||
3998 | |||
3552 | while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) | 3999 | while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) |
3553 | msleep(1); | 4000 | usleep_range(1000, 2000); |
3554 | ixgbe_down(adapter); | 4001 | ixgbe_down(adapter); |
3555 | /* | 4002 | /* |
3556 | * If SR-IOV enabled then wait a bit before bringing the adapter | 4003 | * If SR-IOV enabled then wait a bit before bringing the adapter |
@@ -3577,10 +4024,20 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) | |||
3577 | struct ixgbe_hw *hw = &adapter->hw; | 4024 | struct ixgbe_hw *hw = &adapter->hw; |
3578 | int err; | 4025 | int err; |
3579 | 4026 | ||
4027 | /* lock SFP init bit to prevent race conditions with the watchdog */ | ||
4028 | while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) | ||
4029 | usleep_range(1000, 2000); | ||
4030 | |||
4031 | /* clear all SFP and link config related flags while holding SFP_INIT */ | ||
4032 | adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | | ||
4033 | IXGBE_FLAG2_SFP_NEEDS_RESET); | ||
4034 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; | ||
4035 | |||
3580 | err = hw->mac.ops.init_hw(hw); | 4036 | err = hw->mac.ops.init_hw(hw); |
3581 | switch (err) { | 4037 | switch (err) { |
3582 | case 0: | 4038 | case 0: |
3583 | case IXGBE_ERR_SFP_NOT_PRESENT: | 4039 | case IXGBE_ERR_SFP_NOT_PRESENT: |
4040 | case IXGBE_ERR_SFP_NOT_SUPPORTED: | ||
3584 | break; | 4041 | break; |
3585 | case IXGBE_ERR_MASTER_REQUESTS_PENDING: | 4042 | case IXGBE_ERR_MASTER_REQUESTS_PENDING: |
3586 | e_dev_err("master disable timed out\n"); | 4043 | e_dev_err("master disable timed out\n"); |
@@ -3598,6 +4055,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) | |||
3598 | e_dev_err("Hardware Error: %d\n", err); | 4055 | e_dev_err("Hardware Error: %d\n", err); |
3599 | } | 4056 | } |
3600 | 4057 | ||
4058 | clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); | ||
4059 | |||
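The __IXGBE_IN_SFP_INIT handling above is a simple bit lock: spin on test_and_set_bit with a short sleep, run init_hw() with the SFP and link-config flags cleared, then release the bit so the watchdog's SFP path can proceed. A userspace model of the same pattern using a C11 atomic flag:

    /* sfp_init_lock_model.c - model of the reset/watchdog exclusion. */
    #include <stdio.h>
    #include <stdatomic.h>
    #include <unistd.h>

    static atomic_flag sfp_init_busy = ATOMIC_FLAG_INIT;

    static void reset_adapter_model(void)
    {
            while (atomic_flag_test_and_set(&sfp_init_busy))
                    usleep(1000);       /* like usleep_range(1000, 2000) */

            printf("flags cleared, init_hw() runs with SFP init excluded\n");

            atomic_flag_clear(&sfp_init_busy);
    }

    int main(void)
    {
            reset_adapter_model();
            return 0;
    }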
3601 | /* reprogram the RAR[0] in case user changed it. */ | 4060 | /* reprogram the RAR[0] in case user changed it. */ |
3602 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, | 4061 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, |
3603 | IXGBE_RAH_AV); | 4062 | IXGBE_RAH_AV); |
@@ -3605,25 +4064,26 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) | |||
3605 | 4064 | ||
3606 | /** | 4065 | /** |
3607 | * ixgbe_clean_rx_ring - Free Rx Buffers per Queue | 4066 | * ixgbe_clean_rx_ring - Free Rx Buffers per Queue |
3608 | * @adapter: board private structure | ||
3609 | * @rx_ring: ring to free buffers from | 4067 | * @rx_ring: ring to free buffers from |
3610 | **/ | 4068 | **/ |
3611 | static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | 4069 | static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) |
3612 | struct ixgbe_ring *rx_ring) | ||
3613 | { | 4070 | { |
3614 | struct pci_dev *pdev = adapter->pdev; | 4071 | struct device *dev = rx_ring->dev; |
3615 | unsigned long size; | 4072 | unsigned long size; |
3616 | unsigned int i; | 4073 | u16 i; |
3617 | 4074 | ||
3618 | /* Free all the Rx ring sk_buffs */ | 4075 | /* ring already cleared, nothing to do */ |
4076 | if (!rx_ring->rx_buffer_info) | ||
4077 | return; | ||
3619 | 4078 | ||
4079 | /* Free all the Rx ring sk_buffs */ | ||
3620 | for (i = 0; i < rx_ring->count; i++) { | 4080 | for (i = 0; i < rx_ring->count; i++) { |
3621 | struct ixgbe_rx_buffer *rx_buffer_info; | 4081 | struct ixgbe_rx_buffer *rx_buffer_info; |
3622 | 4082 | ||
3623 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; | 4083 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; |
3624 | if (rx_buffer_info->dma) { | 4084 | if (rx_buffer_info->dma) { |
3625 | dma_unmap_single(&pdev->dev, rx_buffer_info->dma, | 4085 | dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, |
3626 | rx_ring->rx_buf_len, | 4086 | rx_ring->rx_buf_len, |
3627 | DMA_FROM_DEVICE); | 4087 | DMA_FROM_DEVICE); |
3628 | rx_buffer_info->dma = 0; | 4088 | rx_buffer_info->dma = 0; |
3629 | } | 4089 | } |
@@ -3633,9 +4093,9 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | |||
3633 | do { | 4093 | do { |
3634 | struct sk_buff *this = skb; | 4094 | struct sk_buff *this = skb; |
3635 | if (IXGBE_RSC_CB(this)->delay_unmap) { | 4095 | if (IXGBE_RSC_CB(this)->delay_unmap) { |
3636 | dma_unmap_single(&pdev->dev, | 4096 | dma_unmap_single(dev, |
3637 | IXGBE_RSC_CB(this)->dma, | 4097 | IXGBE_RSC_CB(this)->dma, |
3638 | rx_ring->rx_buf_len, | 4098 | rx_ring->rx_buf_len, |
3639 | DMA_FROM_DEVICE); | 4099 | DMA_FROM_DEVICE); |
3640 | IXGBE_RSC_CB(this)->dma = 0; | 4100 | IXGBE_RSC_CB(this)->dma = 0; |
3641 | IXGBE_RSC_CB(skb)->delay_unmap = false; | 4101 | IXGBE_RSC_CB(skb)->delay_unmap = false; |
@@ -3647,7 +4107,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | |||
3647 | if (!rx_buffer_info->page) | 4107 | if (!rx_buffer_info->page) |
3648 | continue; | 4108 | continue; |
3649 | if (rx_buffer_info->page_dma) { | 4109 | if (rx_buffer_info->page_dma) { |
3650 | dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, | 4110 | dma_unmap_page(dev, rx_buffer_info->page_dma, |
3651 | PAGE_SIZE / 2, DMA_FROM_DEVICE); | 4111 | PAGE_SIZE / 2, DMA_FROM_DEVICE); |
3652 | rx_buffer_info->page_dma = 0; | 4112 | rx_buffer_info->page_dma = 0; |
3653 | } | 4113 | } |
@@ -3664,30 +4124,26 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | |||
3664 | 4124 | ||
3665 | rx_ring->next_to_clean = 0; | 4125 | rx_ring->next_to_clean = 0; |
3666 | rx_ring->next_to_use = 0; | 4126 | rx_ring->next_to_use = 0; |
3667 | |||
3668 | if (rx_ring->head) | ||
3669 | writel(0, adapter->hw.hw_addr + rx_ring->head); | ||
3670 | if (rx_ring->tail) | ||
3671 | writel(0, adapter->hw.hw_addr + rx_ring->tail); | ||
3672 | } | 4127 | } |
3673 | 4128 | ||
3674 | /** | 4129 | /** |
3675 | * ixgbe_clean_tx_ring - Free Tx Buffers | 4130 | * ixgbe_clean_tx_ring - Free Tx Buffers |
3676 | * @adapter: board private structure | ||
3677 | * @tx_ring: ring to be cleaned | 4131 | * @tx_ring: ring to be cleaned |
3678 | **/ | 4132 | **/ |
3679 | static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, | 4133 | static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) |
3680 | struct ixgbe_ring *tx_ring) | ||
3681 | { | 4134 | { |
3682 | struct ixgbe_tx_buffer *tx_buffer_info; | 4135 | struct ixgbe_tx_buffer *tx_buffer_info; |
3683 | unsigned long size; | 4136 | unsigned long size; |
3684 | unsigned int i; | 4137 | u16 i; |
3685 | 4138 | ||
3686 | /* Free all the Tx ring sk_buffs */ | 4139 | /* ring already cleared, nothing to do */ |
4140 | if (!tx_ring->tx_buffer_info) | ||
4141 | return; | ||
3687 | 4142 | ||
4143 | /* Free all the Tx ring sk_buffs */ | ||
3688 | for (i = 0; i < tx_ring->count; i++) { | 4144 | for (i = 0; i < tx_ring->count; i++) { |
3689 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 4145 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
3690 | ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); | 4146 | ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); |
3691 | } | 4147 | } |
3692 | 4148 | ||
3693 | size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; | 4149 | size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; |
@@ -3698,11 +4154,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, | |||
3698 | 4154 | ||
3699 | tx_ring->next_to_use = 0; | 4155 | tx_ring->next_to_use = 0; |
3700 | tx_ring->next_to_clean = 0; | 4156 | tx_ring->next_to_clean = 0; |
3701 | |||
3702 | if (tx_ring->head) | ||
3703 | writel(0, adapter->hw.hw_addr + tx_ring->head); | ||
3704 | if (tx_ring->tail) | ||
3705 | writel(0, adapter->hw.hw_addr + tx_ring->tail); | ||
3706 | } | 4157 | } |
3707 | 4158 | ||
3708 | /** | 4159 | /** |
@@ -3714,7 +4165,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) | |||
3714 | int i; | 4165 | int i; |
3715 | 4166 | ||
3716 | for (i = 0; i < adapter->num_rx_queues; i++) | 4167 | for (i = 0; i < adapter->num_rx_queues; i++) |
3717 | ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]); | 4168 | ixgbe_clean_rx_ring(adapter->rx_ring[i]); |
3718 | } | 4169 | } |
3719 | 4170 | ||
3720 | /** | 4171 | /** |
@@ -3726,7 +4177,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) | |||
3726 | int i; | 4177 | int i; |
3727 | 4178 | ||
3728 | for (i = 0; i < adapter->num_tx_queues; i++) | 4179 | for (i = 0; i < adapter->num_tx_queues; i++) |
3729 | ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]); | 4180 | ixgbe_clean_tx_ring(adapter->tx_ring[i]); |
3730 | } | 4181 | } |
3731 | 4182 | ||
3732 | void ixgbe_down(struct ixgbe_adapter *adapter) | 4183 | void ixgbe_down(struct ixgbe_adapter *adapter) |
@@ -3734,39 +4185,26 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
3734 | struct net_device *netdev = adapter->netdev; | 4185 | struct net_device *netdev = adapter->netdev; |
3735 | struct ixgbe_hw *hw = &adapter->hw; | 4186 | struct ixgbe_hw *hw = &adapter->hw; |
3736 | u32 rxctrl; | 4187 | u32 rxctrl; |
3737 | u32 txdctl; | 4188 | int i; |
3738 | int i, j; | 4189 | int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
3739 | 4190 | ||
3740 | /* signal that we are down to the interrupt handler */ | 4191 | /* signal that we are down to the interrupt handler */ |
3741 | set_bit(__IXGBE_DOWN, &adapter->state); | 4192 | set_bit(__IXGBE_DOWN, &adapter->state); |
3742 | 4193 | ||
3743 | /* disable receive for all VFs and wait one second */ | ||
3744 | if (adapter->num_vfs) { | ||
3745 | /* ping all the active vfs to let them know we are going down */ | ||
3746 | ixgbe_ping_all_vfs(adapter); | ||
3747 | |||
3748 | /* Disable all VFTE/VFRE TX/RX */ | ||
3749 | ixgbe_disable_tx_rx(adapter); | ||
3750 | |||
3751 | /* Mark all the VFs as inactive */ | ||
3752 | for (i = 0 ; i < adapter->num_vfs; i++) | ||
3753 | adapter->vfinfo[i].clear_to_send = 0; | ||
3754 | } | ||
3755 | |||
3756 | /* disable receives */ | 4194 | /* disable receives */ |
3757 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); | 4195 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); |
3758 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); | 4196 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); |
3759 | 4197 | ||
3760 | IXGBE_WRITE_FLUSH(hw); | 4198 | /* disable all enabled rx queues */ |
3761 | msleep(10); | 4199 | for (i = 0; i < adapter->num_rx_queues; i++) |
4200 | /* this call also flushes the previous write */ | ||
4201 | ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); | ||
3762 | 4202 | ||
3763 | netif_tx_stop_all_queues(netdev); | 4203 | usleep_range(10000, 20000); |
3764 | 4204 | ||
3765 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | 4205 | netif_tx_stop_all_queues(netdev); |
3766 | del_timer_sync(&adapter->sfp_timer); | ||
3767 | del_timer_sync(&adapter->watchdog_timer); | ||
3768 | cancel_work_sync(&adapter->watchdog_task); | ||
3769 | 4206 | ||
4207 | /* call carrier off first to avoid false dev_watchdog timeouts */ | ||
3770 | netif_carrier_off(netdev); | 4208 | netif_carrier_off(netdev); |
3771 | netif_tx_disable(netdev); | 4209 | netif_tx_disable(netdev); |
3772 | 4210 | ||
@@ -3774,35 +4212,62 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
3774 | 4212 | ||
3775 | ixgbe_napi_disable_all(adapter); | 4213 | ixgbe_napi_disable_all(adapter); |
3776 | 4214 | ||
3777 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | 4215 | adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | |
3778 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | 4216 | IXGBE_FLAG2_RESET_REQUESTED); |
3779 | cancel_work_sync(&adapter->fdir_reinit_task); | 4217 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; |
3780 | 4218 | ||
3781 | if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) | 4219 | del_timer_sync(&adapter->service_timer); |
3782 | cancel_work_sync(&adapter->check_overtemp_task); | 4220 | |
4221 | /* disable receive for all VFs and wait one second */ | ||
4222 | if (adapter->num_vfs) { | ||
4223 | /* ping all the active vfs to let them know we are going down */ | ||
4224 | ixgbe_ping_all_vfs(adapter); | ||
4225 | |||
4226 | /* Disable all VFTE/VFRE TX/RX */ | ||
4227 | ixgbe_disable_tx_rx(adapter); | ||
4228 | |||
4229 | /* Mark all the VFs as inactive */ | ||
4230 | for (i = 0 ; i < adapter->num_vfs; i++) | ||
4231 | adapter->vfinfo[i].clear_to_send = 0; | ||
4232 | } | ||
4233 | |||
4234 | /* Cleanup the affinity_hint CPU mask memory and callback */ | ||
4235 | for (i = 0; i < num_q_vectors; i++) { | ||
4236 | struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; | ||
4237 | /* clear the affinity_mask in the IRQ descriptor */ | ||
4238 | irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL); | ||
4239 | /* release the CPU mask memory */ | ||
4240 | free_cpumask_var(q_vector->affinity_mask); | ||
4241 | } | ||
3783 | 4242 | ||
3784 | /* disable transmits in the hardware now that interrupts are off */ | 4243 | /* disable transmits in the hardware now that interrupts are off */ |
3785 | for (i = 0; i < adapter->num_tx_queues; i++) { | 4244 | for (i = 0; i < adapter->num_tx_queues; i++) { |
3786 | j = adapter->tx_ring[i]->reg_idx; | 4245 | u8 reg_idx = adapter->tx_ring[i]->reg_idx; |
3787 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); | 4246 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); |
3788 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), | ||
3789 | (txdctl & ~IXGBE_TXDCTL_ENABLE)); | ||
3790 | } | 4247 | } |
3791 | /* Disable the Tx DMA engine on 82599 */ | ||
3792 | if (hw->mac.type == ixgbe_mac_82599EB) | ||
3793 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, | ||
3794 | (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & | ||
3795 | ~IXGBE_DMATXCTL_TE)); | ||
3796 | |||
3797 | /* power down the optics */ | ||
3798 | if (hw->phy.multispeed_fiber) | ||
3799 | hw->mac.ops.disable_tx_laser(hw); | ||
3800 | 4248 | ||
3801 | /* clear n-tuple filters that are cached */ | 4249 | /* Disable the Tx DMA engine on 82599 and X540 */ |
3802 | ethtool_ntuple_flush(netdev); | 4250 | switch (hw->mac.type) { |
4251 | case ixgbe_mac_82599EB: | ||
4252 | case ixgbe_mac_X540: | ||
4253 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, | ||
4254 | (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & | ||
4255 | ~IXGBE_DMATXCTL_TE)); | ||
4256 | break; | ||
4257 | default: | ||
4258 | break; | ||
4259 | } | ||
3803 | 4260 | ||
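The Tx shutdown above happens in two steps: every queue first gets a software-flush write to TXDCTL, and only then is the global DMA Tx enable cleared, which 82598 does not have, hence the switch on MAC type. A sketch of that ordering with register writes reduced to prints; the SWFLSH value is an assumption:

    /* tx_quiesce_model.c - model of the Tx teardown order in ixgbe_down. */
    #include <stdio.h>

    #define TXDCTL_SWFLSH 0x04000000u    /* assumed software-flush bit */
    #define NUM_TX_QUEUES 4

    enum mac_type { MAC_82598EB, MAC_82599EB, MAC_X540 };

    static void quiesce_tx(enum mac_type mac)
    {
            int i;

            for (i = 0; i < NUM_TX_QUEUES; i++)
                    printf("TXDCTL(%d) <- 0x%08x (SWFLSH)\n", i,
                           (unsigned int)TXDCTL_SWFLSH);

            if (mac == MAC_82599EB || mac == MAC_X540)
                    printf("DMATXCTL: clear TE (global Tx DMA off)\n");
    }

    int main(void)
    {
            quiesce_tx(MAC_X540);
            return 0;
    }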
3804 | if (!pci_channel_offline(adapter->pdev)) | 4261 | if (!pci_channel_offline(adapter->pdev)) |
3805 | ixgbe_reset(adapter); | 4262 | ixgbe_reset(adapter); |
4263 | |||
4264 | /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ | ||
4265 | if (hw->mac.ops.disable_tx_laser && | ||
4266 | ((hw->phy.multispeed_fiber) || | ||
4267 | ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && | ||
4268 | (hw->mac.type == ixgbe_mac_82599EB)))) | ||
4269 | hw->mac.ops.disable_tx_laser(hw); | ||
4270 | |||
3806 | ixgbe_clean_all_tx_rings(adapter); | 4271 | ixgbe_clean_all_tx_rings(adapter); |
3807 | ixgbe_clean_all_rx_rings(adapter); | 4272 | ixgbe_clean_all_rx_rings(adapter); |
3808 | 4273 | ||
@@ -3822,15 +4287,13 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
3822 | static int ixgbe_poll(struct napi_struct *napi, int budget) | 4287 | static int ixgbe_poll(struct napi_struct *napi, int budget) |
3823 | { | 4288 | { |
3824 | struct ixgbe_q_vector *q_vector = | 4289 | struct ixgbe_q_vector *q_vector = |
3825 | container_of(napi, struct ixgbe_q_vector, napi); | 4290 | container_of(napi, struct ixgbe_q_vector, napi); |
3826 | struct ixgbe_adapter *adapter = q_vector->adapter; | 4291 | struct ixgbe_adapter *adapter = q_vector->adapter; |
3827 | int tx_clean_complete, work_done = 0; | 4292 | int tx_clean_complete, work_done = 0; |
3828 | 4293 | ||
3829 | #ifdef CONFIG_IXGBE_DCA | 4294 | #ifdef CONFIG_IXGBE_DCA |
3830 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { | 4295 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
3831 | ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]); | 4296 | ixgbe_update_dca(q_vector); |
3832 | ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]); | ||
3833 | } | ||
3834 | #endif | 4297 | #endif |
3835 | 4298 | ||
3836 | tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); | 4299 | tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); |
@@ -3859,44 +4322,9 @@ static void ixgbe_tx_timeout(struct net_device *netdev) | |||
3859 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 4322 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
3860 | 4323 | ||
3861 | /* Do the reset outside of interrupt context */ | 4324 | /* Do the reset outside of interrupt context */ |
3862 | schedule_work(&adapter->reset_task); | 4325 | ixgbe_tx_timeout_reset(adapter); |
3863 | } | ||
3864 | |||
3865 | static void ixgbe_reset_task(struct work_struct *work) | ||
3866 | { | ||
3867 | struct ixgbe_adapter *adapter; | ||
3868 | adapter = container_of(work, struct ixgbe_adapter, reset_task); | ||
3869 | |||
3870 | /* If we're already down or resetting, just bail */ | ||
3871 | if (test_bit(__IXGBE_DOWN, &adapter->state) || | ||
3872 | test_bit(__IXGBE_RESETTING, &adapter->state)) | ||
3873 | return; | ||
3874 | |||
3875 | adapter->tx_timeout_count++; | ||
3876 | |||
3877 | ixgbe_dump(adapter); | ||
3878 | netdev_err(adapter->netdev, "Reset adapter\n"); | ||
3879 | ixgbe_reinit_locked(adapter); | ||
3880 | } | 4326 | } |
3881 | 4327 | ||
3882 | #ifdef CONFIG_IXGBE_DCB | ||
3883 | static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) | ||
3884 | { | ||
3885 | bool ret = false; | ||
3886 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB]; | ||
3887 | |||
3888 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) | ||
3889 | return ret; | ||
3890 | |||
3891 | f->mask = 0x7 << 3; | ||
3892 | adapter->num_rx_queues = f->indices; | ||
3893 | adapter->num_tx_queues = f->indices; | ||
3894 | ret = true; | ||
3895 | |||
3896 | return ret; | ||
3897 | } | ||
3898 | #endif | ||
3899 | |||
3900 | /** | 4328 | /** |
3901 | * ixgbe_set_rss_queues: Allocate queues for RSS | 4329 | * ixgbe_set_rss_queues: Allocate queues for RSS |
3902 | * @adapter: board private structure to initialize | 4330 | * @adapter: board private structure to initialize |
@@ -3932,7 +4360,7 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) | |||
3932 | * Rx load across CPUs using RSS. | 4360 | * Rx load across CPUs using RSS. |
3933 | * | 4361 | * |
3934 | **/ | 4362 | **/ |
3935 | static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) | 4363 | static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) |
3936 | { | 4364 | { |
3937 | bool ret = false; | 4365 | bool ret = false; |
3938 | struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; | 4366 | struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; |
@@ -3967,19 +4395,26 @@ static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) | |||
3967 | **/ | 4395 | **/ |
3968 | static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) | 4396 | static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) |
3969 | { | 4397 | { |
3970 | bool ret = false; | ||
3971 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | 4398 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; |
3972 | 4399 | ||
3973 | f->indices = min((int)num_online_cpus(), f->indices); | 4400 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) |
3974 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | 4401 | return false; |
3975 | adapter->num_rx_queues = 1; | 4402 | |
3976 | adapter->num_tx_queues = 1; | 4403 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
3977 | #ifdef CONFIG_IXGBE_DCB | 4404 | #ifdef CONFIG_IXGBE_DCB |
3978 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 4405 | int tc; |
3979 | e_info(probe, "FCoE enabled with DCB\n"); | 4406 | struct net_device *dev = adapter->netdev; |
3980 | ixgbe_set_dcb_queues(adapter); | 4407 | |
3981 | } | 4408 | tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up); |
4409 | f->indices = dev->tc_to_txq[tc].count; | ||
4410 | f->mask = dev->tc_to_txq[tc].offset; | ||
3982 | #endif | 4411 | #endif |
4412 | } else { | ||
4413 | f->indices = min((int)num_online_cpus(), f->indices); | ||
4414 | |||
4415 | adapter->num_rx_queues = 1; | ||
4416 | adapter->num_tx_queues = 1; | ||
4417 | |||
3983 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | 4418 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { |
3984 | e_info(probe, "FCoE enabled with RSS\n"); | 4419 | e_info(probe, "FCoE enabled with RSS\n"); |
3985 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || | 4420 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || |
@@ -3992,14 +4427,45 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) | |||
3992 | f->mask = adapter->num_rx_queues; | 4427 | f->mask = adapter->num_rx_queues; |
3993 | adapter->num_rx_queues += f->indices; | 4428 | adapter->num_rx_queues += f->indices; |
3994 | adapter->num_tx_queues += f->indices; | 4429 | adapter->num_tx_queues += f->indices; |
4430 | } | ||
3995 | 4431 | ||
3996 | ret = true; | 4432 | return true; |
4433 | } | ||
4434 | #endif /* IXGBE_FCOE */ | ||
4435 | |||
4436 | #ifdef CONFIG_IXGBE_DCB | ||
4437 | static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) | ||
4438 | { | ||
4439 | bool ret = false; | ||
4440 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB]; | ||
4441 | int i, q; | ||
4442 | |||
4443 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) | ||
4444 | return ret; | ||
4445 | |||
4446 | f->indices = 0; | ||
4447 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { | ||
4448 | q = min((int)num_online_cpus(), MAX_TRAFFIC_CLASS); | ||
4449 | f->indices += q; | ||
3997 | } | 4450 | } |
3998 | 4451 | ||
4452 | f->mask = 0x7 << 3; | ||
4453 | adapter->num_rx_queues = f->indices; | ||
4454 | adapter->num_tx_queues = f->indices; | ||
4455 | ret = true; | ||
4456 | |||
4457 | #ifdef IXGBE_FCOE | ||
4458 | /* FCoE enabled queues require special configuration done through | ||
4459 | * configure_fcoe() and others. Here we map FCoE indices onto the | ||
4460 | * DCB queue pairs allowing FCoE to own configuration later. | ||
4461 | */ | ||
4462 | ixgbe_set_fcoe_queues(adapter); | ||
4463 | #endif | ||
4464 | |||
3999 | return ret; | 4465 | return ret; |
4000 | } | 4466 | } |
4467 | #endif | ||
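The DCB sizing above gives each of the eight traffic classes min(online CPUs, 8) queue pairs instead of the old fixed eight rings, so the total ring count is that product. A standalone model with an assumed CPU count:

    /* dcb_queues_model.c - model of the DCB queue-count computation. */
    #include <stdio.h>

    #define MAX_TRAFFIC_CLASS 8

    int main(void)
    {
            int online_cpus = 4;             /* assumed CPU count */
            int per_tc = online_cpus < MAX_TRAFFIC_CLASS ? online_cpus
                                                         : MAX_TRAFFIC_CLASS;
            int indices = 0;
            int i;

            for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
                    indices += per_tc;

            printf("%d queues across %d TCs (%d per TC)\n",
                   indices, MAX_TRAFFIC_CLASS, per_tc);
            return 0;
    }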
4001 | 4468 | ||
4002 | #endif /* IXGBE_FCOE */ | ||
4003 | /** | 4469 | /** |
4004 | * ixgbe_set_sriov_queues: Allocate queues for IOV use | 4470 | * ixgbe_set_sriov_queues: Allocate queues for IOV use |
4005 | * @adapter: board private structure to initialize | 4471 | * @adapter: board private structure to initialize |
@@ -4014,7 +4480,7 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) | |||
4014 | } | 4480 | } |
4015 | 4481 | ||
4016 | /* | 4482 | /* |
4017 | * ixgbe_set_num_queues: Allocate queues for device, feature dependant | 4483 | * ixgbe_set_num_queues: Allocate queues for device, feature dependent |
4018 | * @adapter: board private structure to initialize | 4484 | * @adapter: board private structure to initialize |
4019 | * | 4485 | * |
4020 | * This is the top level queue allocation routine. The order here is very | 4486 | * This is the top level queue allocation routine. The order here is very |
@@ -4024,7 +4490,7 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) | |||
4024 | * fallthrough conditions. | 4490 | * fallthrough conditions. |
4025 | * | 4491 | * |
4026 | **/ | 4492 | **/ |
4027 | static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | 4493 | static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) |
4028 | { | 4494 | { |
4029 | /* Start with base case */ | 4495 | /* Start with base case */ |
4030 | adapter->num_rx_queues = 1; | 4496 | adapter->num_rx_queues = 1; |
@@ -4033,18 +4499,18 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | |||
4033 | adapter->num_rx_queues_per_pool = 1; | 4499 | adapter->num_rx_queues_per_pool = 1; |
4034 | 4500 | ||
4035 | if (ixgbe_set_sriov_queues(adapter)) | 4501 | if (ixgbe_set_sriov_queues(adapter)) |
4036 | return; | ||
4037 | |||
4038 | #ifdef IXGBE_FCOE | ||
4039 | if (ixgbe_set_fcoe_queues(adapter)) | ||
4040 | goto done; | 4502 | goto done; |
4041 | 4503 | ||
4042 | #endif /* IXGBE_FCOE */ | ||
4043 | #ifdef CONFIG_IXGBE_DCB | 4504 | #ifdef CONFIG_IXGBE_DCB |
4044 | if (ixgbe_set_dcb_queues(adapter)) | 4505 | if (ixgbe_set_dcb_queues(adapter)) |
4045 | goto done; | 4506 | goto done; |
4046 | 4507 | ||
4047 | #endif | 4508 | #endif |
4509 | #ifdef IXGBE_FCOE | ||
4510 | if (ixgbe_set_fcoe_queues(adapter)) | ||
4511 | goto done; | ||
4512 | |||
4513 | #endif /* IXGBE_FCOE */ | ||
4048 | if (ixgbe_set_fdir_queues(adapter)) | 4514 | if (ixgbe_set_fdir_queues(adapter)) |
4049 | goto done; | 4515 | goto done; |
4050 | 4516 | ||
@@ -4056,12 +4522,14 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | |||
4056 | adapter->num_tx_queues = 1; | 4522 | adapter->num_tx_queues = 1; |
4057 | 4523 | ||
4058 | done: | 4524 | done: |
4059 | /* Notify the stack of the (possibly) reduced Tx Queue count. */ | 4525 | /* Notify the stack of the (possibly) reduced queue counts. */ |
4060 | netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); | 4526 | netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); |
4527 | return netif_set_real_num_rx_queues(adapter->netdev, | ||
4528 | adapter->num_rx_queues); | ||
4061 | } | 4529 | } |
4062 | 4530 | ||
4063 | static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | 4531 | static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, |
4064 | int vectors) | 4532 | int vectors) |
4065 | { | 4533 | { |
4066 | int err, vector_threshold; | 4534 | int err, vector_threshold; |
4067 | 4535 | ||
@@ -4080,7 +4548,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | |||
4080 | */ | 4548 | */ |
4081 | while (vectors >= vector_threshold) { | 4549 | while (vectors >= vector_threshold) { |
4082 | err = pci_enable_msix(adapter->pdev, adapter->msix_entries, | 4550 | err = pci_enable_msix(adapter->pdev, adapter->msix_entries, |
4083 | vectors); | 4551 | vectors); |
4084 | if (!err) /* Success in acquiring all requested vectors. */ | 4552 | if (!err) /* Success in acquiring all requested vectors. */ |
4085 | break; | 4553 | break; |
4086 | else if (err < 0) | 4554 | else if (err < 0) |
@@ -4107,7 +4575,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | |||
4107 | * vectors we were allocated. | 4575 | * vectors we were allocated. |
4108 | */ | 4576 | */ |
4109 | adapter->num_msix_vectors = min(vectors, | 4577 | adapter->num_msix_vectors = min(vectors, |
4110 | adapter->max_msix_q_vectors + NON_Q_VECTORS); | 4578 | adapter->max_msix_q_vectors + NON_Q_VECTORS); |
4111 | } | 4579 | } |
4112 | } | 4580 | } |
4113 | 4581 | ||
@@ -4121,22 +4589,123 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | |||
4121 | static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) | 4589 | static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) |
4122 | { | 4590 | { |
4123 | int i; | 4591 | int i; |
4124 | bool ret = false; | ||
4125 | 4592 | ||
4126 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | 4593 | if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) |
4127 | for (i = 0; i < adapter->num_rx_queues; i++) | 4594 | return false; |
4128 | adapter->rx_ring[i]->reg_idx = i; | ||
4129 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
4130 | adapter->tx_ring[i]->reg_idx = i; | ||
4131 | ret = true; | ||
4132 | } else { | ||
4133 | ret = false; | ||
4134 | } | ||
4135 | 4595 | ||
4136 | return ret; | 4596 | for (i = 0; i < adapter->num_rx_queues; i++) |
4597 | adapter->rx_ring[i]->reg_idx = i; | ||
4598 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
4599 | adapter->tx_ring[i]->reg_idx = i; | ||
4600 | |||
4601 | return true; | ||
4137 | } | 4602 | } |
4138 | 4603 | ||
4139 | #ifdef CONFIG_IXGBE_DCB | 4604 | #ifdef CONFIG_IXGBE_DCB |
4605 | |||
4606 | /* ixgbe_get_first_reg_idx - Return first register index associated with ring */ | ||
4607 | static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, | ||
4608 | unsigned int *tx, unsigned int *rx) | ||
4609 | { | ||
4610 | struct net_device *dev = adapter->netdev; | ||
4611 | struct ixgbe_hw *hw = &adapter->hw; | ||
4612 | u8 num_tcs = netdev_get_num_tc(dev); | ||
4613 | |||
4614 | *tx = 0; | ||
4615 | *rx = 0; | ||
4616 | |||
4617 | switch (hw->mac.type) { | ||
4618 | case ixgbe_mac_82598EB: | ||
4619 | *tx = tc << 3; | ||
4620 | *rx = tc << 2; | ||
4621 | break; | ||
4622 | case ixgbe_mac_82599EB: | ||
4623 | case ixgbe_mac_X540: | ||
4624 | if (num_tcs == 8) { | ||
4625 | if (tc < 3) { | ||
4626 | *tx = tc << 5; | ||
4627 | *rx = tc << 4; | ||
4628 | } else if (tc < 5) { | ||
4629 | *tx = ((tc + 2) << 4); | ||
4630 | *rx = tc << 4; | ||
4631 | } else if (tc < num_tcs) { | ||
4632 | *tx = ((tc + 8) << 3); | ||
4633 | *rx = tc << 4; | ||
4634 | } | ||
4635 | } else if (num_tcs == 4) { | ||
4636 | *rx = tc << 5; | ||
4637 | switch (tc) { | ||
4638 | case 0: | ||
4639 | *tx = 0; | ||
4640 | break; | ||
4641 | case 1: | ||
4642 | *tx = 64; | ||
4643 | break; | ||
4644 | case 2: | ||
4645 | *tx = 96; | ||
4646 | break; | ||
4647 | case 3: | ||
4648 | *tx = 112; | ||
4649 | break; | ||
4650 | default: | ||
4651 | break; | ||
4652 | } | ||
4653 | } | ||
4654 | break; | ||
4655 | default: | ||
4656 | break; | ||
4657 | } | ||
4658 | } | ||
4659 | |||
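The shifts in ixgbe_get_first_reg_idx() encode the same per-TC descriptor queue layout that the old ixgbe_cache_ring_dcb() spelled out in a comment further down (Tx TC0..TC7 starting at 0, 32, 64, 80, 96, 104, 112, 120; Rx TCs spaced 16 queues apart). A runnable sketch of the 82599/X540 8-TC case that just replays the arithmetic above:

/* Reproduces the 82599/X540 8-TC mapping from ixgbe_get_first_reg_idx()
 * to make the bit shifts concrete.
 */
#include <stdio.h>

static void first_reg_idx_82599_8tc(unsigned tc, unsigned *tx, unsigned *rx)
{
	*rx = tc << 4;
	if (tc < 3)
		*tx = tc << 5;
	else if (tc < 5)
		*tx = (tc + 2) << 4;
	else
		*tx = (tc + 8) << 3;
}

int main(void)
{
	unsigned tc, tx, rx;

	for (tc = 0; tc < 8; tc++) {
		first_reg_idx_82599_8tc(tc, &tx, &rx);
		printf("TC%u: first Tx reg_idx %3u, first Rx reg_idx %3u\n",
		       tc, tx, rx);
	}
	return 0;
}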
4660 | #define IXGBE_MAX_Q_PER_TC (IXGBE_MAX_DCB_INDICES / MAX_TRAFFIC_CLASS) | ||
4661 | |||
4662 | /* ixgbe_setup_tc - routine to configure net_device for multiple traffic | ||
4663 | * classes. | ||
4664 | * | ||
4665 | * @dev: net device to configure | ||
4666 | * @tc: number of traffic classes to enable | ||
4667 | */ | ||
4668 | int ixgbe_setup_tc(struct net_device *dev, u8 tc) | ||
4669 | { | ||
4670 | int i; | ||
4671 | unsigned int q, offset = 0; | ||
4672 | |||
4673 | if (!tc) { | ||
4674 | netdev_reset_tc(dev); | ||
4675 | } else { | ||
4676 | struct ixgbe_adapter *adapter = netdev_priv(dev); | ||
4677 | |||
4678 | /* Hardware supports up to 8 traffic classes */ | ||
4679 | if (tc > MAX_TRAFFIC_CLASS || netdev_set_num_tc(dev, tc)) | ||
4680 | return -EINVAL; | ||
4681 | |||
4682 | /* Partition Tx queues evenly amongst traffic classes */ | ||
4683 | for (i = 0; i < tc; i++) { | ||
4684 | q = min((int)num_online_cpus(), IXGBE_MAX_Q_PER_TC); | ||
4685 | netdev_set_prio_tc_map(dev, i, i); | ||
4686 | netdev_set_tc_queue(dev, i, q, offset); | ||
4687 | offset += q; | ||
4688 | } | ||
4689 | |||
4690 | /* This enables multiple traffic class support in the hardware, | ||
4691 | * which defaults to strict priority transmission. | ||
4692 | * If traffic classes are already enabled, perhaps through the DCB | ||
4693 | * code path, then the existing configuration will be used. | ||
4694 | */ | ||
4695 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) && | ||
4696 | dev->dcbnl_ops && dev->dcbnl_ops->setdcbx) { | ||
4697 | struct ieee_ets ets = { | ||
4698 | .prio_tc = {0, 1, 2, 3, 4, 5, 6, 7}, | ||
4699 | }; | ||
4700 | u8 mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; | ||
4701 | |||
4702 | dev->dcbnl_ops->setdcbx(dev, mode); | ||
4703 | dev->dcbnl_ops->ieee_setets(dev, &ets); | ||
4704 | } | ||
4705 | } | ||
4706 | return 0; | ||
4707 | } | ||
4708 | |||
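ixgbe_setup_tc() above carves the Tx queues into one contiguous (offset, count) slice per traffic class via netdev_set_tc_queue(), with a 1:1 priority-to-TC map. A minimal sketch of that partition, assuming 4 classes, 16 online CPUs and an IXGBE_MAX_Q_PER_TC of 8 (all three numbers are illustrative, not taken from the driver):

/* Sketch of the even Tx queue partition done by ixgbe_setup_tc(). */
#include <stdio.h>

int main(void)
{
	unsigned int tc, q, offset = 0;
	unsigned int num_tc = 4, online_cpus = 16, max_q_per_tc = 8;

	for (tc = 0; tc < num_tc; tc++) {
		q = online_cpus < max_q_per_tc ? online_cpus : max_q_per_tc;
		/* corresponds to netdev_set_tc_queue(dev, tc, q, offset) */
		printf("TC%u: count %u, offset %u\n", tc, q, offset);
		offset += q;
	}
	return 0;
}

With those inputs, TC0..TC3 end up with 8 queues each at offsets 0, 8, 16 and 24, which is the kind of layout the FCoE and DCB ring-caching code above consumes through dev->tc_to_txq[].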
4140 | /** | 4709 | /** |
4141 | * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB | 4710 | * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB |
4142 | * @adapter: board private structure to initialize | 4711 | * @adapter: board private structure to initialize |
@@ -4146,76 +4715,27 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) | |||
4146 | **/ | 4715 | **/ |
4147 | static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | 4716 | static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) |
4148 | { | 4717 | { |
4149 | int i; | 4718 | struct net_device *dev = adapter->netdev; |
4150 | bool ret = false; | 4719 | int i, j, k; |
4151 | int dcb_i = adapter->ring_feature[RING_F_DCB].indices; | 4720 | u8 num_tcs = netdev_get_num_tc(dev); |
4152 | 4721 | ||
4153 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 4722 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) |
4154 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 4723 | return false; |
4155 | /* the number of queues is assumed to be symmetric */ | ||
4156 | for (i = 0; i < dcb_i; i++) { | ||
4157 | adapter->rx_ring[i]->reg_idx = i << 3; | ||
4158 | adapter->tx_ring[i]->reg_idx = i << 2; | ||
4159 | } | ||
4160 | ret = true; | ||
4161 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | ||
4162 | if (dcb_i == 8) { | ||
4163 | /* | ||
4164 | * Tx TC0 starts at: descriptor queue 0 | ||
4165 | * Tx TC1 starts at: descriptor queue 32 | ||
4166 | * Tx TC2 starts at: descriptor queue 64 | ||
4167 | * Tx TC3 starts at: descriptor queue 80 | ||
4168 | * Tx TC4 starts at: descriptor queue 96 | ||
4169 | * Tx TC5 starts at: descriptor queue 104 | ||
4170 | * Tx TC6 starts at: descriptor queue 112 | ||
4171 | * Tx TC7 starts at: descriptor queue 120 | ||
4172 | * | ||
4173 | * Rx TC0-TC7 are offset by 16 queues each | ||
4174 | */ | ||
4175 | for (i = 0; i < 3; i++) { | ||
4176 | adapter->tx_ring[i]->reg_idx = i << 5; | ||
4177 | adapter->rx_ring[i]->reg_idx = i << 4; | ||
4178 | } | ||
4179 | for ( ; i < 5; i++) { | ||
4180 | adapter->tx_ring[i]->reg_idx = | ||
4181 | ((i + 2) << 4); | ||
4182 | adapter->rx_ring[i]->reg_idx = i << 4; | ||
4183 | } | ||
4184 | for ( ; i < dcb_i; i++) { | ||
4185 | adapter->tx_ring[i]->reg_idx = | ||
4186 | ((i + 8) << 3); | ||
4187 | adapter->rx_ring[i]->reg_idx = i << 4; | ||
4188 | } | ||
4189 | 4724 | ||
4190 | ret = true; | 4725 | for (i = 0, k = 0; i < num_tcs; i++) { |
4191 | } else if (dcb_i == 4) { | 4726 | unsigned int tx_s, rx_s; |
4192 | /* | 4727 | u16 count = dev->tc_to_txq[i].count; |
4193 | * Tx TC0 starts at: descriptor queue 0 | 4728 | |
4194 | * Tx TC1 starts at: descriptor queue 64 | 4729 | ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); |
4195 | * Tx TC2 starts at: descriptor queue 96 | 4730 | for (j = 0; j < count; j++, k++) { |
4196 | * Tx TC3 starts at: descriptor queue 112 | 4731 | adapter->tx_ring[k]->reg_idx = tx_s + j; |
4197 | * | 4732 | adapter->rx_ring[k]->reg_idx = rx_s + j; |
4198 | * Rx TC0-TC3 are offset by 32 queues each | 4733 | adapter->tx_ring[k]->dcb_tc = i; |
4199 | */ | 4734 | adapter->rx_ring[k]->dcb_tc = i; |
4200 | adapter->tx_ring[0]->reg_idx = 0; | ||
4201 | adapter->tx_ring[1]->reg_idx = 64; | ||
4202 | adapter->tx_ring[2]->reg_idx = 96; | ||
4203 | adapter->tx_ring[3]->reg_idx = 112; | ||
4204 | for (i = 0 ; i < dcb_i; i++) | ||
4205 | adapter->rx_ring[i]->reg_idx = i << 5; | ||
4206 | |||
4207 | ret = true; | ||
4208 | } else { | ||
4209 | ret = false; | ||
4210 | } | ||
4211 | } else { | ||
4212 | ret = false; | ||
4213 | } | 4735 | } |
4214 | } else { | ||
4215 | ret = false; | ||
4216 | } | 4736 | } |
4217 | 4737 | ||
4218 | return ret; | 4738 | return true; |
4219 | } | 4739 | } |
4220 | #endif | 4740 | #endif |
4221 | 4741 | ||
@@ -4226,7 +4746,7 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | |||
4226 | * Cache the descriptor ring offsets for Flow Director to the assigned rings. | 4746 | * Cache the descriptor ring offsets for Flow Director to the assigned rings. |
4227 | * | 4747 | * |
4228 | **/ | 4748 | **/ |
4229 | static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) | 4749 | static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) |
4230 | { | 4750 | { |
4231 | int i; | 4751 | int i; |
4232 | bool ret = false; | 4752 | bool ret = false; |
@@ -4254,55 +4774,28 @@ static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) | |||
4254 | */ | 4774 | */ |
4255 | static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | 4775 | static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) |
4256 | { | 4776 | { |
4257 | int i, fcoe_rx_i = 0, fcoe_tx_i = 0; | ||
4258 | bool ret = false; | ||
4259 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | 4777 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; |
4778 | int i; | ||
4779 | u8 fcoe_rx_i = 0, fcoe_tx_i = 0; | ||
4260 | 4780 | ||
4261 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | 4781 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) |
4262 | #ifdef CONFIG_IXGBE_DCB | 4782 | return false; |
4263 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
4264 | struct ixgbe_fcoe *fcoe = &adapter->fcoe; | ||
4265 | 4783 | ||
4266 | ixgbe_cache_ring_dcb(adapter); | 4784 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { |
4267 | /* find out queues in TC for FCoE */ | 4785 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || |
4268 | fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; | 4786 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) |
4269 | fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; | 4787 | ixgbe_cache_ring_fdir(adapter); |
4270 | /* | 4788 | else |
4271 | * In 82599, the number of Tx queues for each traffic | 4789 | ixgbe_cache_ring_rss(adapter); |
4272 | * class for both 8-TC and 4-TC modes are: | ||
4273 | * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7 | ||
4274 | * 8 TCs: 32 32 16 16 8 8 8 8 | ||
4275 | * 4 TCs: 64 64 32 32 | ||
4276 | * We have max 8 queues for FCoE, where 8 the is | ||
4277 | * FCoE redirection table size. If TC for FCoE is | ||
4278 | * less than or equal to TC3, we have enough queues | ||
4279 | * to add max of 8 queues for FCoE, so we start FCoE | ||
4280 | * tx descriptor from the next one, i.e., reg_idx + 1. | ||
4281 | * If TC for FCoE is above TC3, implying 8 TC mode, | ||
4282 | * and we need 8 for FCoE, we have to take all queues | ||
4283 | * in that traffic class for FCoE. | ||
4284 | */ | ||
4285 | if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3)) | ||
4286 | fcoe_tx_i--; | ||
4287 | } | ||
4288 | #endif /* CONFIG_IXGBE_DCB */ | ||
4289 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
4290 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || | ||
4291 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) | ||
4292 | ixgbe_cache_ring_fdir(adapter); | ||
4293 | else | ||
4294 | ixgbe_cache_ring_rss(adapter); | ||
4295 | 4790 | ||
4296 | fcoe_rx_i = f->mask; | 4791 | fcoe_rx_i = f->mask; |
4297 | fcoe_tx_i = f->mask; | 4792 | fcoe_tx_i = f->mask; |
4298 | } | ||
4299 | for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { | ||
4300 | adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; | ||
4301 | adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; | ||
4302 | } | ||
4303 | ret = true; | ||
4304 | } | 4793 | } |
4305 | return ret; | 4794 | for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { |
4795 | adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; | ||
4796 | adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; | ||
4797 | } | ||
4798 | return true; | ||
4306 | } | 4799 | } |
4307 | 4800 | ||
4308 | #endif /* IXGBE_FCOE */ | 4801 | #endif /* IXGBE_FCOE */ |
@@ -4344,16 +4837,16 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | |||
4344 | if (ixgbe_cache_ring_sriov(adapter)) | 4837 | if (ixgbe_cache_ring_sriov(adapter)) |
4345 | return; | 4838 | return; |
4346 | 4839 | ||
4840 | #ifdef CONFIG_IXGBE_DCB | ||
4841 | if (ixgbe_cache_ring_dcb(adapter)) | ||
4842 | return; | ||
4843 | #endif | ||
4844 | |||
4347 | #ifdef IXGBE_FCOE | 4845 | #ifdef IXGBE_FCOE |
4348 | if (ixgbe_cache_ring_fcoe(adapter)) | 4846 | if (ixgbe_cache_ring_fcoe(adapter)) |
4349 | return; | 4847 | return; |
4350 | |||
4351 | #endif /* IXGBE_FCOE */ | 4848 | #endif /* IXGBE_FCOE */ |
4352 | #ifdef CONFIG_IXGBE_DCB | ||
4353 | if (ixgbe_cache_ring_dcb(adapter)) | ||
4354 | return; | ||
4355 | 4849 | ||
4356 | #endif | ||
4357 | if (ixgbe_cache_ring_fdir(adapter)) | 4850 | if (ixgbe_cache_ring_fdir(adapter)) |
4358 | return; | 4851 | return; |
4359 | 4852 | ||
@@ -4371,65 +4864,55 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | |||
4371 | **/ | 4864 | **/ |
4372 | static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) | 4865 | static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) |
4373 | { | 4866 | { |
4374 | int i; | 4867 | int rx = 0, tx = 0, nid = adapter->node; |
4375 | int orig_node = adapter->node; | ||
4376 | 4868 | ||
4377 | for (i = 0; i < adapter->num_tx_queues; i++) { | 4869 | if (nid < 0 || !node_online(nid)) |
4378 | struct ixgbe_ring *ring = adapter->tx_ring[i]; | 4870 | nid = first_online_node; |
4379 | if (orig_node == -1) { | 4871 | |
4380 | int cur_node = next_online_node(adapter->node); | 4872 | for (; tx < adapter->num_tx_queues; tx++) { |
4381 | if (cur_node == MAX_NUMNODES) | 4873 | struct ixgbe_ring *ring; |
4382 | cur_node = first_online_node; | 4874 | |
4383 | adapter->node = cur_node; | 4875 | ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); |
4384 | } | ||
4385 | ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, | ||
4386 | adapter->node); | ||
4387 | if (!ring) | 4876 | if (!ring) |
4388 | ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); | 4877 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); |
4389 | if (!ring) | 4878 | if (!ring) |
4390 | goto err_tx_ring_allocation; | 4879 | goto err_allocation; |
4391 | ring->count = adapter->tx_ring_count; | 4880 | ring->count = adapter->tx_ring_count; |
4392 | ring->queue_index = i; | 4881 | ring->queue_index = tx; |
4393 | ring->numa_node = adapter->node; | 4882 | ring->numa_node = nid; |
4883 | ring->dev = &adapter->pdev->dev; | ||
4884 | ring->netdev = adapter->netdev; | ||
4394 | 4885 | ||
4395 | adapter->tx_ring[i] = ring; | 4886 | adapter->tx_ring[tx] = ring; |
4396 | } | 4887 | } |
4397 | 4888 | ||
4398 | /* Restore the adapter's original node */ | 4889 | for (; rx < adapter->num_rx_queues; rx++) { |
4399 | adapter->node = orig_node; | 4890 | struct ixgbe_ring *ring; |
4400 | 4891 | ||
4401 | for (i = 0; i < adapter->num_rx_queues; i++) { | 4892 | ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); |
4402 | struct ixgbe_ring *ring = adapter->rx_ring[i]; | ||
4403 | if (orig_node == -1) { | ||
4404 | int cur_node = next_online_node(adapter->node); | ||
4405 | if (cur_node == MAX_NUMNODES) | ||
4406 | cur_node = first_online_node; | ||
4407 | adapter->node = cur_node; | ||
4408 | } | ||
4409 | ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, | ||
4410 | adapter->node); | ||
4411 | if (!ring) | 4893 | if (!ring) |
4412 | ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); | 4894 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); |
4413 | if (!ring) | 4895 | if (!ring) |
4414 | goto err_rx_ring_allocation; | 4896 | goto err_allocation; |
4415 | ring->count = adapter->rx_ring_count; | 4897 | ring->count = adapter->rx_ring_count; |
4416 | ring->queue_index = i; | 4898 | ring->queue_index = rx; |
4417 | ring->numa_node = adapter->node; | 4899 | ring->numa_node = nid; |
4900 | ring->dev = &adapter->pdev->dev; | ||
4901 | ring->netdev = adapter->netdev; | ||
4418 | 4902 | ||
4419 | adapter->rx_ring[i] = ring; | 4903 | adapter->rx_ring[rx] = ring; |
4420 | } | 4904 | } |
4421 | 4905 | ||
4422 | /* Restore the adapter's original node */ | ||
4423 | adapter->node = orig_node; | ||
4424 | |||
4425 | ixgbe_cache_ring_register(adapter); | 4906 | ixgbe_cache_ring_register(adapter); |
4426 | 4907 | ||
4427 | return 0; | 4908 | return 0; |
4428 | 4909 | ||
4429 | err_rx_ring_allocation: | 4910 | err_allocation: |
4430 | for (i = 0; i < adapter->num_tx_queues; i++) | 4911 | while (tx) |
4431 | kfree(adapter->tx_ring[i]); | 4912 | kfree(adapter->tx_ring[--tx]); |
4432 | err_tx_ring_allocation: | 4913 | |
4914 | while (rx) | ||
4915 | kfree(adapter->rx_ring[--rx]); | ||
4433 | return -ENOMEM; | 4916 | return -ENOMEM; |
4434 | } | 4917 | } |
4435 | 4918 | ||
@@ -4453,7 +4936,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | |||
4453 | * (roughly) the same number of vectors as there are CPU's. | 4936 | * (roughly) the same number of vectors as there are CPU's. |
4454 | */ | 4937 | */ |
4455 | v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, | 4938 | v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, |
4456 | (int)num_online_cpus()) + NON_Q_VECTORS; | 4939 | (int)num_online_cpus()) + NON_Q_VECTORS; |
4457 | 4940 | ||
4458 | /* | 4941 | /* |
4459 | * At the same time, hardware can only support a maximum of | 4942 | * At the same time, hardware can only support a maximum of |
@@ -4467,7 +4950,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | |||
4467 | /* A failure in MSI-X entry allocation isn't fatal, but it does | 4950 | /* A failure in MSI-X entry allocation isn't fatal, but it does |
4468 | * mean we disable MSI-X capabilities of the adapter. */ | 4951 | * mean we disable MSI-X capabilities of the adapter. */ |
4469 | adapter->msix_entries = kcalloc(v_budget, | 4952 | adapter->msix_entries = kcalloc(v_budget, |
4470 | sizeof(struct msix_entry), GFP_KERNEL); | 4953 | sizeof(struct msix_entry), GFP_KERNEL); |
4471 | if (adapter->msix_entries) { | 4954 | if (adapter->msix_entries) { |
4472 | for (vector = 0; vector < v_budget; vector++) | 4955 | for (vector = 0; vector < v_budget; vector++) |
4473 | adapter->msix_entries[vector].entry = vector; | 4956 | adapter->msix_entries[vector].entry = vector; |
@@ -4480,13 +4963,21 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | |||
4480 | 4963 | ||
4481 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | 4964 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; |
4482 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | 4965 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; |
4966 | if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE | | ||
4967 | IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { | ||
4968 | e_err(probe, | ||
4969 | "Flow Director is not supported while multiple " | ||
4970 | "queues are disabled. Disabling Flow Director\n"); | ||
4971 | } | ||
4483 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | 4972 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; |
4484 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | 4973 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; |
4485 | adapter->atr_sample_rate = 0; | 4974 | adapter->atr_sample_rate = 0; |
4486 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | 4975 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
4487 | ixgbe_disable_sriov(adapter); | 4976 | ixgbe_disable_sriov(adapter); |
4488 | 4977 | ||
4489 | ixgbe_set_num_queues(adapter); | 4978 | err = ixgbe_set_num_queues(adapter); |
4979 | if (err) | ||
4980 | return err; | ||
4490 | 4981 | ||
4491 | err = pci_enable_msi(adapter->pdev); | 4982 | err = pci_enable_msi(adapter->pdev); |
4492 | if (!err) { | 4983 | if (!err) { |
@@ -4514,25 +5005,22 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | |||
4514 | { | 5005 | { |
4515 | int q_idx, num_q_vectors; | 5006 | int q_idx, num_q_vectors; |
4516 | struct ixgbe_q_vector *q_vector; | 5007 | struct ixgbe_q_vector *q_vector; |
4517 | int napi_vectors; | ||
4518 | int (*poll)(struct napi_struct *, int); | 5008 | int (*poll)(struct napi_struct *, int); |
4519 | 5009 | ||
4520 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 5010 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
4521 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 5011 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
4522 | napi_vectors = adapter->num_rx_queues; | ||
4523 | poll = &ixgbe_clean_rxtx_many; | 5012 | poll = &ixgbe_clean_rxtx_many; |
4524 | } else { | 5013 | } else { |
4525 | num_q_vectors = 1; | 5014 | num_q_vectors = 1; |
4526 | napi_vectors = 1; | ||
4527 | poll = &ixgbe_poll; | 5015 | poll = &ixgbe_poll; |
4528 | } | 5016 | } |
4529 | 5017 | ||
4530 | for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { | 5018 | for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { |
4531 | q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), | 5019 | q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), |
4532 | GFP_KERNEL, adapter->node); | 5020 | GFP_KERNEL, adapter->node); |
4533 | if (!q_vector) | 5021 | if (!q_vector) |
4534 | q_vector = kzalloc(sizeof(struct ixgbe_q_vector), | 5022 | q_vector = kzalloc(sizeof(struct ixgbe_q_vector), |
4535 | GFP_KERNEL); | 5023 | GFP_KERNEL); |
4536 | if (!q_vector) | 5024 | if (!q_vector) |
4537 | goto err_out; | 5025 | goto err_out; |
4538 | q_vector->adapter = adapter; | 5026 | q_vector->adapter = adapter; |
@@ -4611,7 +5099,9 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) | |||
4611 | int err; | 5099 | int err; |
4612 | 5100 | ||
4613 | /* Number of supported queues */ | 5101 | /* Number of supported queues */ |
4614 | ixgbe_set_num_queues(adapter); | 5102 | err = ixgbe_set_num_queues(adapter); |
5103 | if (err) | ||
5104 | return err; | ||
4615 | 5105 | ||
4616 | err = ixgbe_set_interrupt_capability(adapter); | 5106 | err = ixgbe_set_interrupt_capability(adapter); |
4617 | if (err) { | 5107 | if (err) { |
@@ -4663,66 +5153,23 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) | |||
4663 | adapter->tx_ring[i] = NULL; | 5153 | adapter->tx_ring[i] = NULL; |
4664 | } | 5154 | } |
4665 | for (i = 0; i < adapter->num_rx_queues; i++) { | 5155 | for (i = 0; i < adapter->num_rx_queues; i++) { |
4666 | kfree(adapter->rx_ring[i]); | 5156 | struct ixgbe_ring *ring = adapter->rx_ring[i]; |
5157 | |||
5158 | /* ixgbe_get_stats64() might access this ring, so we must wait | ||
5159 | * an RCU grace period before freeing it. | ||
5160 | */ | ||
5161 | kfree_rcu(ring, rcu); | ||
4667 | adapter->rx_ring[i] = NULL; | 5162 | adapter->rx_ring[i] = NULL; |
4668 | } | 5163 | } |
4669 | 5164 | ||
5165 | adapter->num_tx_queues = 0; | ||
5166 | adapter->num_rx_queues = 0; | ||
5167 | |||
4670 | ixgbe_free_q_vectors(adapter); | 5168 | ixgbe_free_q_vectors(adapter); |
4671 | ixgbe_reset_interrupt_capability(adapter); | 5169 | ixgbe_reset_interrupt_capability(adapter); |
4672 | } | 5170 | } |
4673 | 5171 | ||
4674 | /** | 5172 | /** |
4675 | * ixgbe_sfp_timer - worker thread to find a missing module | ||
4676 | * @data: pointer to our adapter struct | ||
4677 | **/ | ||
4678 | static void ixgbe_sfp_timer(unsigned long data) | ||
4679 | { | ||
4680 | struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; | ||
4681 | |||
4682 | /* | ||
4683 | * Do the sfp_timer outside of interrupt context due to the | ||
4684 | * delays that sfp+ detection requires | ||
4685 | */ | ||
4686 | schedule_work(&adapter->sfp_task); | ||
4687 | } | ||
4688 | |||
4689 | /** | ||
4690 | * ixgbe_sfp_task - worker thread to find a missing module | ||
4691 | * @work: pointer to work_struct containing our data | ||
4692 | **/ | ||
4693 | static void ixgbe_sfp_task(struct work_struct *work) | ||
4694 | { | ||
4695 | struct ixgbe_adapter *adapter = container_of(work, | ||
4696 | struct ixgbe_adapter, | ||
4697 | sfp_task); | ||
4698 | struct ixgbe_hw *hw = &adapter->hw; | ||
4699 | |||
4700 | if ((hw->phy.type == ixgbe_phy_nl) && | ||
4701 | (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { | ||
4702 | s32 ret = hw->phy.ops.identify_sfp(hw); | ||
4703 | if (ret == IXGBE_ERR_SFP_NOT_PRESENT) | ||
4704 | goto reschedule; | ||
4705 | ret = hw->phy.ops.reset(hw); | ||
4706 | if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { | ||
4707 | e_dev_err("failed to initialize because an unsupported " | ||
4708 | "SFP+ module type was detected.\n"); | ||
4709 | e_dev_err("Reload the driver after installing a " | ||
4710 | "supported module.\n"); | ||
4711 | unregister_netdev(adapter->netdev); | ||
4712 | } else { | ||
4713 | e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); | ||
4714 | } | ||
4715 | /* don't need this routine any more */ | ||
4716 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | ||
4717 | } | ||
4718 | return; | ||
4719 | reschedule: | ||
4720 | if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state)) | ||
4721 | mod_timer(&adapter->sfp_timer, | ||
4722 | round_jiffies(jiffies + (2 * HZ))); | ||
4723 | } | ||
4724 | |||
4725 | /** | ||
4726 | * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) | 5173 | * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) |
4727 | * @adapter: board private structure to initialize | 5174 | * @adapter: board private structure to initialize |
4728 | * | 5175 | * |
@@ -4740,6 +5187,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4740 | int j; | 5187 | int j; |
4741 | struct tc_configuration *tc; | 5188 | struct tc_configuration *tc; |
4742 | #endif | 5189 | #endif |
5190 | int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; | ||
4743 | 5191 | ||
4744 | /* PCI config space info */ | 5192 | /* PCI config space info */ |
4745 | 5193 | ||
@@ -4754,28 +5202,26 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4754 | adapter->ring_feature[RING_F_RSS].indices = rss; | 5202 | adapter->ring_feature[RING_F_RSS].indices = rss; |
4755 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; | 5203 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; |
4756 | adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; | 5204 | adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; |
4757 | if (hw->mac.type == ixgbe_mac_82598EB) { | 5205 | switch (hw->mac.type) { |
5206 | case ixgbe_mac_82598EB: | ||
4758 | if (hw->device_id == IXGBE_DEV_ID_82598AT) | 5207 | if (hw->device_id == IXGBE_DEV_ID_82598AT) |
4759 | adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; | 5208 | adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; |
4760 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; | 5209 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; |
4761 | } else if (hw->mac.type == ixgbe_mac_82599EB) { | 5210 | break; |
5211 | case ixgbe_mac_82599EB: | ||
5212 | case ixgbe_mac_X540: | ||
4762 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; | 5213 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; |
4763 | adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; | 5214 | adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; |
4764 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; | 5215 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; |
4765 | if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) | 5216 | if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) |
4766 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; | 5217 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; |
4767 | if (dev->features & NETIF_F_NTUPLE) { | 5218 | /* n-tuple support exists, always init our spinlock */ |
4768 | /* Flow Director perfect filter enabled */ | 5219 | spin_lock_init(&adapter->fdir_perfect_lock); |
4769 | adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | 5220 | /* Flow Director hash filters enabled */ |
4770 | adapter->atr_sample_rate = 0; | 5221 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; |
4771 | spin_lock_init(&adapter->fdir_perfect_lock); | 5222 | adapter->atr_sample_rate = 20; |
4772 | } else { | ||
4773 | /* Flow Director hash filters enabled */ | ||
4774 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
4775 | adapter->atr_sample_rate = 20; | ||
4776 | } | ||
4777 | adapter->ring_feature[RING_F_FDIR].indices = | 5223 | adapter->ring_feature[RING_F_FDIR].indices = |
4778 | IXGBE_MAX_FDIR_INDICES; | 5224 | IXGBE_MAX_FDIR_INDICES; |
4779 | adapter->fdir_pballoc = 0; | 5225 | adapter->fdir_pballoc = 0; |
4780 | #ifdef IXGBE_FCOE | 5226 | #ifdef IXGBE_FCOE |
4781 | adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; | 5227 | adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; |
@@ -4787,6 +5233,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4787 | adapter->fcoe.up = IXGBE_FCOE_DEFTC; | 5233 | adapter->fcoe.up = IXGBE_FCOE_DEFTC; |
4788 | #endif | 5234 | #endif |
4789 | #endif /* IXGBE_FCOE */ | 5235 | #endif /* IXGBE_FCOE */ |
5236 | break; | ||
5237 | default: | ||
5238 | break; | ||
4790 | } | 5239 | } |
4791 | 5240 | ||
4792 | #ifdef CONFIG_IXGBE_DCB | 5241 | #ifdef CONFIG_IXGBE_DCB |
@@ -4803,10 +5252,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4803 | adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; | 5252 | adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; |
4804 | adapter->dcb_cfg.rx_pba_cfg = pba_equal; | 5253 | adapter->dcb_cfg.rx_pba_cfg = pba_equal; |
4805 | adapter->dcb_cfg.pfc_mode_enable = false; | 5254 | adapter->dcb_cfg.pfc_mode_enable = false; |
4806 | adapter->dcb_cfg.round_robin_enable = false; | ||
4807 | adapter->dcb_set_bitmap = 0x00; | 5255 | adapter->dcb_set_bitmap = 0x00; |
5256 | adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; | ||
4808 | ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, | 5257 | ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, |
4809 | adapter->ring_feature[RING_F_DCB].indices); | 5258 | MAX_TRAFFIC_CLASS); |
4810 | 5259 | ||
4811 | #endif | 5260 | #endif |
4812 | 5261 | ||
@@ -4816,8 +5265,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4816 | #ifdef CONFIG_DCB | 5265 | #ifdef CONFIG_DCB |
4817 | adapter->last_lfc_mode = hw->fc.current_mode; | 5266 | adapter->last_lfc_mode = hw->fc.current_mode; |
4818 | #endif | 5267 | #endif |
4819 | hw->fc.high_water = IXGBE_DEFAULT_FCRTH; | 5268 | hw->fc.high_water = FC_HIGH_WATER(max_frame); |
4820 | hw->fc.low_water = IXGBE_DEFAULT_FCRTL; | 5269 | hw->fc.low_water = FC_LOW_WATER(max_frame); |
4821 | hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; | 5270 | hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; |
4822 | hw->fc.send_xon = true; | 5271 | hw->fc.send_xon = true; |
4823 | hw->fc.disable_fc_autoneg = false; | 5272 | hw->fc.disable_fc_autoneg = false; |
@@ -4855,30 +5304,27 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4855 | 5304 | ||
4856 | /** | 5305 | /** |
4857 | * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) | 5306 | * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) |
4858 | * @adapter: board private structure | ||
4859 | * @tx_ring: tx descriptor ring (for a specific queue) to setup | 5307 | * @tx_ring: tx descriptor ring (for a specific queue) to setup |
4860 | * | 5308 | * |
4861 | * Return 0 on success, negative on failure | 5309 | * Return 0 on success, negative on failure |
4862 | **/ | 5310 | **/ |
4863 | int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, | 5311 | int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) |
4864 | struct ixgbe_ring *tx_ring) | ||
4865 | { | 5312 | { |
4866 | struct pci_dev *pdev = adapter->pdev; | 5313 | struct device *dev = tx_ring->dev; |
4867 | int size; | 5314 | int size; |
4868 | 5315 | ||
4869 | size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; | 5316 | size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; |
4870 | tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node); | 5317 | tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node); |
4871 | if (!tx_ring->tx_buffer_info) | 5318 | if (!tx_ring->tx_buffer_info) |
4872 | tx_ring->tx_buffer_info = vmalloc(size); | 5319 | tx_ring->tx_buffer_info = vzalloc(size); |
4873 | if (!tx_ring->tx_buffer_info) | 5320 | if (!tx_ring->tx_buffer_info) |
4874 | goto err; | 5321 | goto err; |
4875 | memset(tx_ring->tx_buffer_info, 0, size); | ||
4876 | 5322 | ||
4877 | /* round up to nearest 4K */ | 5323 | /* round up to nearest 4K */ |
4878 | tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); | 5324 | tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); |
4879 | tx_ring->size = ALIGN(tx_ring->size, 4096); | 5325 | tx_ring->size = ALIGN(tx_ring->size, 4096); |
4880 | 5326 | ||
4881 | tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, | 5327 | tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, |
4882 | &tx_ring->dma, GFP_KERNEL); | 5328 | &tx_ring->dma, GFP_KERNEL); |
4883 | if (!tx_ring->desc) | 5329 | if (!tx_ring->desc) |
4884 | goto err; | 5330 | goto err; |
@@ -4891,7 +5337,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, | |||
4891 | err: | 5337 | err: |
4892 | vfree(tx_ring->tx_buffer_info); | 5338 | vfree(tx_ring->tx_buffer_info); |
4893 | tx_ring->tx_buffer_info = NULL; | 5339 | tx_ring->tx_buffer_info = NULL; |
4894 | e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n"); | 5340 | dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); |
4895 | return -ENOMEM; | 5341 | return -ENOMEM; |
4896 | } | 5342 | } |
4897 | 5343 | ||
@@ -4910,7 +5356,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) | |||
4910 | int i, err = 0; | 5356 | int i, err = 0; |
4911 | 5357 | ||
4912 | for (i = 0; i < adapter->num_tx_queues; i++) { | 5358 | for (i = 0; i < adapter->num_tx_queues; i++) { |
4913 | err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]); | 5359 | err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); |
4914 | if (!err) | 5360 | if (!err) |
4915 | continue; | 5361 | continue; |
4916 | e_err(probe, "Allocation for Tx Queue %u failed\n", i); | 5362 | e_err(probe, "Allocation for Tx Queue %u failed\n", i); |
@@ -4922,48 +5368,40 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) | |||
4922 | 5368 | ||
4923 | /** | 5369 | /** |
4924 | * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) | 5370 | * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) |
4925 | * @adapter: board private structure | ||
4926 | * @rx_ring: rx descriptor ring (for a specific queue) to setup | 5371 | * @rx_ring: rx descriptor ring (for a specific queue) to setup |
4927 | * | 5372 | * |
4928 | * Returns 0 on success, negative on failure | 5373 | * Returns 0 on success, negative on failure |
4929 | **/ | 5374 | **/ |
4930 | int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, | 5375 | int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) |
4931 | struct ixgbe_ring *rx_ring) | ||
4932 | { | 5376 | { |
4933 | struct pci_dev *pdev = adapter->pdev; | 5377 | struct device *dev = rx_ring->dev; |
4934 | int size; | 5378 | int size; |
4935 | 5379 | ||
4936 | size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; | 5380 | size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; |
4937 | rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node); | 5381 | rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node); |
4938 | if (!rx_ring->rx_buffer_info) | 5382 | if (!rx_ring->rx_buffer_info) |
4939 | rx_ring->rx_buffer_info = vmalloc(size); | 5383 | rx_ring->rx_buffer_info = vzalloc(size); |
4940 | if (!rx_ring->rx_buffer_info) { | 5384 | if (!rx_ring->rx_buffer_info) |
4941 | e_err(probe, "vmalloc allocation failed for the Rx " | 5385 | goto err; |
4942 | "descriptor ring\n"); | ||
4943 | goto alloc_failed; | ||
4944 | } | ||
4945 | memset(rx_ring->rx_buffer_info, 0, size); | ||
4946 | 5386 | ||
4947 | /* Round up to nearest 4K */ | 5387 | /* Round up to nearest 4K */ |
4948 | rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); | 5388 | rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); |
4949 | rx_ring->size = ALIGN(rx_ring->size, 4096); | 5389 | rx_ring->size = ALIGN(rx_ring->size, 4096); |
4950 | 5390 | ||
4951 | rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, | 5391 | rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, |
4952 | &rx_ring->dma, GFP_KERNEL); | 5392 | &rx_ring->dma, GFP_KERNEL); |
4953 | 5393 | ||
4954 | if (!rx_ring->desc) { | 5394 | if (!rx_ring->desc) |
4955 | e_err(probe, "Memory allocation failed for the Rx " | 5395 | goto err; |
4956 | "descriptor ring\n"); | ||
4957 | vfree(rx_ring->rx_buffer_info); | ||
4958 | goto alloc_failed; | ||
4959 | } | ||
4960 | 5396 | ||
4961 | rx_ring->next_to_clean = 0; | 5397 | rx_ring->next_to_clean = 0; |
4962 | rx_ring->next_to_use = 0; | 5398 | rx_ring->next_to_use = 0; |
4963 | 5399 | ||
4964 | return 0; | 5400 | return 0; |
4965 | 5401 | err: | |
4966 | alloc_failed: | 5402 | vfree(rx_ring->rx_buffer_info); |
5403 | rx_ring->rx_buffer_info = NULL; | ||
5404 | dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); | ||
4967 | return -ENOMEM; | 5405 | return -ENOMEM; |
4968 | } | 5406 | } |
4969 | 5407 | ||
@@ -4977,13 +5415,12 @@ alloc_failed: | |||
4977 | * | 5415 | * |
4978 | * Return 0 on success, negative on failure | 5416 | * Return 0 on success, negative on failure |
4979 | **/ | 5417 | **/ |
4980 | |||
4981 | static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) | 5418 | static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) |
4982 | { | 5419 | { |
4983 | int i, err = 0; | 5420 | int i, err = 0; |
4984 | 5421 | ||
4985 | for (i = 0; i < adapter->num_rx_queues; i++) { | 5422 | for (i = 0; i < adapter->num_rx_queues; i++) { |
4986 | err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); | 5423 | err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); |
4987 | if (!err) | 5424 | if (!err) |
4988 | continue; | 5425 | continue; |
4989 | e_err(probe, "Allocation for Rx Queue %u failed\n", i); | 5426 | e_err(probe, "Allocation for Rx Queue %u failed\n", i); |
@@ -4995,23 +5432,23 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) | |||
4995 | 5432 | ||
4996 | /** | 5433 | /** |
4997 | * ixgbe_free_tx_resources - Free Tx Resources per Queue | 5434 | * ixgbe_free_tx_resources - Free Tx Resources per Queue |
4998 | * @adapter: board private structure | ||
4999 | * @tx_ring: Tx descriptor ring for a specific queue | 5435 | * @tx_ring: Tx descriptor ring for a specific queue |
5000 | * | 5436 | * |
5001 | * Free all transmit software resources | 5437 | * Free all transmit software resources |
5002 | **/ | 5438 | **/ |
5003 | void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, | 5439 | void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) |
5004 | struct ixgbe_ring *tx_ring) | ||
5005 | { | 5440 | { |
5006 | struct pci_dev *pdev = adapter->pdev; | 5441 | ixgbe_clean_tx_ring(tx_ring); |
5007 | |||
5008 | ixgbe_clean_tx_ring(adapter, tx_ring); | ||
5009 | 5442 | ||
5010 | vfree(tx_ring->tx_buffer_info); | 5443 | vfree(tx_ring->tx_buffer_info); |
5011 | tx_ring->tx_buffer_info = NULL; | 5444 | tx_ring->tx_buffer_info = NULL; |
5012 | 5445 | ||
5013 | dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, | 5446 | /* if not set, then don't free */ |
5014 | tx_ring->dma); | 5447 | if (!tx_ring->desc) |
5448 | return; | ||
5449 | |||
5450 | dma_free_coherent(tx_ring->dev, tx_ring->size, | ||
5451 | tx_ring->desc, tx_ring->dma); | ||
5015 | 5452 | ||
5016 | tx_ring->desc = NULL; | 5453 | tx_ring->desc = NULL; |
5017 | } | 5454 | } |
@@ -5028,28 +5465,28 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) | |||
5028 | 5465 | ||
5029 | for (i = 0; i < adapter->num_tx_queues; i++) | 5466 | for (i = 0; i < adapter->num_tx_queues; i++) |
5030 | if (adapter->tx_ring[i]->desc) | 5467 | if (adapter->tx_ring[i]->desc) |
5031 | ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]); | 5468 | ixgbe_free_tx_resources(adapter->tx_ring[i]); |
5032 | } | 5469 | } |
5033 | 5470 | ||
5034 | /** | 5471 | /** |
5035 | * ixgbe_free_rx_resources - Free Rx Resources | 5472 | * ixgbe_free_rx_resources - Free Rx Resources |
5036 | * @adapter: board private structure | ||
5037 | * @rx_ring: ring to clean the resources from | 5473 | * @rx_ring: ring to clean the resources from |
5038 | * | 5474 | * |
5039 | * Free all receive software resources | 5475 | * Free all receive software resources |
5040 | **/ | 5476 | **/ |
5041 | void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, | 5477 | void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) |
5042 | struct ixgbe_ring *rx_ring) | ||
5043 | { | 5478 | { |
5044 | struct pci_dev *pdev = adapter->pdev; | 5479 | ixgbe_clean_rx_ring(rx_ring); |
5045 | |||
5046 | ixgbe_clean_rx_ring(adapter, rx_ring); | ||
5047 | 5480 | ||
5048 | vfree(rx_ring->rx_buffer_info); | 5481 | vfree(rx_ring->rx_buffer_info); |
5049 | rx_ring->rx_buffer_info = NULL; | 5482 | rx_ring->rx_buffer_info = NULL; |
5050 | 5483 | ||
5051 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, | 5484 | /* if not set, then don't free */ |
5052 | rx_ring->dma); | 5485 | if (!rx_ring->desc) |
5486 | return; | ||
5487 | |||
5488 | dma_free_coherent(rx_ring->dev, rx_ring->size, | ||
5489 | rx_ring->desc, rx_ring->dma); | ||
5053 | 5490 | ||
5054 | rx_ring->desc = NULL; | 5491 | rx_ring->desc = NULL; |
5055 | } | 5492 | } |
@@ -5066,7 +5503,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) | |||
5066 | 5503 | ||
5067 | for (i = 0; i < adapter->num_rx_queues; i++) | 5504 | for (i = 0; i < adapter->num_rx_queues; i++) |
5068 | if (adapter->rx_ring[i]->desc) | 5505 | if (adapter->rx_ring[i]->desc) |
5069 | ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]); | 5506 | ixgbe_free_rx_resources(adapter->rx_ring[i]); |
5070 | } | 5507 | } |
5071 | 5508 | ||
5072 | /** | 5509 | /** |
@@ -5079,16 +5516,26 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) | |||
5079 | static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) | 5516 | static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) |
5080 | { | 5517 | { |
5081 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 5518 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
5519 | struct ixgbe_hw *hw = &adapter->hw; | ||
5082 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; | 5520 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; |
5083 | 5521 | ||
5084 | /* MTU < 68 is an error and causes problems on some kernels */ | 5522 | /* MTU < 68 is an error and causes problems on some kernels */ |
5085 | if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) | 5523 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED && |
5086 | return -EINVAL; | 5524 | hw->mac.type != ixgbe_mac_X540) { |
5525 | if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) | ||
5526 | return -EINVAL; | ||
5527 | } else { | ||
5528 | if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) | ||
5529 | return -EINVAL; | ||
5530 | } | ||
5087 | 5531 | ||
5088 | e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); | 5532 | e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); |
5089 | /* must set new MTU before calling down or up */ | 5533 | /* must set new MTU before calling down or up */ |
5090 | netdev->mtu = new_mtu; | 5534 | netdev->mtu = new_mtu; |
5091 | 5535 | ||
5536 | hw->fc.high_water = FC_HIGH_WATER(max_frame); | ||
5537 | hw->fc.low_water = FC_LOW_WATER(max_frame); | ||
5538 | |||
5092 | if (netif_running(netdev)) | 5539 | if (netif_running(netdev)) |
5093 | ixgbe_reinit_locked(adapter); | 5540 | ixgbe_reinit_locked(adapter); |
5094 | 5541 | ||
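The MTU checks above operate on the full wire frame, max_frame = mtu + ETH_HLEN + ETH_FCS_LEN, and the same value now feeds the FC_HIGH_WATER()/FC_LOW_WATER() flow-control thresholds. A trivial sketch of the frame-size arithmetic (the MTU values are illustrative; the SR-IOV and jumbo frame limits are not reproduced here):

/* max_frame = mtu + Ethernet header + FCS */
#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header */
#define ETH_FCS_LEN	4	/* frame check sequence */

int main(void)
{
	const int mtus[] = { 68, 1500, 9000 };
	unsigned int i;

	for (i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++)
		printf("MTU %4d -> max_frame %4d\n",
		       mtus[i], mtus[i] + ETH_HLEN + ETH_FCS_LEN);
	return 0;
}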
@@ -5184,8 +5631,8 @@ static int ixgbe_close(struct net_device *netdev) | |||
5184 | #ifdef CONFIG_PM | 5631 | #ifdef CONFIG_PM |
5185 | static int ixgbe_resume(struct pci_dev *pdev) | 5632 | static int ixgbe_resume(struct pci_dev *pdev) |
5186 | { | 5633 | { |
5187 | struct net_device *netdev = pci_get_drvdata(pdev); | 5634 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
5188 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 5635 | struct net_device *netdev = adapter->netdev; |
5189 | u32 err; | 5636 | u32 err; |
5190 | 5637 | ||
5191 | pci_set_power_state(pdev, PCI_D0); | 5638 | pci_set_power_state(pdev, PCI_D0); |
@@ -5216,7 +5663,7 @@ static int ixgbe_resume(struct pci_dev *pdev) | |||
5216 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); | 5663 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); |
5217 | 5664 | ||
5218 | if (netif_running(netdev)) { | 5665 | if (netif_running(netdev)) { |
5219 | err = ixgbe_open(adapter->netdev); | 5666 | err = ixgbe_open(netdev); |
5220 | if (err) | 5667 | if (err) |
5221 | return err; | 5668 | return err; |
5222 | } | 5669 | } |
@@ -5229,8 +5676,8 @@ static int ixgbe_resume(struct pci_dev *pdev) | |||
5229 | 5676 | ||
5230 | static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) | 5677 | static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) |
5231 | { | 5678 | { |
5232 | struct net_device *netdev = pci_get_drvdata(pdev); | 5679 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
5233 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 5680 | struct net_device *netdev = adapter->netdev; |
5234 | struct ixgbe_hw *hw = &adapter->hw; | 5681 | struct ixgbe_hw *hw = &adapter->hw; |
5235 | u32 ctrl, fctrl; | 5682 | u32 ctrl, fctrl; |
5236 | u32 wufc = adapter->wol; | 5683 | u32 wufc = adapter->wol; |
@@ -5247,6 +5694,12 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
5247 | ixgbe_free_all_rx_resources(adapter); | 5694 | ixgbe_free_all_rx_resources(adapter); |
5248 | } | 5695 | } |
5249 | 5696 | ||
5697 | ixgbe_clear_interrupt_scheme(adapter); | ||
5698 | #ifdef CONFIG_DCB | ||
5699 | kfree(adapter->ixgbe_ieee_pfc); | ||
5700 | kfree(adapter->ixgbe_ieee_ets); | ||
5701 | #endif | ||
5702 | |||
5250 | #ifdef CONFIG_PM | 5703 | #ifdef CONFIG_PM |
5251 | retval = pci_save_state(pdev); | 5704 | retval = pci_save_state(pdev); |
5252 | if (retval) | 5705 | if (retval) |
@@ -5273,15 +5726,20 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
5273 | IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); | 5726 | IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); |
5274 | } | 5727 | } |
5275 | 5728 | ||
5276 | if (wufc && hw->mac.type == ixgbe_mac_82599EB) | 5729 | switch (hw->mac.type) { |
5277 | pci_wake_from_d3(pdev, true); | 5730 | case ixgbe_mac_82598EB: |
5278 | else | ||
5279 | pci_wake_from_d3(pdev, false); | 5731 | pci_wake_from_d3(pdev, false); |
5732 | break; | ||
5733 | case ixgbe_mac_82599EB: | ||
5734 | case ixgbe_mac_X540: | ||
5735 | pci_wake_from_d3(pdev, !!wufc); | ||
5736 | break; | ||
5737 | default: | ||
5738 | break; | ||
5739 | } | ||
5280 | 5740 | ||
5281 | *enable_wake = !!wufc; | 5741 | *enable_wake = !!wufc; |
5282 | 5742 | ||
5283 | ixgbe_clear_interrupt_scheme(adapter); | ||
5284 | |||
5285 | ixgbe_release_hw_control(adapter); | 5743 | ixgbe_release_hw_control(adapter); |
5286 | 5744 | ||
5287 | pci_disable_device(pdev); | 5745 | pci_disable_device(pdev); |
@@ -5330,9 +5788,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
5330 | { | 5788 | { |
5331 | struct net_device *netdev = adapter->netdev; | 5789 | struct net_device *netdev = adapter->netdev; |
5332 | struct ixgbe_hw *hw = &adapter->hw; | 5790 | struct ixgbe_hw *hw = &adapter->hw; |
5791 | struct ixgbe_hw_stats *hwstats = &adapter->stats; | ||
5333 | u64 total_mpc = 0; | 5792 | u64 total_mpc = 0; |
5334 | u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; | 5793 | u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; |
5335 | u64 non_eop_descs = 0, restart_queue = 0; | 5794 | u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; |
5795 | u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; | ||
5796 | u64 bytes = 0, packets = 0; | ||
5336 | 5797 | ||
5337 | if (test_bit(__IXGBE_DOWN, &adapter->state) || | 5798 | if (test_bit(__IXGBE_DOWN, &adapter->state) || |
5338 | test_bit(__IXGBE_RESETTING, &adapter->state)) | 5799 | test_bit(__IXGBE_RESETTING, &adapter->state)) |
@@ -5343,158 +5804,227 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
5343 | u64 rsc_flush = 0; | 5804 | u64 rsc_flush = 0; |
5344 | for (i = 0; i < 16; i++) | 5805 | for (i = 0; i < 16; i++) |
5345 | adapter->hw_rx_no_dma_resources += | 5806 | adapter->hw_rx_no_dma_resources += |
5346 | IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); | 5807 | IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); |
5347 | for (i = 0; i < adapter->num_rx_queues; i++) { | 5808 | for (i = 0; i < adapter->num_rx_queues; i++) { |
5348 | rsc_count += adapter->rx_ring[i]->rsc_count; | 5809 | rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; |
5349 | rsc_flush += adapter->rx_ring[i]->rsc_flush; | 5810 | rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; |
5350 | } | 5811 | } |
5351 | adapter->rsc_total_count = rsc_count; | 5812 | adapter->rsc_total_count = rsc_count; |
5352 | adapter->rsc_total_flush = rsc_flush; | 5813 | adapter->rsc_total_flush = rsc_flush; |
5353 | } | 5814 | } |
5354 | 5815 | ||
5816 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
5817 | struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; | ||
5818 | non_eop_descs += rx_ring->rx_stats.non_eop_descs; | ||
5819 | alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; | ||
5820 | alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; | ||
5821 | bytes += rx_ring->stats.bytes; | ||
5822 | packets += rx_ring->stats.packets; | ||
5823 | } | ||
5824 | adapter->non_eop_descs = non_eop_descs; | ||
5825 | adapter->alloc_rx_page_failed = alloc_rx_page_failed; | ||
5826 | adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; | ||
5827 | netdev->stats.rx_bytes = bytes; | ||
5828 | netdev->stats.rx_packets = packets; | ||
5829 | |||
5830 | bytes = 0; | ||
5831 | packets = 0; | ||
5355 | /* gather some stats to the adapter struct that are per queue */ | 5832 | /* gather some stats to the adapter struct that are per queue */ |
5356 | for (i = 0; i < adapter->num_tx_queues; i++) | 5833 | for (i = 0; i < adapter->num_tx_queues; i++) { |
5357 | restart_queue += adapter->tx_ring[i]->restart_queue; | 5834 | struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; |
5835 | restart_queue += tx_ring->tx_stats.restart_queue; | ||
5836 | tx_busy += tx_ring->tx_stats.tx_busy; | ||
5837 | bytes += tx_ring->stats.bytes; | ||
5838 | packets += tx_ring->stats.packets; | ||
5839 | } | ||
5358 | adapter->restart_queue = restart_queue; | 5840 | adapter->restart_queue = restart_queue; |
5841 | adapter->tx_busy = tx_busy; | ||
5842 | netdev->stats.tx_bytes = bytes; | ||
5843 | netdev->stats.tx_packets = packets; | ||
5359 | 5844 | ||
5360 | for (i = 0; i < adapter->num_rx_queues; i++) | 5845 | hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); |
5361 | non_eop_descs += adapter->rx_ring[i]->non_eop_descs; | ||
5362 | adapter->non_eop_descs = non_eop_descs; | ||
5363 | |||
5364 | adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); | ||
5365 | for (i = 0; i < 8; i++) { | 5846 | for (i = 0; i < 8; i++) { |
5366 | /* for packet buffers not used, the register should read 0 */ | 5847 | /* for packet buffers not used, the register should read 0 */ |
5367 | mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); | 5848 | mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); |
5368 | missed_rx += mpc; | 5849 | missed_rx += mpc; |
5369 | adapter->stats.mpc[i] += mpc; | 5850 | hwstats->mpc[i] += mpc; |
5370 | total_mpc += adapter->stats.mpc[i]; | 5851 | total_mpc += hwstats->mpc[i]; |
5371 | if (hw->mac.type == ixgbe_mac_82598EB) | 5852 | if (hw->mac.type == ixgbe_mac_82598EB) |
5372 | adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); | 5853 | hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); |
5373 | adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); | 5854 | hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); |
5374 | adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); | 5855 | hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); |
5375 | adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); | 5856 | hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); |
5376 | adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); | 5857 | hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); |
5377 | if (hw->mac.type == ixgbe_mac_82599EB) { | 5858 | switch (hw->mac.type) { |
5378 | adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, | 5859 | case ixgbe_mac_82598EB: |
5379 | IXGBE_PXONRXCNT(i)); | 5860 | hwstats->pxonrxc[i] += |
5380 | adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, | 5861 | IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); |
5381 | IXGBE_PXOFFRXCNT(i)); | 5862 | break; |
5382 | adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); | 5863 | case ixgbe_mac_82599EB: |
5383 | } else { | 5864 | case ixgbe_mac_X540: |
5384 | adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, | 5865 | hwstats->pxonrxc[i] += |
5385 | IXGBE_PXONRXC(i)); | 5866 | IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); |
5386 | adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, | 5867 | break; |
5387 | IXGBE_PXOFFRXC(i)); | 5868 | default: |
5869 | break; | ||
5388 | } | 5870 | } |
5389 | adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw, | 5871 | hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); |
5390 | IXGBE_PXONTXC(i)); | 5872 | hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); |
5391 | adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw, | ||
5392 | IXGBE_PXOFFTXC(i)); | ||
5393 | } | 5873 | } |
5394 | adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); | 5874 | hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); |
5395 | /* work around hardware counting issue */ | 5875 | /* work around hardware counting issue */ |
5396 | adapter->stats.gprc -= missed_rx; | 5876 | hwstats->gprc -= missed_rx; |
5877 | |||
5878 | ixgbe_update_xoff_received(adapter); | ||
5397 | 5879 | ||
5398 | /* 82598 hardware only has a 32 bit counter in the high register */ | 5880 | /* 82598 hardware only has a 32 bit counter in the high register */ |
5399 | if (hw->mac.type == ixgbe_mac_82599EB) { | 5881 | switch (hw->mac.type) { |
5400 | u64 tmp; | 5882 | case ixgbe_mac_82598EB: |
5401 | adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); | 5883 | hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); |
5402 | tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */ | 5884 | hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); |
5403 | adapter->stats.gorc += (tmp << 32); | 5885 | hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); |
5404 | adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); | 5886 | hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); |
5405 | tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */ | 5887 | break; |
5406 | adapter->stats.gotc += (tmp << 32); | 5888 | case ixgbe_mac_X540: |
5407 | adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL); | 5889 | /* OS2BMC stats are X540 only*/ |
5890 | hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); | ||
5891 | hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); | ||
5892 | hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); | ||
5893 | hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); | ||
5894 | case ixgbe_mac_82599EB: | ||
5895 | hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); | ||
5896 | IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ | ||
5897 | hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); | ||
5898 | IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ | ||
5899 | hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); | ||
5408 | IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ | 5900 | IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ |
5409 | adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); | 5901 | hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); |
5410 | adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); | 5902 | hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); |
5411 | adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); | 5903 | hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); |
5412 | adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); | ||
5413 | #ifdef IXGBE_FCOE | 5904 | #ifdef IXGBE_FCOE |
5414 | adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); | 5905 | hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); |
5415 | adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); | 5906 | hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); |
5416 | adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); | 5907 | hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); |
5417 | adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); | 5908 | hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); |
5418 | adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); | 5909 | hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); |
5419 | adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); | 5910 | hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); |
5420 | #endif /* IXGBE_FCOE */ | 5911 | #endif /* IXGBE_FCOE */ |
5421 | } else { | 5912 | break; |
5422 | adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); | 5913 | default: |
5423 | adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); | 5914 | break; |
5424 | adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); | ||
5425 | adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); | ||
5426 | adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH); | ||
5427 | } | 5915 | } |
5428 | bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); | 5916 | bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); |
5429 | adapter->stats.bprc += bprc; | 5917 | hwstats->bprc += bprc; |
5430 | adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); | 5918 | hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); |
5431 | if (hw->mac.type == ixgbe_mac_82598EB) | 5919 | if (hw->mac.type == ixgbe_mac_82598EB) |
5432 | adapter->stats.mprc -= bprc; | 5920 | hwstats->mprc -= bprc; |
5433 | adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC); | 5921 | hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); |
5434 | adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); | 5922 | hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); |
5435 | adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); | 5923 | hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); |
5436 | adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); | 5924 | hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); |
5437 | adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); | 5925 | hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); |
5438 | adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); | 5926 | hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); |
5439 | adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); | 5927 | hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); |
5440 | adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); | 5928 | hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); |
5441 | lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); | 5929 | lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); |
5442 | adapter->stats.lxontxc += lxon; | 5930 | hwstats->lxontxc += lxon; |
5443 | lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); | 5931 | lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); |
5444 | adapter->stats.lxofftxc += lxoff; | 5932 | hwstats->lxofftxc += lxoff; |
5445 | adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); | 5933 | hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); |
5446 | adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); | 5934 | hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); |
5447 | adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); | 5935 | hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); |
5448 | /* | 5936 | /* |
5449 | * 82598 errata - tx of flow control packets is included in tx counters | 5937 | * 82598 errata - tx of flow control packets is included in tx counters |
5450 | */ | 5938 | */ |
5451 | xon_off_tot = lxon + lxoff; | 5939 | xon_off_tot = lxon + lxoff; |
5452 | adapter->stats.gptc -= xon_off_tot; | 5940 | hwstats->gptc -= xon_off_tot; |
5453 | adapter->stats.mptc -= xon_off_tot; | 5941 | hwstats->mptc -= xon_off_tot; |
5454 | adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); | 5942 | hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); |
5455 | adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); | 5943 | hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); |
5456 | adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); | 5944 | hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); |
5457 | adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); | 5945 | hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); |
5458 | adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); | 5946 | hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); |
5459 | adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); | 5947 | hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); |
5460 | adapter->stats.ptc64 -= xon_off_tot; | 5948 | hwstats->ptc64 -= xon_off_tot; |
5461 | adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); | 5949 | hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); |
5462 | adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); | 5950 | hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); |
5463 | adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); | 5951 | hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); |
5464 | adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); | 5952 | hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); |
5465 | adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); | 5953 | hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); |
5466 | adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); | 5954 | hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); |
5467 | 5955 | ||
5468 | /* Fill out the OS statistics structure */ | 5956 | /* Fill out the OS statistics structure */ |
5469 | netdev->stats.multicast = adapter->stats.mprc; | 5957 | netdev->stats.multicast = hwstats->mprc; |
5470 | 5958 | ||
5471 | /* Rx Errors */ | 5959 | /* Rx Errors */ |
5472 | netdev->stats.rx_errors = adapter->stats.crcerrs + | 5960 | netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; |
5473 | adapter->stats.rlec; | ||
5474 | netdev->stats.rx_dropped = 0; | 5961 | netdev->stats.rx_dropped = 0; |
5475 | netdev->stats.rx_length_errors = adapter->stats.rlec; | 5962 | netdev->stats.rx_length_errors = hwstats->rlec; |
5476 | netdev->stats.rx_crc_errors = adapter->stats.crcerrs; | 5963 | netdev->stats.rx_crc_errors = hwstats->crcerrs; |
5477 | netdev->stats.rx_missed_errors = total_mpc; | 5964 | netdev->stats.rx_missed_errors = total_mpc; |
5478 | } | 5965 | } |
5479 | 5966 | ||
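The hunk above moves the driver from adapter-wide software counters to per-ring rx_stats/tx_stats that are summed once per pass and then published to the adapter and netdev totals. Below is a minimal standalone sketch of that accumulate-then-publish pattern; the struct and field names are illustrative only, not the driver's real types.

/* Accumulate per-ring counters locally, publish the totals once at the
 * end so readers never see a half-updated set of counters.
 * 'struct ring' and 'struct totals' are invented for this sketch. */
#include <stdio.h>

struct ring { unsigned long long bytes, packets; };
struct totals { unsigned long long rx_bytes, rx_packets; };

static void update_stats(const struct ring *rings, int n, struct totals *out)
{
	unsigned long long bytes = 0, packets = 0;
	int i;

	for (i = 0; i < n; i++) {        /* gather per-queue counters */
		bytes += rings[i].bytes;
		packets += rings[i].packets;
	}
	out->rx_bytes = bytes;           /* publish once, at the end */
	out->rx_packets = packets;
}

int main(void)
{
	struct ring rings[2] = { { 1500, 1 }, { 3000, 2 } };
	struct totals t;

	update_stats(rings, 2, &t);
	printf("%llu bytes, %llu packets\n", t.rx_bytes, t.rx_packets);
	return 0;
}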
5480 | /** | 5967 | /** |
5481 | * ixgbe_watchdog - Timer Call-back | 5968 | * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table |
5482 | * @data: pointer to adapter cast into an unsigned long | 5969 | * @adapter - pointer to the device adapter structure |
5483 | **/ | 5970 | **/ |
5484 | static void ixgbe_watchdog(unsigned long data) | 5971 | static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) |
5485 | { | 5972 | { |
5486 | struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; | ||
5487 | struct ixgbe_hw *hw = &adapter->hw; | 5973 | struct ixgbe_hw *hw = &adapter->hw; |
5488 | u64 eics = 0; | ||
5489 | int i; | 5974 | int i; |
5490 | 5975 | ||
5491 | /* | 5976 | if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) |
5492 | * Do the watchdog outside of interrupt context due to the lovely | 5977 | return; |
5493 | * delays that some of the newer hardware requires | 5978 | |
5494 | */ | 5979 | adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; |
5495 | 5980 | ||
5981 | /* if interface is down do nothing */ | ||
5496 | if (test_bit(__IXGBE_DOWN, &adapter->state)) | 5982 | if (test_bit(__IXGBE_DOWN, &adapter->state)) |
5497 | goto watchdog_short_circuit; | 5983 | return; |
5984 | |||
5985 | /* do nothing if we are not using signature filters */ | ||
5986 | if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) | ||
5987 | return; | ||
5988 | |||
5989 | adapter->fdir_overflow++; | ||
5990 | |||
5991 | if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { | ||
5992 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
5993 | set_bit(__IXGBE_TX_FDIR_INIT_DONE, | ||
5994 | &(adapter->tx_ring[i]->state)); | ||
5995 | /* re-enable flow director interrupts */ | ||
5996 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); | ||
5997 | } else { | ||
5998 | e_err(probe, "failed to finish FDIR re-initialization, " | ||
5999 | "ignored adding FDIR ATR filters\n"); | ||
6000 | } | ||
6001 | } | ||
6002 | |||
6003 | /** | ||
6004 | * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts | ||
6005 | * @adapter - pointer to the device adapter structure | ||
6006 | * | ||
6007 | * This function serves two purposes. First it strobes the interrupt lines | ||
6008 | * in order to make certain interrupts are occurring. Secondly it sets the | ||
6009 | * bits needed to check for TX hangs. As a result we should immediately | ||
6010 | * determine if a hang has occurred. | ||
6011 | */ | ||
6012 | static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) | ||
6013 | { | ||
6014 | struct ixgbe_hw *hw = &adapter->hw; | ||
6015 | u64 eics = 0; | ||
6016 | int i; | ||
6017 | |||
6018 | /* If we're down or resetting, just bail */ | ||
6019 | if (test_bit(__IXGBE_DOWN, &adapter->state) || | ||
6020 | test_bit(__IXGBE_RESETTING, &adapter->state)) | ||
6021 | return; | ||
6022 | |||
6023 | /* Force detection of hung controller */ | ||
6024 | if (netif_carrier_ok(adapter->netdev)) { | ||
6025 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
6026 | set_check_for_tx_hang(adapter->tx_ring[i]); | ||
6027 | } | ||
5498 | 6028 | ||
5499 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { | 6029 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { |
5500 | /* | 6030 | /* |
@@ -5504,200 +6034,157 @@ static void ixgbe_watchdog(unsigned long data) | |||
5504 | */ | 6034 | */ |
5505 | IXGBE_WRITE_REG(hw, IXGBE_EICS, | 6035 | IXGBE_WRITE_REG(hw, IXGBE_EICS, |
5506 | (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); | 6036 | (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); |
5507 | goto watchdog_reschedule; | 6037 | } else { |
5508 | } | 6038 | /* get one bit for every active tx/rx interrupt vector */ |
5509 | 6039 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { | |
5510 | /* get one bit for every active tx/rx interrupt vector */ | 6040 | struct ixgbe_q_vector *qv = adapter->q_vector[i]; |
5511 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { | 6041 | if (qv->rxr_count || qv->txr_count) |
5512 | struct ixgbe_q_vector *qv = adapter->q_vector[i]; | 6042 | eics |= ((u64)1 << i); |
5513 | if (qv->rxr_count || qv->txr_count) | 6043 | } |
5514 | eics |= ((u64)1 << i); | ||
5515 | } | 6044 | } |
5516 | 6045 | ||
5517 | /* Cause software interrupt to ensure rx rings are cleaned */ | 6046 | /* Cause software interrupt to ensure rings are cleaned */ |
5518 | ixgbe_irq_rearm_queues(adapter, eics); | 6047 | ixgbe_irq_rearm_queues(adapter, eics); |
5519 | 6048 | ||
5520 | watchdog_reschedule: | ||
5521 | /* Reset the timer */ | ||
5522 | mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); | ||
5523 | |||
5524 | watchdog_short_circuit: | ||
5525 | schedule_work(&adapter->watchdog_task); | ||
5526 | } | 6049 | } |
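ixgbe_check_hang_subtask() above flags every Tx ring for hang checking, then forces a software interrupt on each active MSI-X vector by building a bitmask, one bit per vector that owns Tx or Rx rings, and writing it to EICS. A small standalone sketch of that mask construction, with made-up types:

/* One bit per MSI-X vector that owns at least one Tx or Rx ring.
 * 'struct vec' is invented for this sketch, not a driver type. */
#include <stdint.h>
#include <stdio.h>

struct vec { int rxr_count, txr_count; };

static uint64_t active_vector_mask(const struct vec *v, int nvec)
{
	uint64_t eics = 0;
	int i;

	for (i = 0; i < nvec; i++)
		if (v[i].rxr_count || v[i].txr_count)
			eics |= (uint64_t)1 << i;   /* bit i -> strobe vector i */
	return eics;
}

int main(void)
{
	struct vec v[4] = { {1, 1}, {0, 0}, {0, 2}, {1, 0} };

	/* vectors 0, 2 and 3 are active -> mask 0xd */
	printf("EICS mask: 0x%llx\n",
	       (unsigned long long)active_vector_mask(v, 4));
	return 0;
}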
5527 | 6050 | ||
5528 | /** | 6051 | /** |
5529 | * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber | 6052 | * ixgbe_watchdog_update_link - update the link status |
5530 | * @work: pointer to work_struct containing our data | 6053 | * @adapter - pointer to the device adapter structure |
6054 | * @link_speed - pointer to a u32 to store the link_speed | ||
5531 | **/ | 6055 | **/ |
5532 | static void ixgbe_multispeed_fiber_task(struct work_struct *work) | 6056 | static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) |
5533 | { | 6057 | { |
5534 | struct ixgbe_adapter *adapter = container_of(work, | ||
5535 | struct ixgbe_adapter, | ||
5536 | multispeed_fiber_task); | ||
5537 | struct ixgbe_hw *hw = &adapter->hw; | 6058 | struct ixgbe_hw *hw = &adapter->hw; |
5538 | u32 autoneg; | 6059 | u32 link_speed = adapter->link_speed; |
5539 | bool negotiation; | 6060 | bool link_up = adapter->link_up; |
6061 | int i; | ||
5540 | 6062 | ||
5541 | adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK; | 6063 | if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) |
5542 | autoneg = hw->phy.autoneg_advertised; | 6064 | return; |
5543 | if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) | 6065 | |
5544 | hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); | 6066 | if (hw->mac.ops.check_link) { |
5545 | hw->mac.autotry_restart = false; | 6067 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); |
5546 | if (hw->mac.ops.setup_link) | 6068 | } else { |
5547 | hw->mac.ops.setup_link(hw, autoneg, negotiation, true); | 6069 | /* always assume link is up, if no check link function */ |
5548 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; | 6070 | link_speed = IXGBE_LINK_SPEED_10GB_FULL; |
5549 | adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK; | 6071 | link_up = true; |
6072 | } | ||
6073 | if (link_up) { | ||
6074 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
6075 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) | ||
6076 | hw->mac.ops.fc_enable(hw, i); | ||
6077 | } else { | ||
6078 | hw->mac.ops.fc_enable(hw, 0); | ||
6079 | } | ||
6080 | } | ||
6081 | |||
6082 | if (link_up || | ||
6083 | time_after(jiffies, (adapter->link_check_timeout + | ||
6084 | IXGBE_TRY_LINK_TIMEOUT))) { | ||
6085 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; | ||
6086 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); | ||
6087 | IXGBE_WRITE_FLUSH(hw); | ||
6088 | } | ||
6089 | |||
6090 | adapter->link_up = link_up; | ||
6091 | adapter->link_speed = link_speed; | ||
5550 | } | 6092 | } |
5551 | 6093 | ||
5552 | /** | 6094 | /** |
5553 | * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module | 6095 | * ixgbe_watchdog_link_is_up - update netif_carrier status and |
5554 | * @work: pointer to work_struct containing our data | 6096 | * print link up message |
6097 | * @adapter - pointer to the device adapter structure | ||
5555 | **/ | 6098 | **/ |
5556 | static void ixgbe_sfp_config_module_task(struct work_struct *work) | 6099 | static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) |
5557 | { | 6100 | { |
5558 | struct ixgbe_adapter *adapter = container_of(work, | 6101 | struct net_device *netdev = adapter->netdev; |
5559 | struct ixgbe_adapter, | ||
5560 | sfp_config_module_task); | ||
5561 | struct ixgbe_hw *hw = &adapter->hw; | 6102 | struct ixgbe_hw *hw = &adapter->hw; |
5562 | u32 err; | 6103 | u32 link_speed = adapter->link_speed; |
6104 | bool flow_rx, flow_tx; | ||
5563 | 6105 | ||
5564 | adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK; | 6106 | /* only continue if link was previously down */ |
6107 | if (netif_carrier_ok(netdev)) | ||
6108 | return; | ||
5565 | 6109 | ||
5566 | /* Time for electrical oscillations to settle down */ | 6110 | adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; |
5567 | msleep(100); | ||
5568 | err = hw->phy.ops.identify_sfp(hw); | ||
5569 | 6111 | ||
5570 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 6112 | switch (hw->mac.type) { |
5571 | e_dev_err("failed to initialize because an unsupported SFP+ " | 6113 | case ixgbe_mac_82598EB: { |
5572 | "module type was detected.\n"); | 6114 | u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); |
5573 | e_dev_err("Reload the driver after installing a supported " | 6115 | u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); |
5574 | "module.\n"); | 6116 | flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); |
5575 | unregister_netdev(adapter->netdev); | 6117 | flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); |
5576 | return; | 6118 | } |
6119 | break; | ||
6120 | case ixgbe_mac_X540: | ||
6121 | case ixgbe_mac_82599EB: { | ||
6122 | u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); | ||
6123 | u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); | ||
6124 | flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); | ||
6125 | flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); | ||
5577 | } | 6126 | } |
5578 | hw->mac.ops.setup_sfp(hw); | 6127 | break; |
6128 | default: | ||
6129 | flow_tx = false; | ||
6130 | flow_rx = false; | ||
6131 | break; | ||
6132 | } | ||
6133 | e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", | ||
6134 | (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? | ||
6135 | "10 Gbps" : | ||
6136 | (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? | ||
6137 | "1 Gbps" : | ||
6138 | (link_speed == IXGBE_LINK_SPEED_100_FULL ? | ||
6139 | "100 Mbps" : | ||
6140 | "unknown speed"))), | ||
6141 | ((flow_rx && flow_tx) ? "RX/TX" : | ||
6142 | (flow_rx ? "RX" : | ||
6143 | (flow_tx ? "TX" : "None")))); | ||
5579 | 6144 | ||
5580 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) | 6145 | netif_carrier_on(netdev); |
5581 | /* This will also work for DA Twinax connections */ | 6146 | #ifdef HAVE_IPLINK_VF_CONFIG |
5582 | schedule_work(&adapter->multispeed_fiber_task); | 6147 | ixgbe_check_vf_rate_limit(adapter); |
5583 | adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK; | 6148 | #endif /* HAVE_IPLINK_VF_CONFIG */ |
5584 | } | 6149 | } |
5585 | 6150 | ||
5586 | /** | 6151 | /** |
5587 | * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table | 6152 | * ixgbe_watchdog_link_is_down - update netif_carrier status and |
5588 | * @work: pointer to work_struct containing our data | 6153 | * print link down message |
6154 | * @adapter - pointer to the adapter structure | ||
5589 | **/ | 6155 | **/ |
5590 | static void ixgbe_fdir_reinit_task(struct work_struct *work) | 6156 | static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) |
5591 | { | 6157 | { |
5592 | struct ixgbe_adapter *adapter = container_of(work, | 6158 | struct net_device *netdev = adapter->netdev; |
5593 | struct ixgbe_adapter, | ||
5594 | fdir_reinit_task); | ||
5595 | struct ixgbe_hw *hw = &adapter->hw; | 6159 | struct ixgbe_hw *hw = &adapter->hw; |
5596 | int i; | ||
5597 | 6160 | ||
5598 | if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { | 6161 | adapter->link_up = false; |
5599 | for (i = 0; i < adapter->num_tx_queues; i++) | 6162 | adapter->link_speed = 0; |
5600 | set_bit(__IXGBE_FDIR_INIT_DONE, | ||
5601 | &(adapter->tx_ring[i]->reinit_state)); | ||
5602 | } else { | ||
5603 | e_err(probe, "failed to finish FDIR re-initialization, " | ||
5604 | "ignored adding FDIR ATR filters\n"); | ||
5605 | } | ||
5606 | /* Done FDIR Re-initialization, enable transmits */ | ||
5607 | netif_tx_start_all_queues(adapter->netdev); | ||
5608 | } | ||
5609 | 6163 | ||
5610 | static DEFINE_MUTEX(ixgbe_watchdog_lock); | 6164 | /* only continue if link was up previously */ |
6165 | if (!netif_carrier_ok(netdev)) | ||
6166 | return; | ||
6167 | |||
6168 | /* poll for SFP+ cable when link is down */ | ||
6169 | if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) | ||
6170 | adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; | ||
6171 | |||
6172 | e_info(drv, "NIC Link is Down\n"); | ||
6173 | netif_carrier_off(netdev); | ||
6174 | } | ||
5611 | 6175 | ||
5612 | /** | 6176 | /** |
5613 | * ixgbe_watchdog_task - worker thread to bring link up | 6177 | * ixgbe_watchdog_flush_tx - flush queues on link down |
5614 | * @work: pointer to work_struct containing our data | 6178 | * @adapter - pointer to the device adapter structure |
5615 | **/ | 6179 | **/ |
5616 | static void ixgbe_watchdog_task(struct work_struct *work) | 6180 | static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) |
5617 | { | 6181 | { |
5618 | struct ixgbe_adapter *adapter = container_of(work, | ||
5619 | struct ixgbe_adapter, | ||
5620 | watchdog_task); | ||
5621 | struct net_device *netdev = adapter->netdev; | ||
5622 | struct ixgbe_hw *hw = &adapter->hw; | ||
5623 | u32 link_speed; | ||
5624 | bool link_up; | ||
5625 | int i; | 6182 | int i; |
5626 | struct ixgbe_ring *tx_ring; | ||
5627 | int some_tx_pending = 0; | 6183 | int some_tx_pending = 0; |
5628 | 6184 | ||
5629 | mutex_lock(&ixgbe_watchdog_lock); | 6185 | if (!netif_carrier_ok(adapter->netdev)) { |
5630 | |||
5631 | link_up = adapter->link_up; | ||
5632 | link_speed = adapter->link_speed; | ||
5633 | |||
5634 | if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { | ||
5635 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); | ||
5636 | if (link_up) { | ||
5637 | #ifdef CONFIG_DCB | ||
5638 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
5639 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) | ||
5640 | hw->mac.ops.fc_enable(hw, i); | ||
5641 | } else { | ||
5642 | hw->mac.ops.fc_enable(hw, 0); | ||
5643 | } | ||
5644 | #else | ||
5645 | hw->mac.ops.fc_enable(hw, 0); | ||
5646 | #endif | ||
5647 | } | ||
5648 | |||
5649 | if (link_up || | ||
5650 | time_after(jiffies, (adapter->link_check_timeout + | ||
5651 | IXGBE_TRY_LINK_TIMEOUT))) { | ||
5652 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; | ||
5653 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); | ||
5654 | } | ||
5655 | adapter->link_up = link_up; | ||
5656 | adapter->link_speed = link_speed; | ||
5657 | } | ||
5658 | |||
5659 | if (link_up) { | ||
5660 | if (!netif_carrier_ok(netdev)) { | ||
5661 | bool flow_rx, flow_tx; | ||
5662 | |||
5663 | if (hw->mac.type == ixgbe_mac_82599EB) { | ||
5664 | u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); | ||
5665 | u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); | ||
5666 | flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); | ||
5667 | flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); | ||
5668 | } else { | ||
5669 | u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); | ||
5670 | u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); | ||
5671 | flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); | ||
5672 | flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); | ||
5673 | } | ||
5674 | |||
5675 | e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", | ||
5676 | (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? | ||
5677 | "10 Gbps" : | ||
5678 | (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? | ||
5679 | "1 Gbps" : "unknown speed")), | ||
5680 | ((flow_rx && flow_tx) ? "RX/TX" : | ||
5681 | (flow_rx ? "RX" : | ||
5682 | (flow_tx ? "TX" : "None")))); | ||
5683 | |||
5684 | netif_carrier_on(netdev); | ||
5685 | } else { | ||
5686 | /* Force detection of hung controller */ | ||
5687 | adapter->detect_tx_hung = true; | ||
5688 | } | ||
5689 | } else { | ||
5690 | adapter->link_up = false; | ||
5691 | adapter->link_speed = 0; | ||
5692 | if (netif_carrier_ok(netdev)) { | ||
5693 | e_info(drv, "NIC Link is Down\n"); | ||
5694 | netif_carrier_off(netdev); | ||
5695 | } | ||
5696 | } | ||
5697 | |||
5698 | if (!netif_carrier_ok(netdev)) { | ||
5699 | for (i = 0; i < adapter->num_tx_queues; i++) { | 6186 | for (i = 0; i < adapter->num_tx_queues; i++) { |
5700 | tx_ring = adapter->tx_ring[i]; | 6187 | struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; |
5701 | if (tx_ring->next_to_use != tx_ring->next_to_clean) { | 6188 | if (tx_ring->next_to_use != tx_ring->next_to_clean) { |
5702 | some_tx_pending = 1; | 6189 | some_tx_pending = 1; |
5703 | break; | 6190 | break; |
@@ -5710,17 +6197,216 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
5710 | * to get done, so reset controller to flush Tx. | 6197 | * to get done, so reset controller to flush Tx. |
5711 | * (Do the reset outside of interrupt context). | 6198 | * (Do the reset outside of interrupt context). |
5712 | */ | 6199 | */ |
5713 | schedule_work(&adapter->reset_task); | 6200 | adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; |
5714 | } | 6201 | } |
5715 | } | 6202 | } |
6203 | } | ||
6204 | |||
6205 | static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) | ||
6206 | { | ||
6207 | u32 ssvpc; | ||
6208 | |||
6209 | /* Do not perform spoof check for 82598 */ | ||
6210 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) | ||
6211 | return; | ||
6212 | |||
6213 | ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); | ||
6214 | |||
6215 | /* | ||
6216 | * ssvpc register is cleared on read, if zero then no | ||
6217 | * spoofed packets in the last interval. | ||
6218 | */ | ||
6219 | if (!ssvpc) | ||
6220 | return; | ||
6221 | |||
6222 | e_warn(drv, "%d Spoofed packets detected\n", ssvpc); | ||
6223 | } | ||
6224 | |||
6225 | /** | ||
6226 | * ixgbe_watchdog_subtask - check and bring link up | ||
6227 | * @adapter - pointer to the device adapter structure | ||
6228 | **/ | ||
6229 | static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) | ||
6230 | { | ||
6231 | /* if interface is down do nothing */ | ||
6232 | if (test_bit(__IXGBE_DOWN, &adapter->state)) | ||
6233 | return; | ||
6234 | |||
6235 | ixgbe_watchdog_update_link(adapter); | ||
6236 | |||
6237 | if (adapter->link_up) | ||
6238 | ixgbe_watchdog_link_is_up(adapter); | ||
6239 | else | ||
6240 | ixgbe_watchdog_link_is_down(adapter); | ||
5716 | 6241 | ||
6242 | ixgbe_spoof_check(adapter); | ||
5717 | ixgbe_update_stats(adapter); | 6243 | ixgbe_update_stats(adapter); |
5718 | mutex_unlock(&ixgbe_watchdog_lock); | 6244 | |
6245 | ixgbe_watchdog_flush_tx(adapter); | ||
6246 | } | ||
6247 | |||
6248 | /** | ||
6249 | * ixgbe_sfp_detection_subtask - poll for SFP+ cable | ||
6250 | * @adapter - the ixgbe adapter structure | ||
6251 | **/ | ||
6252 | static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) | ||
6253 | { | ||
6254 | struct ixgbe_hw *hw = &adapter->hw; | ||
6255 | s32 err; | ||
6256 | |||
6257 | /* not searching for SFP so there is nothing to do here */ | ||
6258 | if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && | ||
6259 | !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) | ||
6260 | return; | ||
6261 | |||
6262 | /* someone else is in init, wait until next service event */ | ||
6263 | if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) | ||
6264 | return; | ||
6265 | |||
6266 | err = hw->phy.ops.identify_sfp(hw); | ||
6267 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) | ||
6268 | goto sfp_out; | ||
6269 | |||
6270 | if (err == IXGBE_ERR_SFP_NOT_PRESENT) { | ||
6271 | /* If no cable is present, then we need to reset | ||
6272 | * the next time we find a good cable. */ | ||
6273 | adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; | ||
6274 | } | ||
6275 | |||
6276 | /* exit on error */ | ||
6277 | if (err) | ||
6278 | goto sfp_out; | ||
6279 | |||
6280 | /* exit if reset not needed */ | ||
6281 | if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) | ||
6282 | goto sfp_out; | ||
6283 | |||
6284 | adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; | ||
6285 | |||
6286 | /* | ||
6287 | * A module may be identified correctly, but the EEPROM may not have | ||
6288 | * support for that module. setup_sfp() will fail in that case, so | ||
6289 | * we should not allow that module to load. | ||
6290 | */ | ||
6291 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
6292 | err = hw->phy.ops.reset(hw); | ||
6293 | else | ||
6294 | err = hw->mac.ops.setup_sfp(hw); | ||
6295 | |||
6296 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) | ||
6297 | goto sfp_out; | ||
6298 | |||
6299 | adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; | ||
6300 | e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); | ||
6301 | |||
6302 | sfp_out: | ||
6303 | clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); | ||
6304 | |||
6305 | if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && | ||
6306 | (adapter->netdev->reg_state == NETREG_REGISTERED)) { | ||
6307 | e_dev_err("failed to initialize because an unsupported " | ||
6308 | "SFP+ module type was detected.\n"); | ||
6309 | e_dev_err("Reload the driver after installing a " | ||
6310 | "supported module.\n"); | ||
6311 | unregister_netdev(adapter->netdev); | ||
6312 | } | ||
6313 | } | ||
6314 | |||
6315 | /** | ||
6316 | * ixgbe_sfp_link_config_subtask - set up link SFP after module install | ||
6317 | * @adapter - the ixgbe adapter structure | ||
6318 | **/ | ||
6319 | static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) | ||
6320 | { | ||
6321 | struct ixgbe_hw *hw = &adapter->hw; | ||
6322 | u32 autoneg; | ||
6323 | bool negotiation; | ||
6324 | |||
6325 | if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) | ||
6326 | return; | ||
6327 | |||
6328 | /* someone else is in init, wait until next service event */ | ||
6329 | if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) | ||
6330 | return; | ||
6331 | |||
6332 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; | ||
6333 | |||
6334 | autoneg = hw->phy.autoneg_advertised; | ||
6335 | if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) | ||
6336 | hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); | ||
6337 | hw->mac.autotry_restart = false; | ||
6338 | if (hw->mac.ops.setup_link) | ||
6339 | hw->mac.ops.setup_link(hw, autoneg, negotiation, true); | ||
6340 | |||
6341 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; | ||
6342 | adapter->link_check_timeout = jiffies; | ||
6343 | clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); | ||
6344 | } | ||
6345 | |||
6346 | /** | ||
6347 | * ixgbe_service_timer - Timer Call-back | ||
6348 | * @data: pointer to adapter cast into an unsigned long | ||
6349 | **/ | ||
6350 | static void ixgbe_service_timer(unsigned long data) | ||
6351 | { | ||
6352 | struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; | ||
6353 | unsigned long next_event_offset; | ||
6354 | |||
6355 | /* poll faster when waiting for link */ | ||
6356 | if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) | ||
6357 | next_event_offset = HZ / 10; | ||
6358 | else | ||
6359 | next_event_offset = HZ * 2; | ||
6360 | |||
6361 | /* Reset the timer */ | ||
6362 | mod_timer(&adapter->service_timer, next_event_offset + jiffies); | ||
6363 | |||
6364 | ixgbe_service_event_schedule(adapter); | ||
6365 | } | ||
6366 | |||
6367 | static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) | ||
6368 | { | ||
6369 | if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED)) | ||
6370 | return; | ||
6371 | |||
6372 | adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED; | ||
6373 | |||
6374 | /* If we're already down or resetting, just bail */ | ||
6375 | if (test_bit(__IXGBE_DOWN, &adapter->state) || | ||
6376 | test_bit(__IXGBE_RESETTING, &adapter->state)) | ||
6377 | return; | ||
6378 | |||
6379 | ixgbe_dump(adapter); | ||
6380 | netdev_err(adapter->netdev, "Reset adapter\n"); | ||
6381 | adapter->tx_timeout_count++; | ||
6382 | |||
6383 | ixgbe_reinit_locked(adapter); | ||
6384 | } | ||
6385 | |||
6386 | /** | ||
6387 | * ixgbe_service_task - manages and runs subtasks | ||
6388 | * @work: pointer to work_struct containing our data | ||
6389 | **/ | ||
6390 | static void ixgbe_service_task(struct work_struct *work) | ||
6391 | { | ||
6392 | struct ixgbe_adapter *adapter = container_of(work, | ||
6393 | struct ixgbe_adapter, | ||
6394 | service_task); | ||
6395 | |||
6396 | ixgbe_reset_subtask(adapter); | ||
6397 | ixgbe_sfp_detection_subtask(adapter); | ||
6398 | ixgbe_sfp_link_config_subtask(adapter); | ||
6399 | ixgbe_check_overtemp_subtask(adapter); | ||
6400 | ixgbe_watchdog_subtask(adapter); | ||
6401 | ixgbe_fdir_reinit_subtask(adapter); | ||
6402 | ixgbe_check_hang_subtask(adapter); | ||
6403 | |||
6404 | ixgbe_service_event_complete(adapter); | ||
5719 | } | 6405 | } |
5720 | 6406 | ||
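This is the core of the rework: the separate watchdog timer, SFP worker threads and FDIR reinit work item are folded into a single service task driven by one timer, and each subtask checks a request flag and returns immediately when there is nothing to do. A rough standalone model of that flag-driven dispatch follows; the flag and function names are invented for illustration, not driver API.

#include <stdio.h>

#define FLAG_RESET  (1u << 0)
#define FLAG_FDIR   (1u << 1)

static unsigned int flags;

static void reset_subtask(void)
{
	if (!(flags & FLAG_RESET))
		return;                 /* nothing requested, bail early */
	flags &= ~FLAG_RESET;
	puts("resetting adapter");
}

static void fdir_subtask(void)
{
	if (!(flags & FLAG_FDIR))
		return;
	flags &= ~FLAG_FDIR;
	puts("reinitializing FDIR tables");
}

static void service_task(void)
{
	/* subtasks always run in a fixed order; each one is a no-op
	 * unless its request flag was set by an interrupt or timer */
	reset_subtask();
	fdir_subtask();
}

int main(void)
{
	flags = FLAG_FDIR;
	service_task();         /* only the FDIR subtask does work */
	return 0;
}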
5721 | static int ixgbe_tso(struct ixgbe_adapter *adapter, | 6407 | static int ixgbe_tso(struct ixgbe_adapter *adapter, |
5722 | struct ixgbe_ring *tx_ring, struct sk_buff *skb, | 6408 | struct ixgbe_ring *tx_ring, struct sk_buff *skb, |
5723 | u32 tx_flags, u8 *hdr_len) | 6409 | u32 tx_flags, u8 *hdr_len, __be16 protocol) |
5724 | { | 6410 | { |
5725 | struct ixgbe_adv_tx_context_desc *context_desc; | 6411 | struct ixgbe_adv_tx_context_desc *context_desc; |
5726 | unsigned int i; | 6412 | unsigned int i; |
@@ -5738,33 +6424,33 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, | |||
5738 | l4len = tcp_hdrlen(skb); | 6424 | l4len = tcp_hdrlen(skb); |
5739 | *hdr_len += l4len; | 6425 | *hdr_len += l4len; |
5740 | 6426 | ||
5741 | if (skb->protocol == htons(ETH_P_IP)) { | 6427 | if (protocol == htons(ETH_P_IP)) { |
5742 | struct iphdr *iph = ip_hdr(skb); | 6428 | struct iphdr *iph = ip_hdr(skb); |
5743 | iph->tot_len = 0; | 6429 | iph->tot_len = 0; |
5744 | iph->check = 0; | 6430 | iph->check = 0; |
5745 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | 6431 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
5746 | iph->daddr, 0, | 6432 | iph->daddr, 0, |
5747 | IPPROTO_TCP, | 6433 | IPPROTO_TCP, |
5748 | 0); | 6434 | 0); |
5749 | } else if (skb_is_gso_v6(skb)) { | 6435 | } else if (skb_is_gso_v6(skb)) { |
5750 | ipv6_hdr(skb)->payload_len = 0; | 6436 | ipv6_hdr(skb)->payload_len = 0; |
5751 | tcp_hdr(skb)->check = | 6437 | tcp_hdr(skb)->check = |
5752 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 6438 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
5753 | &ipv6_hdr(skb)->daddr, | 6439 | &ipv6_hdr(skb)->daddr, |
5754 | 0, IPPROTO_TCP, 0); | 6440 | 0, IPPROTO_TCP, 0); |
5755 | } | 6441 | } |
5756 | 6442 | ||
5757 | i = tx_ring->next_to_use; | 6443 | i = tx_ring->next_to_use; |
5758 | 6444 | ||
5759 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 6445 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
5760 | context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); | 6446 | context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i); |
5761 | 6447 | ||
5762 | /* VLAN MACLEN IPLEN */ | 6448 | /* VLAN MACLEN IPLEN */ |
5763 | if (tx_flags & IXGBE_TX_FLAGS_VLAN) | 6449 | if (tx_flags & IXGBE_TX_FLAGS_VLAN) |
5764 | vlan_macip_lens |= | 6450 | vlan_macip_lens |= |
5765 | (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); | 6451 | (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); |
5766 | vlan_macip_lens |= ((skb_network_offset(skb)) << | 6452 | vlan_macip_lens |= ((skb_network_offset(skb)) << |
5767 | IXGBE_ADVTXD_MACLEN_SHIFT); | 6453 | IXGBE_ADVTXD_MACLEN_SHIFT); |
5768 | *hdr_len += skb_network_offset(skb); | 6454 | *hdr_len += skb_network_offset(skb); |
5769 | vlan_macip_lens |= | 6455 | vlan_macip_lens |= |
5770 | (skb_transport_header(skb) - skb_network_header(skb)); | 6456 | (skb_transport_header(skb) - skb_network_header(skb)); |
@@ -5775,9 +6461,9 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, | |||
5775 | 6461 | ||
5776 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ | 6462 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ |
5777 | type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | | 6463 | type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | |
5778 | IXGBE_ADVTXD_DTYP_CTXT); | 6464 | IXGBE_ADVTXD_DTYP_CTXT); |
5779 | 6465 | ||
5780 | if (skb->protocol == htons(ETH_P_IP)) | 6466 | if (protocol == htons(ETH_P_IP)) |
5781 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; | 6467 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; |
5782 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; | 6468 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; |
5783 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); | 6469 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); |
@@ -5803,9 +6489,48 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, | |||
5803 | return false; | 6489 | return false; |
5804 | } | 6490 | } |
5805 | 6491 | ||
6492 | static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb, | ||
6493 | __be16 protocol) | ||
6494 | { | ||
6495 | u32 rtn = 0; | ||
6496 | |||
6497 | switch (protocol) { | ||
6498 | case cpu_to_be16(ETH_P_IP): | ||
6499 | rtn |= IXGBE_ADVTXD_TUCMD_IPV4; | ||
6500 | switch (ip_hdr(skb)->protocol) { | ||
6501 | case IPPROTO_TCP: | ||
6502 | rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP; | ||
6503 | break; | ||
6504 | case IPPROTO_SCTP: | ||
6505 | rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; | ||
6506 | break; | ||
6507 | } | ||
6508 | break; | ||
6509 | case cpu_to_be16(ETH_P_IPV6): | ||
6510 | /* XXX what about other V6 headers?? */ | ||
6511 | switch (ipv6_hdr(skb)->nexthdr) { | ||
6512 | case IPPROTO_TCP: | ||
6513 | rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP; | ||
6514 | break; | ||
6515 | case IPPROTO_SCTP: | ||
6516 | rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; | ||
6517 | break; | ||
6518 | } | ||
6519 | break; | ||
6520 | default: | ||
6521 | if (unlikely(net_ratelimit())) | ||
6522 | e_warn(probe, "partial checksum but proto=%x!\n", | ||
6523 | protocol); | ||
6524 | break; | ||
6525 | } | ||
6526 | |||
6527 | return rtn; | ||
6528 | } | ||
6529 | |||
5806 | static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, | 6530 | static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, |
5807 | struct ixgbe_ring *tx_ring, | 6531 | struct ixgbe_ring *tx_ring, |
5808 | struct sk_buff *skb, u32 tx_flags) | 6532 | struct sk_buff *skb, u32 tx_flags, |
6533 | __be16 protocol) | ||
5809 | { | 6534 | { |
5810 | struct ixgbe_adv_tx_context_desc *context_desc; | 6535 | struct ixgbe_adv_tx_context_desc *context_desc; |
5811 | unsigned int i; | 6536 | unsigned int i; |
@@ -5816,63 +6541,25 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, | |||
5816 | (tx_flags & IXGBE_TX_FLAGS_VLAN)) { | 6541 | (tx_flags & IXGBE_TX_FLAGS_VLAN)) { |
5817 | i = tx_ring->next_to_use; | 6542 | i = tx_ring->next_to_use; |
5818 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 6543 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
5819 | context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); | 6544 | context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i); |
5820 | 6545 | ||
5821 | if (tx_flags & IXGBE_TX_FLAGS_VLAN) | 6546 | if (tx_flags & IXGBE_TX_FLAGS_VLAN) |
5822 | vlan_macip_lens |= | 6547 | vlan_macip_lens |= |
5823 | (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); | 6548 | (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); |
5824 | vlan_macip_lens |= (skb_network_offset(skb) << | 6549 | vlan_macip_lens |= (skb_network_offset(skb) << |
5825 | IXGBE_ADVTXD_MACLEN_SHIFT); | 6550 | IXGBE_ADVTXD_MACLEN_SHIFT); |
5826 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 6551 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
5827 | vlan_macip_lens |= (skb_transport_header(skb) - | 6552 | vlan_macip_lens |= (skb_transport_header(skb) - |
5828 | skb_network_header(skb)); | 6553 | skb_network_header(skb)); |
5829 | 6554 | ||
5830 | context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); | 6555 | context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); |
5831 | context_desc->seqnum_seed = 0; | 6556 | context_desc->seqnum_seed = 0; |
5832 | 6557 | ||
5833 | type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | | 6558 | type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | |
5834 | IXGBE_ADVTXD_DTYP_CTXT); | 6559 | IXGBE_ADVTXD_DTYP_CTXT); |
5835 | |||
5836 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
5837 | __be16 protocol; | ||
5838 | |||
5839 | if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { | ||
5840 | const struct vlan_ethhdr *vhdr = | ||
5841 | (const struct vlan_ethhdr *)skb->data; | ||
5842 | |||
5843 | protocol = vhdr->h_vlan_encapsulated_proto; | ||
5844 | } else { | ||
5845 | protocol = skb->protocol; | ||
5846 | } | ||
5847 | 6560 | ||
5848 | switch (protocol) { | 6561 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
5849 | case cpu_to_be16(ETH_P_IP): | 6562 | type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol); |
5850 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; | ||
5851 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | ||
5852 | type_tucmd_mlhl |= | ||
5853 | IXGBE_ADVTXD_TUCMD_L4T_TCP; | ||
5854 | else if (ip_hdr(skb)->protocol == IPPROTO_SCTP) | ||
5855 | type_tucmd_mlhl |= | ||
5856 | IXGBE_ADVTXD_TUCMD_L4T_SCTP; | ||
5857 | break; | ||
5858 | case cpu_to_be16(ETH_P_IPV6): | ||
5859 | /* XXX what about other V6 headers?? */ | ||
5860 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) | ||
5861 | type_tucmd_mlhl |= | ||
5862 | IXGBE_ADVTXD_TUCMD_L4T_TCP; | ||
5863 | else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP) | ||
5864 | type_tucmd_mlhl |= | ||
5865 | IXGBE_ADVTXD_TUCMD_L4T_SCTP; | ||
5866 | break; | ||
5867 | default: | ||
5868 | if (unlikely(net_ratelimit())) { | ||
5869 | e_warn(probe, "partial checksum " | ||
5870 | "but proto=%x!\n", | ||
5871 | skb->protocol); | ||
5872 | } | ||
5873 | break; | ||
5874 | } | ||
5875 | } | ||
5876 | 6563 | ||
5877 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); | 6564 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); |
5878 | /* use index zero for tx checksum offload */ | 6565 | /* use index zero for tx checksum offload */ |
@@ -5893,17 +6580,19 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, | |||
5893 | } | 6580 | } |
5894 | 6581 | ||
5895 | static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | 6582 | static int ixgbe_tx_map(struct ixgbe_adapter *adapter, |
5896 | struct ixgbe_ring *tx_ring, | 6583 | struct ixgbe_ring *tx_ring, |
5897 | struct sk_buff *skb, u32 tx_flags, | 6584 | struct sk_buff *skb, u32 tx_flags, |
5898 | unsigned int first) | 6585 | unsigned int first, const u8 hdr_len) |
5899 | { | 6586 | { |
5900 | struct pci_dev *pdev = adapter->pdev; | 6587 | struct device *dev = tx_ring->dev; |
5901 | struct ixgbe_tx_buffer *tx_buffer_info; | 6588 | struct ixgbe_tx_buffer *tx_buffer_info; |
5902 | unsigned int len; | 6589 | unsigned int len; |
5903 | unsigned int total = skb->len; | 6590 | unsigned int total = skb->len; |
5904 | unsigned int offset = 0, size, count = 0, i; | 6591 | unsigned int offset = 0, size, count = 0, i; |
5905 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; | 6592 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; |
5906 | unsigned int f; | 6593 | unsigned int f; |
6594 | unsigned int bytecount = skb->len; | ||
6595 | u16 gso_segs = 1; | ||
5907 | 6596 | ||
5908 | i = tx_ring->next_to_use; | 6597 | i = tx_ring->next_to_use; |
5909 | 6598 | ||
@@ -5918,10 +6607,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | |||
5918 | 6607 | ||
5919 | tx_buffer_info->length = size; | 6608 | tx_buffer_info->length = size; |
5920 | tx_buffer_info->mapped_as_page = false; | 6609 | tx_buffer_info->mapped_as_page = false; |
5921 | tx_buffer_info->dma = dma_map_single(&pdev->dev, | 6610 | tx_buffer_info->dma = dma_map_single(dev, |
5922 | skb->data + offset, | 6611 | skb->data + offset, |
5923 | size, DMA_TO_DEVICE); | 6612 | size, DMA_TO_DEVICE); |
5924 | if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) | 6613 | if (dma_mapping_error(dev, tx_buffer_info->dma)) |
5925 | goto dma_error; | 6614 | goto dma_error; |
5926 | tx_buffer_info->time_stamp = jiffies; | 6615 | tx_buffer_info->time_stamp = jiffies; |
5927 | tx_buffer_info->next_to_watch = i; | 6616 | tx_buffer_info->next_to_watch = i; |
@@ -5954,12 +6643,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | |||
5954 | size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); | 6643 | size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); |
5955 | 6644 | ||
5956 | tx_buffer_info->length = size; | 6645 | tx_buffer_info->length = size; |
5957 | tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev, | 6646 | tx_buffer_info->dma = dma_map_page(dev, |
5958 | frag->page, | 6647 | frag->page, |
5959 | offset, size, | 6648 | offset, size, |
5960 | DMA_TO_DEVICE); | 6649 | DMA_TO_DEVICE); |
5961 | tx_buffer_info->mapped_as_page = true; | 6650 | tx_buffer_info->mapped_as_page = true; |
5962 | if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) | 6651 | if (dma_mapping_error(dev, tx_buffer_info->dma)) |
5963 | goto dma_error; | 6652 | goto dma_error; |
5964 | tx_buffer_info->time_stamp = jiffies; | 6653 | tx_buffer_info->time_stamp = jiffies; |
5965 | tx_buffer_info->next_to_watch = i; | 6654 | tx_buffer_info->next_to_watch = i; |
@@ -5973,6 +6662,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | |||
5973 | break; | 6662 | break; |
5974 | } | 6663 | } |
5975 | 6664 | ||
6665 | if (tx_flags & IXGBE_TX_FLAGS_TSO) | ||
6666 | gso_segs = skb_shinfo(skb)->gso_segs; | ||
6667 | #ifdef IXGBE_FCOE | ||
6668 | /* adjust for FCoE Sequence Offload */ | ||
6669 | else if (tx_flags & IXGBE_TX_FLAGS_FSO) | ||
6670 | gso_segs = DIV_ROUND_UP(skb->len - hdr_len, | ||
6671 | skb_shinfo(skb)->gso_size); | ||
6672 | #endif /* IXGBE_FCOE */ | ||
6673 | bytecount += (gso_segs - 1) * hdr_len; | ||
6674 | |||
6675 | /* multiply data chunks by size of headers */ | ||
6676 | tx_ring->tx_buffer_info[i].bytecount = bytecount; | ||
6677 | tx_ring->tx_buffer_info[i].gso_segs = gso_segs; | ||
5976 | tx_ring->tx_buffer_info[i].skb = skb; | 6678 | tx_ring->tx_buffer_info[i].skb = skb; |
5977 | tx_ring->tx_buffer_info[first].next_to_watch = i; | 6679 | tx_ring->tx_buffer_info[first].next_to_watch = i; |
5978 | 6680 | ||
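The new bytecount/gso_segs accounting above credits the last descriptor with the on-wire size of a TSO packet: skb->len counts the headers once, and every additional segment repeats hdr_len bytes of headers, hence bytecount = skb->len + (gso_segs - 1) * hdr_len. A worked example with made-up sizes:

#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 64242;   /* hypothetical TSO skb: headers + payload */
	unsigned int hdr_len = 66;      /* ETH + IP + TCP header bytes */
	unsigned int gso_size = 1448;   /* MSS */
	unsigned int payload = skb_len - hdr_len;
	unsigned int gso_segs = (payload + gso_size - 1) / gso_size;  /* 45 */
	unsigned int bytecount = skb_len + (gso_segs - 1) * hdr_len;  /* 67146 */

	printf("segments=%u wire bytes=%u\n", gso_segs, bytecount);
	return 0;
}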
@@ -5990,19 +6692,18 @@ dma_error: | |||
5990 | 6692 | ||
5991 | /* clear timestamp and dma mappings for remaining portion of packet */ | 6693 | /* clear timestamp and dma mappings for remaining portion of packet */ |
5992 | while (count--) { | 6694 | while (count--) { |
5993 | if (i==0) | 6695 | if (i == 0) |
5994 | i += tx_ring->count; | 6696 | i += tx_ring->count; |
5995 | i--; | 6697 | i--; |
5996 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 6698 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
5997 | ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); | 6699 | ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); |
5998 | } | 6700 | } |
5999 | 6701 | ||
6000 | return 0; | 6702 | return 0; |
6001 | } | 6703 | } |
6002 | 6704 | ||
6003 | static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | 6705 | static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring, |
6004 | struct ixgbe_ring *tx_ring, | 6706 | int tx_flags, int count, u32 paylen, u8 hdr_len) |
6005 | int tx_flags, int count, u32 paylen, u8 hdr_len) | ||
6006 | { | 6707 | { |
6007 | union ixgbe_adv_tx_desc *tx_desc = NULL; | 6708 | union ixgbe_adv_tx_desc *tx_desc = NULL; |
6008 | struct ixgbe_tx_buffer *tx_buffer_info; | 6709 | struct ixgbe_tx_buffer *tx_buffer_info; |
@@ -6021,17 +6722,17 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | |||
6021 | cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; | 6722 | cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; |
6022 | 6723 | ||
6023 | olinfo_status |= IXGBE_TXD_POPTS_TXSM << | 6724 | olinfo_status |= IXGBE_TXD_POPTS_TXSM << |
6024 | IXGBE_ADVTXD_POPTS_SHIFT; | 6725 | IXGBE_ADVTXD_POPTS_SHIFT; |
6025 | 6726 | ||
6026 | /* use index 1 context for tso */ | 6727 | /* use index 1 context for tso */ |
6027 | olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); | 6728 | olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); |
6028 | if (tx_flags & IXGBE_TX_FLAGS_IPV4) | 6729 | if (tx_flags & IXGBE_TX_FLAGS_IPV4) |
6029 | olinfo_status |= IXGBE_TXD_POPTS_IXSM << | 6730 | olinfo_status |= IXGBE_TXD_POPTS_IXSM << |
6030 | IXGBE_ADVTXD_POPTS_SHIFT; | 6731 | IXGBE_ADVTXD_POPTS_SHIFT; |
6031 | 6732 | ||
6032 | } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) | 6733 | } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) |
6033 | olinfo_status |= IXGBE_TXD_POPTS_TXSM << | 6734 | olinfo_status |= IXGBE_TXD_POPTS_TXSM << |
6034 | IXGBE_ADVTXD_POPTS_SHIFT; | 6735 | IXGBE_ADVTXD_POPTS_SHIFT; |
6035 | 6736 | ||
6036 | if (tx_flags & IXGBE_TX_FLAGS_FCOE) { | 6737 | if (tx_flags & IXGBE_TX_FLAGS_FCOE) { |
6037 | olinfo_status |= IXGBE_ADVTXD_CC; | 6738 | olinfo_status |= IXGBE_ADVTXD_CC; |
@@ -6045,10 +6746,10 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | |||
6045 | i = tx_ring->next_to_use; | 6746 | i = tx_ring->next_to_use; |
6046 | while (count--) { | 6747 | while (count--) { |
6047 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 6748 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
6048 | tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); | 6749 | tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); |
6049 | tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); | 6750 | tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); |
6050 | tx_desc->read.cmd_type_len = | 6751 | tx_desc->read.cmd_type_len = |
6051 | cpu_to_le32(cmd_type_len | tx_buffer_info->length); | 6752 | cpu_to_le32(cmd_type_len | tx_buffer_info->length); |
6052 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); | 6753 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
6053 | i++; | 6754 | i++; |
6054 | if (i == tx_ring->count) | 6755 | if (i == tx_ring->count) |
@@ -6066,60 +6767,100 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | |||
6066 | wmb(); | 6767 | wmb(); |
6067 | 6768 | ||
6068 | tx_ring->next_to_use = i; | 6769 | tx_ring->next_to_use = i; |
6069 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | 6770 | writel(i, tx_ring->tail); |
6070 | } | 6771 | } |
6071 | 6772 | ||
6072 | static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, | 6773 | static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb, |
6073 | int queue, u32 tx_flags) | 6774 | u32 tx_flags, __be16 protocol) |
6074 | { | 6775 | { |
6075 | struct ixgbe_atr_input atr_input; | 6776 | struct ixgbe_q_vector *q_vector = ring->q_vector; |
6777 | union ixgbe_atr_hash_dword input = { .dword = 0 }; | ||
6778 | union ixgbe_atr_hash_dword common = { .dword = 0 }; | ||
6779 | union { | ||
6780 | unsigned char *network; | ||
6781 | struct iphdr *ipv4; | ||
6782 | struct ipv6hdr *ipv6; | ||
6783 | } hdr; | ||
6076 | struct tcphdr *th; | 6784 | struct tcphdr *th; |
6077 | struct iphdr *iph = ip_hdr(skb); | 6785 | __be16 vlan_id; |
6078 | struct ethhdr *eth = (struct ethhdr *)skb->data; | 6786 | |
6079 | u16 vlan_id, src_port, dst_port, flex_bytes; | 6787 | /* if ring doesn't have a interrupt vector, cannot perform ATR */ |
6080 | u32 src_ipv4_addr, dst_ipv4_addr; | 6788 | if (!q_vector) |
6081 | u8 l4type = 0; | ||
6082 | |||
6083 | /* Right now, we support IPv4 only */ | ||
6084 | if (skb->protocol != htons(ETH_P_IP)) | ||
6085 | return; | 6789 | return; |
6086 | /* check if we're UDP or TCP */ | 6790 | |
6087 | if (iph->protocol == IPPROTO_TCP) { | 6791 | /* do nothing if sampling is disabled */ |
6088 | th = tcp_hdr(skb); | 6792 | if (!ring->atr_sample_rate) |
6089 | src_port = th->source; | 6793 | return; |
6090 | dst_port = th->dest; | 6794 | |
6091 | l4type |= IXGBE_ATR_L4TYPE_TCP; | 6795 | ring->atr_count++; |
6092 | /* l4type IPv4 type is 0, no need to assign */ | 6796 | |
6093 | } else { | 6797 | /* snag network header to get L4 type and address */ |
6094 | /* Unsupported L4 header, just bail here */ | 6798 | hdr.network = skb_network_header(skb); |
6799 | |||
6800 | /* Currently only IPv4/IPv6 with TCP is supported */ | ||
6801 | if ((protocol != __constant_htons(ETH_P_IPV6) || | ||
6802 | hdr.ipv6->nexthdr != IPPROTO_TCP) && | ||
6803 | (protocol != __constant_htons(ETH_P_IP) || | ||
6804 | hdr.ipv4->protocol != IPPROTO_TCP)) | ||
6805 | return; | ||
6806 | |||
6807 | th = tcp_hdr(skb); | ||
6808 | |||
6809 | /* skip this packet since the socket is closing */ | ||
6810 | if (th->fin) | ||
6811 | return; | ||
6812 | |||
6813 | /* sample on all syn packets or once every atr sample count */ | ||
6814 | if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) | ||
6095 | return; | 6815 | return; |
6096 | } | ||
6097 | 6816 | ||
6098 | memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); | 6817 | /* reset sample count */ |
6818 | ring->atr_count = 0; | ||
6099 | 6819 | ||
6100 | vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> | 6820 | vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); |
6101 | IXGBE_TX_FLAGS_VLAN_SHIFT; | ||
6102 | src_ipv4_addr = iph->saddr; | ||
6103 | dst_ipv4_addr = iph->daddr; | ||
6104 | flex_bytes = eth->h_proto; | ||
6105 | 6821 | ||
6106 | ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); | 6822 | /* |
6107 | ixgbe_atr_set_src_port_82599(&atr_input, dst_port); | 6823 | * src and dst are inverted, think how the receiver sees them |
6108 | ixgbe_atr_set_dst_port_82599(&atr_input, src_port); | 6824 | * |
6109 | ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes); | 6825 | * The input is broken into two sections, a non-compressed section |
6110 | ixgbe_atr_set_l4type_82599(&atr_input, l4type); | 6826 | * containing vm_pool, vlan_id, and flow_type. The rest of the data |
6111 | /* src and dst are inverted, think how the receiver sees them */ | 6827 | * is XORed together and stored in the compressed dword. |
6112 | ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr); | 6828 | */ |
6113 | ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr); | 6829 | input.formatted.vlan_id = vlan_id; |
6830 | |||
6831 | /* | ||
6832 | * since src port and flex bytes occupy the same word XOR them together | ||
6833 | * and write the value to source port portion of compressed dword | ||
6834 | */ | ||
6835 | if (vlan_id) | ||
6836 | common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q); | ||
6837 | else | ||
6838 | common.port.src ^= th->dest ^ protocol; | ||
6839 | common.port.dst ^= th->source; | ||
6840 | |||
6841 | if (protocol == __constant_htons(ETH_P_IP)) { | ||
6842 | input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; | ||
6843 | common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; | ||
6844 | } else { | ||
6845 | input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; | ||
6846 | common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ | ||
6847 | hdr.ipv6->saddr.s6_addr32[1] ^ | ||
6848 | hdr.ipv6->saddr.s6_addr32[2] ^ | ||
6849 | hdr.ipv6->saddr.s6_addr32[3] ^ | ||
6850 | hdr.ipv6->daddr.s6_addr32[0] ^ | ||
6851 | hdr.ipv6->daddr.s6_addr32[1] ^ | ||
6852 | hdr.ipv6->daddr.s6_addr32[2] ^ | ||
6853 | hdr.ipv6->daddr.s6_addr32[3]; | ||
6854 | } | ||
6114 | 6855 | ||
6115 | /* This assumes the Rx queue and Tx queue are bound to the same CPU */ | 6856 | /* This assumes the Rx queue and Tx queue are bound to the same CPU */ |
6116 | ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); | 6857 | ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, |
6858 | input, common, ring->queue_index); | ||
6117 | } | 6859 | } |
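
[Editor's note] The rewritten ixgbe_atr() above samples TCP flows (every SYN, or once per atr_sample_rate packets), swaps source and destination because the signature has to match the traffic the peer sends back, and XOR-folds the addresses, ports and flex bytes into compressed words before programming the flow director. Below is a minimal userspace sketch of that folding idea only; all type and field names are illustrative and do not match the driver's hash-dword unions.

/*
 * Userspace sketch of the XOR folding done by the new ixgbe_atr() above.
 * Illustrative names only; compile with any C99 compiler.
 */
#include <stdint.h>
#include <stdio.h>

struct flow_key {
	uint32_t saddr, daddr;	/* IPv4 addresses as seen on transmit */
	uint16_t sport, dport;	/* TCP ports as seen on transmit */
	uint16_t vlan_tci;	/* 0 if the frame is untagged */
	uint16_t ethertype;	/* used as flex bytes when untagged */
};

struct atr_common {
	uint16_t port_src;	/* compressed source-port slot */
	uint16_t port_dst;	/* compressed destination-port slot */
	uint32_t ip;		/* all address words folded together */
};

static struct atr_common atr_fold(const struct flow_key *k)
{
	struct atr_common c = { 0, 0, 0 };

	/*
	 * src and dst are swapped: the filter must match the reply
	 * direction.  The flex bytes (0x8100 for tagged frames, the
	 * ethertype otherwise) share a slot with the source port, so they
	 * are XORed into that slot instead of being stored separately.
	 */
	c.port_src = k->dport ^ (k->vlan_tci ? 0x8100 : k->ethertype);
	c.port_dst = k->sport;
	c.ip = k->saddr ^ k->daddr;
	return c;
}

int main(void)
{
	struct flow_key k = {
		.saddr = 0x0a000001, .daddr = 0x0a000002,
		.sport = 51515, .dport = 80,
		.vlan_tci = 0, .ethertype = 0x0800,
	};
	struct atr_common c = atr_fold(&k);

	printf("port src %04x, port dst %04x, ip %08x\n",
	       c.port_src, c.port_dst, c.ip);
	return 0;
}
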
6118 | 6860 | ||
6119 | static int __ixgbe_maybe_stop_tx(struct net_device *netdev, | 6861 | static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) |
6120 | struct ixgbe_ring *tx_ring, int size) | ||
6121 | { | 6862 | { |
6122 | netif_stop_subqueue(netdev, tx_ring->queue_index); | 6863 | netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); |
6123 | /* Herbert's original patch had: | 6864 | /* Herbert's original patch had: |
6124 | * smp_mb__after_netif_stop_queue(); | 6865 | * smp_mb__after_netif_stop_queue(); |
6125 | * but since that doesn't exist yet, just open code it. */ | 6866 | * but since that doesn't exist yet, just open code it. */ |
@@ -6131,37 +6872,33 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev, | |||
6131 | return -EBUSY; | 6872 | return -EBUSY; |
6132 | 6873 | ||
6133 | /* A reprieve! - use start_queue because it doesn't call schedule */ | 6874 | /* A reprieve! - use start_queue because it doesn't call schedule */ |
6134 | netif_start_subqueue(netdev, tx_ring->queue_index); | 6875 | netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); |
6135 | ++tx_ring->restart_queue; | 6876 | ++tx_ring->tx_stats.restart_queue; |
6136 | return 0; | 6877 | return 0; |
6137 | } | 6878 | } |
6138 | 6879 | ||
6139 | static int ixgbe_maybe_stop_tx(struct net_device *netdev, | 6880 | static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) |
6140 | struct ixgbe_ring *tx_ring, int size) | ||
6141 | { | 6881 | { |
6142 | if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) | 6882 | if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) |
6143 | return 0; | 6883 | return 0; |
6144 | return __ixgbe_maybe_stop_tx(netdev, tx_ring, size); | 6884 | return __ixgbe_maybe_stop_tx(tx_ring, size); |
6145 | } | 6885 | } |
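
[Editor's note] The __ixgbe_maybe_stop_tx()/ixgbe_maybe_stop_tx() pair above is the usual stop-then-recheck dance: stop the subqueue, force a full barrier, then re-read the free-descriptor count so a clean-up that races with the stop either wakes the queue or is observed by the re-check. A small sketch of the same pattern, using C11 atomics in place of netif_*_subqueue() and smp_mb(); names are illustrative.

#include <stdatomic.h>
#include <stdbool.h>

struct txq_model {
	atomic_uint unused_desc;	/* free descriptors, bumped by clean-up */
	atomic_bool stopped;		/* models netif_subqueue_stopped() */
};

int maybe_stop_tx(struct txq_model *ring, unsigned int needed)
{
	if (atomic_load(&ring->unused_desc) >= needed)
		return 0;

	/* Not enough room: stop the queue first ... */
	atomic_store(&ring->stopped, true);

	/*
	 * ... then force a full barrier before re-reading the count, so the
	 * clean-up path either sees the queue stopped (and wakes it) or we
	 * see the descriptors it just freed.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load(&ring->unused_desc) < needed)
		return -1;	/* stay stopped; caller returns NETDEV_TX_BUSY */

	/* A reprieve: room appeared while stopping, so restart the queue. */
	atomic_store(&ring->stopped, false);
	return 0;
}
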
6146 | 6886 | ||
6147 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | 6887 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) |
6148 | { | 6888 | { |
6149 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 6889 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
6150 | int txq = smp_processor_id(); | 6890 | int txq = smp_processor_id(); |
6151 | |||
6152 | #ifdef IXGBE_FCOE | 6891 | #ifdef IXGBE_FCOE |
6153 | if ((skb->protocol == htons(ETH_P_FCOE)) || | 6892 | __be16 protocol; |
6154 | (skb->protocol == htons(ETH_P_FIP))) { | 6893 | |
6155 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | 6894 | protocol = vlan_get_protocol(skb); |
6156 | txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); | 6895 | |
6157 | txq += adapter->ring_feature[RING_F_FCOE].mask; | 6896 | if (((protocol == htons(ETH_P_FCOE)) || |
6158 | return txq; | 6897 | (protocol == htons(ETH_P_FIP))) && |
6159 | #ifdef CONFIG_IXGBE_DCB | 6898 | (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { |
6160 | } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 6899 | txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); |
6161 | txq = adapter->fcoe.up; | 6900 | txq += adapter->ring_feature[RING_F_FCOE].mask; |
6162 | return txq; | 6901 | return txq; |
6163 | #endif | ||
6164 | } | ||
6165 | } | 6902 | } |
6166 | #endif | 6903 | #endif |
6167 | 6904 | ||
@@ -6171,66 +6908,44 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
6171 | return txq; | 6908 | return txq; |
6172 | } | 6909 | } |
6173 | 6910 | ||
6174 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
6175 | if (skb->priority == TC_PRIO_CONTROL) | ||
6176 | txq = adapter->ring_feature[RING_F_DCB].indices-1; | ||
6177 | else | ||
6178 | txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) | ||
6179 | >> 13; | ||
6180 | return txq; | ||
6181 | } | ||
6182 | |||
6183 | return skb_tx_hash(dev, skb); | 6911 | return skb_tx_hash(dev, skb); |
6184 | } | 6912 | } |
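
[Editor's note] In the reworked ixgbe_select_queue() above, FCoE/FIP frames are steered by masking the current CPU id into the FCoE ring group (the .indices count is a power of two) and then adding the field the driver calls .mask, which acts as the group's base ring index. A tiny sketch of that arithmetic with made-up numbers:

#include <stdio.h>

static unsigned int fcoe_select_queue(unsigned int cpu,
				      unsigned int fcoe_indices, /* power of two */
				      unsigned int fcoe_base)    /* first FCoE ring */
{
	unsigned int txq = cpu;

	txq &= (fcoe_indices - 1);	/* wrap CPU id into [0, fcoe_indices) */
	txq += fcoe_base;		/* shift into the FCoE ring range */
	return txq;
}

int main(void)
{
	unsigned int cpu;

	/* e.g. 8 FCoE rings starting at ring index 16 (hypothetical layout) */
	for (cpu = 0; cpu < 12; cpu++)
		printf("cpu %2u -> txq %u\n", cpu, fcoe_select_queue(cpu, 8, 16));
	return 0;
}
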
6185 | 6913 | ||
6186 | static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, | 6914 | netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, |
6187 | struct net_device *netdev) | 6915 | struct ixgbe_adapter *adapter, |
6916 | struct ixgbe_ring *tx_ring) | ||
6188 | { | 6917 | { |
6189 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
6190 | struct ixgbe_ring *tx_ring; | ||
6191 | struct netdev_queue *txq; | ||
6192 | unsigned int first; | 6918 | unsigned int first; |
6193 | unsigned int tx_flags = 0; | 6919 | unsigned int tx_flags = 0; |
6194 | u8 hdr_len = 0; | 6920 | u8 hdr_len = 0; |
6195 | int tso; | 6921 | int tso; |
6196 | int count = 0; | 6922 | int count = 0; |
6197 | unsigned int f; | 6923 | unsigned int f; |
6924 | __be16 protocol; | ||
6198 | 6925 | ||
6199 | if (adapter->vlgrp && vlan_tx_tag_present(skb)) { | 6926 | protocol = vlan_get_protocol(skb); |
6927 | |||
6928 | if (vlan_tx_tag_present(skb)) { | ||
6200 | tx_flags |= vlan_tx_tag_get(skb); | 6929 | tx_flags |= vlan_tx_tag_get(skb); |
6201 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 6930 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
6202 | tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; | 6931 | tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; |
6203 | tx_flags |= ((skb->queue_mapping & 0x7) << 13); | 6932 | tx_flags |= tx_ring->dcb_tc << 13; |
6204 | } | 6933 | } |
6205 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | 6934 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; |
6206 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | 6935 | tx_flags |= IXGBE_TX_FLAGS_VLAN; |
6207 | } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED && | 6936 | } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED && |
6208 | skb->priority != TC_PRIO_CONTROL) { | 6937 | skb->priority != TC_PRIO_CONTROL) { |
6209 | tx_flags |= ((skb->queue_mapping & 0x7) << 13); | 6938 | tx_flags |= tx_ring->dcb_tc << 13; |
6210 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | 6939 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; |
6211 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | 6940 | tx_flags |= IXGBE_TX_FLAGS_VLAN; |
6212 | } | 6941 | } |
6213 | 6942 | ||
6214 | tx_ring = adapter->tx_ring[skb->queue_mapping]; | ||
6215 | |||
6216 | #ifdef IXGBE_FCOE | 6943 | #ifdef IXGBE_FCOE |
6217 | /* for FCoE with DCB, we force the priority to what | 6944 | /* for FCoE with DCB, we force the priority to what |
6218 | * was specified by the switch */ | 6945 | * was specified by the switch */ |
6219 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && | 6946 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && |
6220 | (skb->protocol == htons(ETH_P_FCOE) || | 6947 | (protocol == htons(ETH_P_FCOE))) |
6221 | skb->protocol == htons(ETH_P_FIP))) { | 6948 | tx_flags |= IXGBE_TX_FLAGS_FCOE; |
6222 | #ifdef CONFIG_IXGBE_DCB | ||
6223 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
6224 | tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK | ||
6225 | << IXGBE_TX_FLAGS_VLAN_SHIFT); | ||
6226 | tx_flags |= ((adapter->fcoe.up << 13) | ||
6227 | << IXGBE_TX_FLAGS_VLAN_SHIFT); | ||
6228 | } | ||
6229 | #endif | ||
6230 | /* flag for FCoE offloads */ | ||
6231 | if (skb->protocol == htons(ETH_P_FCOE)) | ||
6232 | tx_flags |= IXGBE_TX_FLAGS_FCOE; | ||
6233 | } | ||
6234 | #endif | 6949 | #endif |
6235 | 6950 | ||
6236 | /* four things can cause us to need a context descriptor */ | 6951 | /* four things can cause us to need a context descriptor */ |
@@ -6244,8 +6959,8 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, | |||
6244 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) | 6959 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) |
6245 | count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); | 6960 | count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); |
6246 | 6961 | ||
6247 | if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { | 6962 | if (ixgbe_maybe_stop_tx(tx_ring, count)) { |
6248 | adapter->tx_busy++; | 6963 | tx_ring->tx_stats.tx_busy++; |
6249 | return NETDEV_TX_BUSY; | 6964 | return NETDEV_TX_BUSY; |
6250 | } | 6965 | } |
6251 | 6966 | ||
@@ -6262,9 +6977,10 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, | |||
6262 | tx_flags |= IXGBE_TX_FLAGS_FSO; | 6977 | tx_flags |= IXGBE_TX_FLAGS_FSO; |
6263 | #endif /* IXGBE_FCOE */ | 6978 | #endif /* IXGBE_FCOE */ |
6264 | } else { | 6979 | } else { |
6265 | if (skb->protocol == htons(ETH_P_IP)) | 6980 | if (protocol == htons(ETH_P_IP)) |
6266 | tx_flags |= IXGBE_TX_FLAGS_IPV4; | 6981 | tx_flags |= IXGBE_TX_FLAGS_IPV4; |
6267 | tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); | 6982 | tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, |
6983 | protocol); | ||
6268 | if (tso < 0) { | 6984 | if (tso < 0) { |
6269 | dev_kfree_skb_any(skb); | 6985 | dev_kfree_skb_any(skb); |
6270 | return NETDEV_TX_OK; | 6986 | return NETDEV_TX_OK; |
@@ -6272,30 +6988,19 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, | |||
6272 | 6988 | ||
6273 | if (tso) | 6989 | if (tso) |
6274 | tx_flags |= IXGBE_TX_FLAGS_TSO; | 6990 | tx_flags |= IXGBE_TX_FLAGS_TSO; |
6275 | else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && | 6991 | else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags, |
6992 | protocol) && | ||
6276 | (skb->ip_summed == CHECKSUM_PARTIAL)) | 6993 | (skb->ip_summed == CHECKSUM_PARTIAL)) |
6277 | tx_flags |= IXGBE_TX_FLAGS_CSUM; | 6994 | tx_flags |= IXGBE_TX_FLAGS_CSUM; |
6278 | } | 6995 | } |
6279 | 6996 | ||
6280 | count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first); | 6997 | count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len); |
6281 | if (count) { | 6998 | if (count) { |
6282 | /* add the ATR filter if ATR is on */ | 6999 | /* add the ATR filter if ATR is on */ |
6283 | if (tx_ring->atr_sample_rate) { | 7000 | if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) |
6284 | ++tx_ring->atr_count; | 7001 | ixgbe_atr(tx_ring, skb, tx_flags, protocol); |
6285 | if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && | 7002 | ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); |
6286 | test_bit(__IXGBE_FDIR_INIT_DONE, | 7003 | ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); |
6287 | &tx_ring->reinit_state)) { | ||
6288 | ixgbe_atr(adapter, skb, tx_ring->queue_index, | ||
6289 | tx_flags); | ||
6290 | tx_ring->atr_count = 0; | ||
6291 | } | ||
6292 | } | ||
6293 | txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); | ||
6294 | txq->tx_bytes += skb->len; | ||
6295 | txq->tx_packets++; | ||
6296 | ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, | ||
6297 | hdr_len); | ||
6298 | ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); | ||
6299 | 7004 | ||
6300 | } else { | 7005 | } else { |
6301 | dev_kfree_skb_any(skb); | 7006 | dev_kfree_skb_any(skb); |
@@ -6306,6 +7011,15 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, | |||
6306 | return NETDEV_TX_OK; | 7011 | return NETDEV_TX_OK; |
6307 | } | 7012 | } |
6308 | 7013 | ||
7014 | static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | ||
7015 | { | ||
7016 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
7017 | struct ixgbe_ring *tx_ring; | ||
7018 | |||
7019 | tx_ring = adapter->tx_ring[skb->queue_mapping]; | ||
7020 | return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); | ||
7021 | } | ||
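
[Editor's note] For tagged frames, ixgbe_xmit_frame_ring() above carries the VLAN tag in the upper 16 bits of tx_flags and, when DCB is enabled, overwrites the 3-bit user priority (bits 13-15 of the tag) with the ring's traffic class before shifting. The sketch below mirrors that packing with placeholder constants; the real IXGBE_TX_FLAGS_* values live in ixgbe.h and may differ.

#include <stdint.h>
#include <stdio.h>

/* Placeholder values, not the driver's actual IXGBE_TX_FLAGS_* constants. */
#define TX_FLAGS_VLAN		0x00000001u	/* "insert VLAN tag" flag bit */
#define TX_FLAGS_VLAN_PRIO_MASK	0x0000e000u	/* PCP bits inside the tag */
#define TX_FLAGS_VLAN_SHIFT	16		/* tag kept in the upper half */

static uint32_t pack_vlan_tx_flags(uint16_t vlan_tag, int dcb_enabled, uint8_t tc)
{
	uint32_t tx_flags = vlan_tag;

	if (dcb_enabled) {
		/* replace the user priority with the ring's traffic class */
		tx_flags &= ~TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (uint32_t)tc << 13;
	}
	tx_flags <<= TX_FLAGS_VLAN_SHIFT;
	tx_flags |= TX_FLAGS_VLAN;
	return tx_flags;
}

int main(void)
{
	printf("tx_flags = 0x%08x\n", pack_vlan_tx_flags(0x0064, 1, 5));
	return 0;
}
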
7022 | |||
6309 | /** | 7023 | /** |
6310 | * ixgbe_set_mac - Change the Ethernet Address of the NIC | 7024 | * ixgbe_set_mac - Change the Ethernet Address of the NIC |
6311 | * @netdev: network interface device structure | 7025 | * @netdev: network interface device structure |
@@ -6436,8 +7150,57 @@ static void ixgbe_netpoll(struct net_device *netdev) | |||
6436 | } | 7150 | } |
6437 | #endif | 7151 | #endif |
6438 | 7152 | ||
7153 | static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, | ||
7154 | struct rtnl_link_stats64 *stats) | ||
7155 | { | ||
7156 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
7157 | int i; | ||
7158 | |||
7159 | rcu_read_lock(); | ||
7160 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
7161 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); | ||
7162 | u64 bytes, packets; | ||
7163 | unsigned int start; | ||
7164 | |||
7165 | if (ring) { | ||
7166 | do { | ||
7167 | start = u64_stats_fetch_begin_bh(&ring->syncp); | ||
7168 | packets = ring->stats.packets; | ||
7169 | bytes = ring->stats.bytes; | ||
7170 | } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); | ||
7171 | stats->rx_packets += packets; | ||
7172 | stats->rx_bytes += bytes; | ||
7173 | } | ||
7174 | } | ||
7175 | |||
7176 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
7177 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); | ||
7178 | u64 bytes, packets; | ||
7179 | unsigned int start; | ||
7180 | |||
7181 | if (ring) { | ||
7182 | do { | ||
7183 | start = u64_stats_fetch_begin_bh(&ring->syncp); | ||
7184 | packets = ring->stats.packets; | ||
7185 | bytes = ring->stats.bytes; | ||
7186 | } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); | ||
7187 | stats->tx_packets += packets; | ||
7188 | stats->tx_bytes += bytes; | ||
7189 | } | ||
7190 | } | ||
7191 | rcu_read_unlock(); | ||
7192 | /* following stats updated by ixgbe_watchdog_task() */ | ||
7193 | stats->multicast = netdev->stats.multicast; | ||
7194 | stats->rx_errors = netdev->stats.rx_errors; | ||
7195 | stats->rx_length_errors = netdev->stats.rx_length_errors; | ||
7196 | stats->rx_crc_errors = netdev->stats.rx_crc_errors; | ||
7197 | stats->rx_missed_errors = netdev->stats.rx_missed_errors; | ||
7198 | return stats; | ||
7199 | } | ||
7200 | |||
7201 | |||
6439 | static const struct net_device_ops ixgbe_netdev_ops = { | 7202 | static const struct net_device_ops ixgbe_netdev_ops = { |
6440 | .ndo_open = ixgbe_open, | 7203 | .ndo_open = ixgbe_open, |
6441 | .ndo_stop = ixgbe_close, | 7204 | .ndo_stop = ixgbe_close, |
6442 | .ndo_start_xmit = ixgbe_xmit_frame, | 7205 | .ndo_start_xmit = ixgbe_xmit_frame, |
6443 | .ndo_select_queue = ixgbe_select_queue, | 7206 | .ndo_select_queue = ixgbe_select_queue, |
@@ -6447,7 +7210,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { | |||
6447 | .ndo_set_mac_address = ixgbe_set_mac, | 7210 | .ndo_set_mac_address = ixgbe_set_mac, |
6448 | .ndo_change_mtu = ixgbe_change_mtu, | 7211 | .ndo_change_mtu = ixgbe_change_mtu, |
6449 | .ndo_tx_timeout = ixgbe_tx_timeout, | 7212 | .ndo_tx_timeout = ixgbe_tx_timeout, |
6450 | .ndo_vlan_rx_register = ixgbe_vlan_rx_register, | ||
6451 | .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, | 7213 | .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, |
6452 | .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, | 7214 | .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, |
6453 | .ndo_do_ioctl = ixgbe_ioctl, | 7215 | .ndo_do_ioctl = ixgbe_ioctl, |
@@ -6455,11 +7217,16 @@ static const struct net_device_ops ixgbe_netdev_ops = { | |||
6455 | .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, | 7217 | .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, |
6456 | .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, | 7218 | .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, |
6457 | .ndo_get_vf_config = ixgbe_ndo_get_vf_config, | 7219 | .ndo_get_vf_config = ixgbe_ndo_get_vf_config, |
7220 | .ndo_get_stats64 = ixgbe_get_stats64, | ||
7221 | #ifdef CONFIG_IXGBE_DCB | ||
7222 | .ndo_setup_tc = ixgbe_setup_tc, | ||
7223 | #endif | ||
6458 | #ifdef CONFIG_NET_POLL_CONTROLLER | 7224 | #ifdef CONFIG_NET_POLL_CONTROLLER |
6459 | .ndo_poll_controller = ixgbe_netpoll, | 7225 | .ndo_poll_controller = ixgbe_netpoll, |
6460 | #endif | 7226 | #endif |
6461 | #ifdef IXGBE_FCOE | 7227 | #ifdef IXGBE_FCOE |
6462 | .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, | 7228 | .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, |
7229 | .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, | ||
6463 | .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, | 7230 | .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, |
6464 | .ndo_fcoe_enable = ixgbe_fcoe_enable, | 7231 | .ndo_fcoe_enable = ixgbe_fcoe_enable, |
6465 | .ndo_fcoe_disable = ixgbe_fcoe_disable, | 7232 | .ndo_fcoe_disable = ixgbe_fcoe_disable, |
@@ -6473,8 +7240,10 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, | |||
6473 | #ifdef CONFIG_PCI_IOV | 7240 | #ifdef CONFIG_PCI_IOV |
6474 | struct ixgbe_hw *hw = &adapter->hw; | 7241 | struct ixgbe_hw *hw = &adapter->hw; |
6475 | int err; | 7242 | int err; |
7243 | int num_vf_macvlans, i; | ||
7244 | struct vf_macvlans *mv_list; | ||
6476 | 7245 | ||
6477 | if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs) | 7246 | if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs) |
6478 | return; | 7247 | return; |
6479 | 7248 | ||
6480 | /* The 82599 supports up to 64 VFs per physical function | 7249 | /* The 82599 supports up to 64 VFs per physical function |
@@ -6489,6 +7258,26 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, | |||
6489 | e_err(probe, "Failed to enable PCI sriov: %d\n", err); | 7258 | e_err(probe, "Failed to enable PCI sriov: %d\n", err); |
6490 | goto err_novfs; | 7259 | goto err_novfs; |
6491 | } | 7260 | } |
7261 | |||
7262 | num_vf_macvlans = hw->mac.num_rar_entries - | ||
7263 | (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); | ||
7264 | |||
7265 | adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, | ||
7266 | sizeof(struct vf_macvlans), | ||
7267 | GFP_KERNEL); | ||
7268 | if (mv_list) { | ||
7269 | /* Initialize list of VF macvlans */ | ||
7270 | INIT_LIST_HEAD(&adapter->vf_mvs.l); | ||
7271 | for (i = 0; i < num_vf_macvlans; i++) { | ||
7272 | mv_list->vf = -1; | ||
7273 | mv_list->free = true; | ||
7274 | mv_list->rar_entry = hw->mac.num_rar_entries - | ||
7275 | (i + adapter->num_vfs + 1); | ||
7276 | list_add(&mv_list->l, &adapter->vf_mvs.l); | ||
7277 | mv_list++; | ||
7278 | } | ||
7279 | } | ||
7280 | |||
6492 | /* If call to enable VFs succeeded then allocate memory | 7281 | /* If call to enable VFs succeeded then allocate memory |
6493 | * for per VF control structures. | 7282 | * for per VF control structures. |
6494 | */ | 7283 | */ |
@@ -6532,7 +7321,7 @@ err_novfs: | |||
6532 | * and a hardware reset occur. | 7321 | * and a hardware reset occur. |
6533 | **/ | 7322 | **/ |
6534 | static int __devinit ixgbe_probe(struct pci_dev *pdev, | 7323 | static int __devinit ixgbe_probe(struct pci_dev *pdev, |
6535 | const struct pci_device_id *ent) | 7324 | const struct pci_device_id *ent) |
6536 | { | 7325 | { |
6537 | struct net_device *netdev; | 7326 | struct net_device *netdev; |
6538 | struct ixgbe_adapter *adapter = NULL; | 7327 | struct ixgbe_adapter *adapter = NULL; |
@@ -6540,11 +7329,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6540 | const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; | 7329 | const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; |
6541 | static int cards_found; | 7330 | static int cards_found; |
6542 | int i, err, pci_using_dac; | 7331 | int i, err, pci_using_dac; |
7332 | u8 part_str[IXGBE_PBANUM_LENGTH]; | ||
6543 | unsigned int indices = num_possible_cpus(); | 7333 | unsigned int indices = num_possible_cpus(); |
6544 | #ifdef IXGBE_FCOE | 7334 | #ifdef IXGBE_FCOE |
6545 | u16 device_caps; | 7335 | u16 device_caps; |
6546 | #endif | 7336 | #endif |
6547 | u32 part_num, eec; | 7337 | u32 eec; |
6548 | 7338 | ||
6549 | /* Catch broken hardware that put the wrong VF device ID in | 7339 | /* Catch broken hardware that put the wrong VF device ID in |
6550 | * the PCIe SR-IOV capability. | 7340 | * the PCIe SR-IOV capability. |
@@ -6577,7 +7367,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6577 | } | 7367 | } |
6578 | 7368 | ||
6579 | err = pci_request_selected_regions(pdev, pci_select_bars(pdev, | 7369 | err = pci_request_selected_regions(pdev, pci_select_bars(pdev, |
6580 | IORESOURCE_MEM), ixgbe_driver_name); | 7370 | IORESOURCE_MEM), ixgbe_driver_name); |
6581 | if (err) { | 7371 | if (err) { |
6582 | dev_err(&pdev->dev, | 7372 | dev_err(&pdev->dev, |
6583 | "pci_request_selected_regions failed 0x%x\n", err); | 7373 | "pci_request_selected_regions failed 0x%x\n", err); |
@@ -6594,8 +7384,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6594 | else | 7384 | else |
6595 | indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); | 7385 | indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); |
6596 | 7386 | ||
7387 | #if defined(CONFIG_DCB) | ||
6597 | indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES); | 7388 | indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES); |
6598 | #ifdef IXGBE_FCOE | 7389 | #elif defined(IXGBE_FCOE) |
6599 | indices += min_t(unsigned int, num_possible_cpus(), | 7390 | indices += min_t(unsigned int, num_possible_cpus(), |
6600 | IXGBE_MAX_FCOE_INDICES); | 7391 | IXGBE_MAX_FCOE_INDICES); |
6601 | #endif | 7392 | #endif |
@@ -6607,8 +7398,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6607 | 7398 | ||
6608 | SET_NETDEV_DEV(netdev, &pdev->dev); | 7399 | SET_NETDEV_DEV(netdev, &pdev->dev); |
6609 | 7400 | ||
6610 | pci_set_drvdata(pdev, netdev); | ||
6611 | adapter = netdev_priv(netdev); | 7401 | adapter = netdev_priv(netdev); |
7402 | pci_set_drvdata(pdev, adapter); | ||
6612 | 7403 | ||
6613 | adapter->netdev = netdev; | 7404 | adapter->netdev = netdev; |
6614 | adapter->pdev = pdev; | 7405 | adapter->pdev = pdev; |
@@ -6617,7 +7408,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6617 | adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; | 7408 | adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; |
6618 | 7409 | ||
6619 | hw->hw_addr = ioremap(pci_resource_start(pdev, 0), | 7410 | hw->hw_addr = ioremap(pci_resource_start(pdev, 0), |
6620 | pci_resource_len(pdev, 0)); | 7411 | pci_resource_len(pdev, 0)); |
6621 | if (!hw->hw_addr) { | 7412 | if (!hw->hw_addr) { |
6622 | err = -EIO; | 7413 | err = -EIO; |
6623 | goto err_ioremap; | 7414 | goto err_ioremap; |
@@ -6631,7 +7422,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6631 | netdev->netdev_ops = &ixgbe_netdev_ops; | 7422 | netdev->netdev_ops = &ixgbe_netdev_ops; |
6632 | ixgbe_set_ethtool_ops(netdev); | 7423 | ixgbe_set_ethtool_ops(netdev); |
6633 | netdev->watchdog_timeo = 5 * HZ; | 7424 | netdev->watchdog_timeo = 5 * HZ; |
6634 | strcpy(netdev->name, pci_name(pdev)); | 7425 | strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); |
6635 | 7426 | ||
6636 | adapter->bd_number = cards_found; | 7427 | adapter->bd_number = cards_found; |
6637 | 7428 | ||
@@ -6657,22 +7448,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6657 | hw->phy.mdio.mdio_read = ixgbe_mdio_read; | 7448 | hw->phy.mdio.mdio_read = ixgbe_mdio_read; |
6658 | hw->phy.mdio.mdio_write = ixgbe_mdio_write; | 7449 | hw->phy.mdio.mdio_write = ixgbe_mdio_write; |
6659 | 7450 | ||
6660 | /* set up this timer and work struct before calling get_invariants | ||
6661 | * which might start the timer | ||
6662 | */ | ||
6663 | init_timer(&adapter->sfp_timer); | ||
6664 | adapter->sfp_timer.function = &ixgbe_sfp_timer; | ||
6665 | adapter->sfp_timer.data = (unsigned long) adapter; | ||
6666 | |||
6667 | INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task); | ||
6668 | |||
6669 | /* multispeed fiber has its own tasklet, called from GPI SDP1 context */ | ||
6670 | INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task); | ||
6671 | |||
6672 | /* a new SFP+ module arrival, called from GPI SDP2 context */ | ||
6673 | INIT_WORK(&adapter->sfp_config_module_task, | ||
6674 | ixgbe_sfp_config_module_task); | ||
6675 | |||
6676 | ii->get_invariants(hw); | 7451 | ii->get_invariants(hw); |
6677 | 7452 | ||
6678 | /* setup the private structure */ | 7453 | /* setup the private structure */ |
@@ -6681,8 +7456,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6681 | goto err_sw_init; | 7456 | goto err_sw_init; |
6682 | 7457 | ||
6683 | /* Make it possible the adapter to be woken up via WOL */ | 7458 | /* Make it possible the adapter to be woken up via WOL */ |
6684 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | 7459 | switch (adapter->hw.mac.type) { |
7460 | case ixgbe_mac_82599EB: | ||
7461 | case ixgbe_mac_X540: | ||
6685 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); | 7462 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); |
7463 | break; | ||
7464 | default: | ||
7465 | break; | ||
7466 | } | ||
6686 | 7467 | ||
6687 | /* | 7468 | /* |
6688 | * If there is a fan on this device and it has failed log the | 7469 | * If there is a fan on this device and it has failed log the |
@@ -6700,17 +7481,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6700 | hw->phy.reset_if_overtemp = false; | 7481 | hw->phy.reset_if_overtemp = false; |
6701 | if (err == IXGBE_ERR_SFP_NOT_PRESENT && | 7482 | if (err == IXGBE_ERR_SFP_NOT_PRESENT && |
6702 | hw->mac.type == ixgbe_mac_82598EB) { | 7483 | hw->mac.type == ixgbe_mac_82598EB) { |
6703 | /* | ||
6704 | * Start a kernel thread to watch for a module to arrive. | ||
6705 | * Only do this for 82598, since 82599 will generate | ||
6706 | * interrupts on module arrival. | ||
6707 | */ | ||
6708 | set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | ||
6709 | mod_timer(&adapter->sfp_timer, | ||
6710 | round_jiffies(jiffies + (2 * HZ))); | ||
6711 | err = 0; | 7484 | err = 0; |
6712 | } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 7485 | } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
6713 | e_dev_err("failed to initialize because an unsupported SFP+ " | 7486 | e_dev_err("failed to load because an unsupported SFP+ " |
6714 | "module type was detected.\n"); | 7487 | "module type was detected.\n"); |
6715 | e_dev_err("Reload the driver after installing a supported " | 7488 | e_dev_err("Reload the driver after installing a supported " |
6716 | "module.\n"); | 7489 | "module.\n"); |
@@ -6723,18 +7496,25 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6723 | ixgbe_probe_vf(adapter, ii); | 7496 | ixgbe_probe_vf(adapter, ii); |
6724 | 7497 | ||
6725 | netdev->features = NETIF_F_SG | | 7498 | netdev->features = NETIF_F_SG | |
6726 | NETIF_F_IP_CSUM | | 7499 | NETIF_F_IP_CSUM | |
6727 | NETIF_F_HW_VLAN_TX | | 7500 | NETIF_F_HW_VLAN_TX | |
6728 | NETIF_F_HW_VLAN_RX | | 7501 | NETIF_F_HW_VLAN_RX | |
6729 | NETIF_F_HW_VLAN_FILTER; | 7502 | NETIF_F_HW_VLAN_FILTER; |
6730 | 7503 | ||
6731 | netdev->features |= NETIF_F_IPV6_CSUM; | 7504 | netdev->features |= NETIF_F_IPV6_CSUM; |
6732 | netdev->features |= NETIF_F_TSO; | 7505 | netdev->features |= NETIF_F_TSO; |
6733 | netdev->features |= NETIF_F_TSO6; | 7506 | netdev->features |= NETIF_F_TSO6; |
6734 | netdev->features |= NETIF_F_GRO; | 7507 | netdev->features |= NETIF_F_GRO; |
7508 | netdev->features |= NETIF_F_RXHASH; | ||
6735 | 7509 | ||
6736 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | 7510 | switch (adapter->hw.mac.type) { |
7511 | case ixgbe_mac_82599EB: | ||
7512 | case ixgbe_mac_X540: | ||
6737 | netdev->features |= NETIF_F_SCTP_CSUM; | 7513 | netdev->features |= NETIF_F_SCTP_CSUM; |
7514 | break; | ||
7515 | default: | ||
7516 | break; | ||
7517 | } | ||
6738 | 7518 | ||
6739 | netdev->vlan_features |= NETIF_F_TSO; | 7519 | netdev->vlan_features |= NETIF_F_TSO; |
6740 | netdev->vlan_features |= NETIF_F_TSO6; | 7520 | netdev->vlan_features |= NETIF_F_TSO6; |
@@ -6745,8 +7525,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6745 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | 7525 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
6746 | adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | | 7526 | adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | |
6747 | IXGBE_FLAG_DCB_ENABLED); | 7527 | IXGBE_FLAG_DCB_ENABLED); |
6748 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) | ||
6749 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | ||
6750 | 7528 | ||
6751 | #ifdef CONFIG_IXGBE_DCB | 7529 | #ifdef CONFIG_IXGBE_DCB |
6752 | netdev->dcbnl_ops = &dcbnl_ops; | 7530 | netdev->dcbnl_ops = &dcbnl_ops; |
@@ -6766,8 +7544,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6766 | netdev->vlan_features |= NETIF_F_FCOE_MTU; | 7544 | netdev->vlan_features |= NETIF_F_FCOE_MTU; |
6767 | } | 7545 | } |
6768 | #endif /* IXGBE_FCOE */ | 7546 | #endif /* IXGBE_FCOE */ |
6769 | if (pci_using_dac) | 7547 | if (pci_using_dac) { |
6770 | netdev->features |= NETIF_F_HIGHDMA; | 7548 | netdev->features |= NETIF_F_HIGHDMA; |
7549 | netdev->vlan_features |= NETIF_F_HIGHDMA; | ||
7550 | } | ||
6771 | 7551 | ||
6772 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) | 7552 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) |
6773 | netdev->features |= NETIF_F_LRO; | 7553 | netdev->features |= NETIF_F_LRO; |
@@ -6788,25 +7568,42 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6788 | goto err_eeprom; | 7568 | goto err_eeprom; |
6789 | } | 7569 | } |
6790 | 7570 | ||
6791 | /* power down the optics */ | 7571 | /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ |
6792 | if (hw->phy.multispeed_fiber) | 7572 | if (hw->mac.ops.disable_tx_laser && |
7573 | ((hw->phy.multispeed_fiber) || | ||
7574 | ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && | ||
7575 | (hw->mac.type == ixgbe_mac_82599EB)))) | ||
6793 | hw->mac.ops.disable_tx_laser(hw); | 7576 | hw->mac.ops.disable_tx_laser(hw); |
6794 | 7577 | ||
6795 | init_timer(&adapter->watchdog_timer); | 7578 | setup_timer(&adapter->service_timer, &ixgbe_service_timer, |
6796 | adapter->watchdog_timer.function = &ixgbe_watchdog; | 7579 | (unsigned long) adapter); |
6797 | adapter->watchdog_timer.data = (unsigned long)adapter; | ||
6798 | 7580 | ||
6799 | INIT_WORK(&adapter->reset_task, ixgbe_reset_task); | 7581 | INIT_WORK(&adapter->service_task, ixgbe_service_task); |
6800 | INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task); | 7582 | clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); |
6801 | 7583 | ||
6802 | err = ixgbe_init_interrupt_scheme(adapter); | 7584 | err = ixgbe_init_interrupt_scheme(adapter); |
6803 | if (err) | 7585 | if (err) |
6804 | goto err_sw_init; | 7586 | goto err_sw_init; |
6805 | 7587 | ||
7588 | if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) | ||
7589 | netdev->features &= ~NETIF_F_RXHASH; | ||
7590 | |||
6806 | switch (pdev->device) { | 7591 | switch (pdev->device) { |
7592 | case IXGBE_DEV_ID_82599_SFP: | ||
7593 | /* Only this subdevice supports WOL */ | ||
7594 | if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP) | ||
7595 | adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | | ||
7596 | IXGBE_WUFC_MC | IXGBE_WUFC_BC); | ||
7597 | break; | ||
7598 | case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: | ||
7599 | /* All except this subdevice support WOL */ | ||
7600 | if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) | ||
7601 | adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | | ||
7602 | IXGBE_WUFC_MC | IXGBE_WUFC_BC); | ||
7603 | break; | ||
6807 | case IXGBE_DEV_ID_82599_KX4: | 7604 | case IXGBE_DEV_ID_82599_KX4: |
6808 | adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | | 7605 | adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | |
6809 | IXGBE_WUFC_MC | IXGBE_WUFC_BC); | 7606 | IXGBE_WUFC_MC | IXGBE_WUFC_BC); |
6810 | break; | 7607 | break; |
6811 | default: | 7608 | default: |
6812 | adapter->wol = 0; | 7609 | adapter->wol = 0; |
@@ -6819,23 +7616,25 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6819 | 7616 | ||
6820 | /* print bus type/speed/width info */ | 7617 | /* print bus type/speed/width info */ |
6821 | e_dev_info("(PCI Express:%s:%s) %pM\n", | 7618 | e_dev_info("(PCI Express:%s:%s) %pM\n", |
6822 | ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": | 7619 | (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" : |
6823 | (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"), | 7620 | hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" : |
6824 | ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : | 7621 | "Unknown"), |
6825 | (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : | 7622 | (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" : |
6826 | (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : | 7623 | hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" : |
6827 | "Unknown"), | 7624 | hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" : |
6828 | netdev->dev_addr); | 7625 | "Unknown"), |
6829 | ixgbe_read_pba_num_generic(hw, &part_num); | 7626 | netdev->dev_addr); |
7627 | |||
7628 | err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH); | ||
7629 | if (err) | ||
7630 | strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH); | ||
6830 | if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) | 7631 | if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) |
6831 | e_dev_info("MAC: %d, PHY: %d, SFP+: %d, " | 7632 | e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", |
6832 | "PBA No: %06x-%03x\n", | ||
6833 | hw->mac.type, hw->phy.type, hw->phy.sfp_type, | 7633 | hw->mac.type, hw->phy.type, hw->phy.sfp_type, |
6834 | (part_num >> 8), (part_num & 0xff)); | 7634 | part_str); |
6835 | else | 7635 | else |
6836 | e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", | 7636 | e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", |
6837 | hw->mac.type, hw->phy.type, | 7637 | hw->mac.type, hw->phy.type, part_str); |
6838 | (part_num >> 8), (part_num & 0xff)); | ||
6839 | 7638 | ||
6840 | if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { | 7639 | if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { |
6841 | e_dev_warn("PCI-Express bandwidth available for this card is " | 7640 | e_dev_warn("PCI-Express bandwidth available for this card is " |
@@ -6867,12 +7666,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6867 | /* carrier off reporting is important to ethtool even BEFORE open */ | 7666 | /* carrier off reporting is important to ethtool even BEFORE open */ |
6868 | netif_carrier_off(netdev); | 7667 | netif_carrier_off(netdev); |
6869 | 7668 | ||
6870 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | ||
6871 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | ||
6872 | INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task); | ||
6873 | |||
6874 | if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) | ||
6875 | INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task); | ||
6876 | #ifdef CONFIG_IXGBE_DCA | 7669 | #ifdef CONFIG_IXGBE_DCA |
6877 | if (dca_add_requester(&pdev->dev) == 0) { | 7670 | if (dca_add_requester(&pdev->dev) == 0) { |
6878 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; | 7671 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; |
@@ -6899,17 +7692,13 @@ err_sw_init: | |||
6899 | err_eeprom: | 7692 | err_eeprom: |
6900 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | 7693 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
6901 | ixgbe_disable_sriov(adapter); | 7694 | ixgbe_disable_sriov(adapter); |
6902 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | 7695 | adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; |
6903 | del_timer_sync(&adapter->sfp_timer); | ||
6904 | cancel_work_sync(&adapter->sfp_task); | ||
6905 | cancel_work_sync(&adapter->multispeed_fiber_task); | ||
6906 | cancel_work_sync(&adapter->sfp_config_module_task); | ||
6907 | iounmap(hw->hw_addr); | 7696 | iounmap(hw->hw_addr); |
6908 | err_ioremap: | 7697 | err_ioremap: |
6909 | free_netdev(netdev); | 7698 | free_netdev(netdev); |
6910 | err_alloc_etherdev: | 7699 | err_alloc_etherdev: |
6911 | pci_release_selected_regions(pdev, pci_select_bars(pdev, | 7700 | pci_release_selected_regions(pdev, |
6912 | IORESOURCE_MEM)); | 7701 | pci_select_bars(pdev, IORESOURCE_MEM)); |
6913 | err_pci_reg: | 7702 | err_pci_reg: |
6914 | err_dma: | 7703 | err_dma: |
6915 | pci_disable_device(pdev); | 7704 | pci_disable_device(pdev); |
@@ -6927,25 +7716,11 @@ err_dma: | |||
6927 | **/ | 7716 | **/ |
6928 | static void __devexit ixgbe_remove(struct pci_dev *pdev) | 7717 | static void __devexit ixgbe_remove(struct pci_dev *pdev) |
6929 | { | 7718 | { |
6930 | struct net_device *netdev = pci_get_drvdata(pdev); | 7719 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
6931 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 7720 | struct net_device *netdev = adapter->netdev; |
6932 | 7721 | ||
6933 | set_bit(__IXGBE_DOWN, &adapter->state); | 7722 | set_bit(__IXGBE_DOWN, &adapter->state); |
6934 | /* clear the module not found bit to make sure the worker won't | 7723 | cancel_work_sync(&adapter->service_task); |
6935 | * reschedule | ||
6936 | */ | ||
6937 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | ||
6938 | del_timer_sync(&adapter->watchdog_timer); | ||
6939 | |||
6940 | del_timer_sync(&adapter->sfp_timer); | ||
6941 | cancel_work_sync(&adapter->watchdog_task); | ||
6942 | cancel_work_sync(&adapter->sfp_task); | ||
6943 | cancel_work_sync(&adapter->multispeed_fiber_task); | ||
6944 | cancel_work_sync(&adapter->sfp_config_module_task); | ||
6945 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | ||
6946 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | ||
6947 | cancel_work_sync(&adapter->fdir_reinit_task); | ||
6948 | flush_scheduled_work(); | ||
6949 | 7724 | ||
6950 | #ifdef CONFIG_IXGBE_DCA | 7725 | #ifdef CONFIG_IXGBE_DCA |
6951 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { | 7726 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { |
@@ -6976,7 +7751,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
6976 | 7751 | ||
6977 | iounmap(adapter->hw.hw_addr); | 7752 | iounmap(adapter->hw.hw_addr); |
6978 | pci_release_selected_regions(pdev, pci_select_bars(pdev, | 7753 | pci_release_selected_regions(pdev, pci_select_bars(pdev, |
6979 | IORESOURCE_MEM)); | 7754 | IORESOURCE_MEM)); |
6980 | 7755 | ||
6981 | e_dev_info("complete\n"); | 7756 | e_dev_info("complete\n"); |
6982 | 7757 | ||
@@ -6996,10 +7771,10 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
6996 | * this device has been detected. | 7771 | * this device has been detected. |
6997 | */ | 7772 | */ |
6998 | static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, | 7773 | static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, |
6999 | pci_channel_state_t state) | 7774 | pci_channel_state_t state) |
7000 | { | 7775 | { |
7001 | struct net_device *netdev = pci_get_drvdata(pdev); | 7776 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
7002 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 7777 | struct net_device *netdev = adapter->netdev; |
7003 | 7778 | ||
7004 | netif_device_detach(netdev); | 7779 | netif_device_detach(netdev); |
7005 | 7780 | ||
@@ -7022,8 +7797,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, | |||
7022 | */ | 7797 | */ |
7023 | static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) | 7798 | static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) |
7024 | { | 7799 | { |
7025 | struct net_device *netdev = pci_get_drvdata(pdev); | 7800 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
7026 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
7027 | pci_ers_result_t result; | 7801 | pci_ers_result_t result; |
7028 | int err; | 7802 | int err; |
7029 | 7803 | ||
@@ -7061,8 +7835,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) | |||
7061 | */ | 7835 | */ |
7062 | static void ixgbe_io_resume(struct pci_dev *pdev) | 7836 | static void ixgbe_io_resume(struct pci_dev *pdev) |
7063 | { | 7837 | { |
7064 | struct net_device *netdev = pci_get_drvdata(pdev); | 7838 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
7065 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 7839 | struct net_device *netdev = adapter->netdev; |
7066 | 7840 | ||
7067 | if (netif_running(netdev)) { | 7841 | if (netif_running(netdev)) { |
7068 | if (ixgbe_up(adapter)) { | 7842 | if (ixgbe_up(adapter)) { |
@@ -7102,8 +7876,7 @@ static struct pci_driver ixgbe_driver = { | |||
7102 | static int __init ixgbe_init_module(void) | 7876 | static int __init ixgbe_init_module(void) |
7103 | { | 7877 | { |
7104 | int ret; | 7878 | int ret; |
7105 | pr_info("%s - version %s\n", ixgbe_driver_string, | 7879 | pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); |
7106 | ixgbe_driver_version); | ||
7107 | pr_info("%s\n", ixgbe_copyright); | 7880 | pr_info("%s\n", ixgbe_copyright); |
7108 | 7881 | ||
7109 | #ifdef CONFIG_IXGBE_DCA | 7882 | #ifdef CONFIG_IXGBE_DCA |
@@ -7128,32 +7901,23 @@ static void __exit ixgbe_exit_module(void) | |||
7128 | dca_unregister_notify(&dca_notifier); | 7901 | dca_unregister_notify(&dca_notifier); |
7129 | #endif | 7902 | #endif |
7130 | pci_unregister_driver(&ixgbe_driver); | 7903 | pci_unregister_driver(&ixgbe_driver); |
7904 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ | ||
7131 | } | 7905 | } |
7132 | 7906 | ||
7133 | #ifdef CONFIG_IXGBE_DCA | 7907 | #ifdef CONFIG_IXGBE_DCA |
7134 | static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, | 7908 | static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, |
7135 | void *p) | 7909 | void *p) |
7136 | { | 7910 | { |
7137 | int ret_val; | 7911 | int ret_val; |
7138 | 7912 | ||
7139 | ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, | 7913 | ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, |
7140 | __ixgbe_notify_dca); | 7914 | __ixgbe_notify_dca); |
7141 | 7915 | ||
7142 | return ret_val ? NOTIFY_BAD : NOTIFY_DONE; | 7916 | return ret_val ? NOTIFY_BAD : NOTIFY_DONE; |
7143 | } | 7917 | } |
7144 | 7918 | ||
7145 | #endif /* CONFIG_IXGBE_DCA */ | 7919 | #endif /* CONFIG_IXGBE_DCA */ |
7146 | 7920 | ||
7147 | /** | ||
7148 | * ixgbe_get_hw_dev return device | ||
7149 | * used by hardware layer to print debugging information | ||
7150 | **/ | ||
7151 | struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw) | ||
7152 | { | ||
7153 | struct ixgbe_adapter *adapter = hw->back; | ||
7154 | return adapter->netdev; | ||
7155 | } | ||
7156 | |||
7157 | module_exit(ixgbe_exit_module); | 7921 | module_exit(ixgbe_exit_module); |
7158 | 7922 | ||
7159 | /* ixgbe_main.c */ | 7923 | /* ixgbe_main.c */ |