Diffstat (limited to 'drivers/net/igb/igb_main.c')
-rw-r--r--	drivers/net/igb/igb_main.c	637
 1 file changed, 490 insertions(+), 147 deletions(-)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 583a21c1def3..589cf4a6427a 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -32,6 +32,7 @@
 #include <linux/pagemap.h>
 #include <linux/netdevice.h>
 #include <linux/ipv6.h>
+#include <linux/slab.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
 #include <linux/net_tstamp.h>
@@ -61,6 +62,10 @@ static const struct e1000_info *igb_info_tbl[] = {
 };
 
 static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -72,6 +77,7 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
@@ -195,6 +201,336 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+struct igb_reg_info {
+	u32 ofs;
+	char *name;
+};
+
+static const struct igb_reg_info igb_reg_info_tbl[] = {
+
+	/* General Registers */
+	{E1000_CTRL, "CTRL"},
+	{E1000_STATUS, "STATUS"},
+	{E1000_CTRL_EXT, "CTRL_EXT"},
+
+	/* Interrupt Registers */
+	{E1000_ICR, "ICR"},
+
+	/* RX Registers */
+	{E1000_RCTL, "RCTL"},
+	{E1000_RDLEN(0), "RDLEN"},
+	{E1000_RDH(0), "RDH"},
+	{E1000_RDT(0), "RDT"},
+	{E1000_RXDCTL(0), "RXDCTL"},
+	{E1000_RDBAL(0), "RDBAL"},
+	{E1000_RDBAH(0), "RDBAH"},
+
+	/* TX Registers */
+	{E1000_TCTL, "TCTL"},
+	{E1000_TDBAL(0), "TDBAL"},
+	{E1000_TDBAH(0), "TDBAH"},
+	{E1000_TDLEN(0), "TDLEN"},
+	{E1000_TDH(0), "TDH"},
+	{E1000_TDT(0), "TDT"},
+	{E1000_TXDCTL(0), "TXDCTL"},
+	{E1000_TDFH, "TDFH"},
+	{E1000_TDFT, "TDFT"},
+	{E1000_TDFHS, "TDFHS"},
+	{E1000_TDFPC, "TDFPC"},
+
+	/* List Terminator */
+	{}
+};
+
+/*
+ * igb_regdump - register printout routine
+ */
+static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
+{
+	int n = 0;
+	char rname[16];
+	u32 regs[8];
+
+	switch (reginfo->ofs) {
+	case E1000_RDLEN(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDLEN(n));
+		break;
+	case E1000_RDH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDH(n));
+		break;
+	case E1000_RDT(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDT(n));
+		break;
+	case E1000_RXDCTL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RXDCTL(n));
+		break;
+	case E1000_RDBAL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDBAL(n));
+		break;
+	case E1000_RDBAH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDBAH(n));
+		break;
+	case E1000_TDBAL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDBAL(n));
+		break;
+	case E1000_TDBAH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDBAH(n));
+		break;
+	case E1000_TDLEN(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDLEN(n));
+		break;
+	case E1000_TDH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDH(n));
+		break;
+	case E1000_TDT(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDT(n));
+		break;
+	case E1000_TXDCTL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TXDCTL(n));
+		break;
+	default:
+		printk(KERN_INFO "%-15s %08x\n",
+			reginfo->name, rd32(reginfo->ofs));
+		return;
+	}
+
+	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
+	printk(KERN_INFO "%-15s ", rname);
+	for (n = 0; n < 4; n++)
+		printk(KERN_CONT "%08x ", regs[n]);
+	printk(KERN_CONT "\n");
+}
+
+/*
+ * igb_dump - Print registers, tx-rings and rx-rings
+ */
+static void igb_dump(struct igb_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	struct igb_reg_info *reginfo;
+	int n = 0;
+	struct igb_ring *tx_ring;
+	union e1000_adv_tx_desc *tx_desc;
+	struct my_u0 { u64 a; u64 b; } *u0;
+	struct igb_buffer *buffer_info;
+	struct igb_ring *rx_ring;
+	union e1000_adv_rx_desc *rx_desc;
+	u32 staterr;
+	int i = 0;
+
+	if (!netif_msg_hw(adapter))
+		return;
+
+	/* Print netdevice Info */
+	if (netdev) {
+		dev_info(&adapter->pdev->dev, "Net device Info\n");
+		printk(KERN_INFO "Device Name     state            "
+			"trans_start      last_rx\n");
+		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+			netdev->name,
+			netdev->state,
+			netdev->trans_start,
+			netdev->last_rx);
+	}
+
+	/* Print Registers */
+	dev_info(&adapter->pdev->dev, "Register Dump\n");
+	printk(KERN_INFO " Register Name   Value\n");
+	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
+	     reginfo->name; reginfo++) {
+		igb_regdump(hw, reginfo);
+	}
+
+	/* Print TX Ring Summary */
+	if (!netdev || !netif_running(netdev))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
+		" leng ntw timestamp\n");
+	for (n = 0; n < adapter->num_tx_queues; n++) {
+		tx_ring = adapter->tx_ring[n];
+		buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+			n, tx_ring->next_to_use, tx_ring->next_to_clean,
+			(u64)buffer_info->dma,
+			buffer_info->length,
+			buffer_info->next_to_watch,
+			(u64)buffer_info->time_stamp);
+	}
+
+	/* Print TX Rings */
+	if (!netif_msg_tx_done(adapter))
+		goto rx_ring_summary;
+
+	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+	/* Transmit Descriptor Formats
+	 *
+	 * Advanced Transmit Descriptor
+	 *   +--------------------------------------------------------------+
+	 * 0 |         Buffer Address [63:0]                                |
+	 *   +--------------------------------------------------------------+
+	 * 8 | PAYLEN  | PORTS |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN  |
+	 *   +--------------------------------------------------------------+
+	 *   63      46 45   40 39  38 36 35 32 31  24             15      0
+	 */
+
+	for (n = 0; n < adapter->num_tx_queues; n++) {
+		tx_ring = adapter->tx_ring[n];
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "T [desc]     [address 63:0  ] "
+			"[PlPOCIStDDM Ln] [bi->dma       ] "
+			"leng ntw timestamp        bi->skb\n");
+
+		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
+			buffer_info = &tx_ring->buffer_info[i];
+			u0 = (struct my_u0 *)tx_desc;
+			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
+				" %04X %3X %016llX %p", i,
+				le64_to_cpu(u0->a),
+				le64_to_cpu(u0->b),
+				(u64)buffer_info->dma,
+				buffer_info->length,
+				buffer_info->next_to_watch,
+				(u64)buffer_info->time_stamp,
+				buffer_info->skb);
+			if (i == tx_ring->next_to_use &&
+				i == tx_ring->next_to_clean)
+				printk(KERN_CONT " NTC/U\n");
+			else if (i == tx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == tx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+
+			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+				print_hex_dump(KERN_INFO, "",
+					DUMP_PREFIX_ADDRESS,
+					16, 1, phys_to_virt(buffer_info->dma),
+					buffer_info->length, true);
+		}
+	}
+
+	/* Print RX Rings Summary */
+rx_ring_summary:
+	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC]\n");
+	for (n = 0; n < adapter->num_rx_queues; n++) {
+		rx_ring = adapter->rx_ring[n];
+		printk(KERN_INFO " %5d %5X %5X\n", n,
+			rx_ring->next_to_use, rx_ring->next_to_clean);
+	}
+
+	/* Print RX Rings */
+	if (!netif_msg_rx_status(adapter))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+	/* Advanced Receive Descriptor (Read) Format
+	 *    63                                           1        0
+	 *    +-----------------------------------------------------+
+	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
+	 *    +----------------------------------------------+------+
+	 *  8 |       Header Buffer Address [63:1]           |  DD  |
+	 *    +-----------------------------------------------------+
+	 *
+	 *
+	 * Advanced Receive Descriptor (Write-Back) Format
+	 *
+	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
+	 *   +------------------------------------------------------+
+	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
+	 *   | Checksum   Ident  |   |           |    | Type | Type |
+	 *   +------------------------------------------------------+
+	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+	 *   +------------------------------------------------------+
+	 *   63       48 47    32 31            20 19               0
+	 */
+
+	for (n = 0; n < adapter->num_rx_queues; n++) {
+		rx_ring = adapter->rx_ring[n];
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
+			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
+			"<-- Adv Rx Read format\n");
+		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
+			"[vl er S cks ln] ---------------- [bi->skb] "
+			"<-- Adv Rx Write-Back format\n");
+
+		for (i = 0; i < rx_ring->count; i++) {
+			buffer_info = &rx_ring->buffer_info[i];
+			rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
+			u0 = (struct my_u0 *)rx_desc;
+			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+			if (staterr & E1000_RXD_STAT_DD) {
+				/* Descriptor Done */
+				printk(KERN_INFO "RWB[0x%03X]     %016llX "
+					"%016llX ---------------- %p", i,
+					le64_to_cpu(u0->a),
+					le64_to_cpu(u0->b),
+					buffer_info->skb);
+			} else {
+				printk(KERN_INFO "R  [0x%03X]     %016llX "
+					"%016llX %016llX %p", i,
+					le64_to_cpu(u0->a),
+					le64_to_cpu(u0->b),
+					(u64)buffer_info->dma,
+					buffer_info->skb);
+
+				if (netif_msg_pktdata(adapter)) {
+					print_hex_dump(KERN_INFO, "",
+						DUMP_PREFIX_ADDRESS,
+						16, 1,
+						phys_to_virt(buffer_info->dma),
+						rx_ring->rx_buffer_len, true);
+					if (rx_ring->rx_buffer_len
+						< IGB_RXBUFFER_1024)
+						print_hex_dump(KERN_INFO, "",
+						  DUMP_PREFIX_ADDRESS,
+						  16, 1,
+						  phys_to_virt(
+						    buffer_info->page_dma +
+						    buffer_info->page_offset),
+						  PAGE_SIZE/2, true);
+				}
+			}
+
+			if (i == rx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == rx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+
+		}
+	}
+
+exit:
+	return;
+}
+
+
 /**
  * igb_read_clock - read raw cycle counter (to be used by time counter)
  */
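
The hunk above registers a sentinel-terminated {offset, name} table, and igb_dump() walks it until reginfo->name is NULL. A minimal standalone sketch of that table-walk pattern, in userspace C; rd32_stub() and the offsets below are invented stand-ins for the driver's MMIO rd32() and the E1000_* register macros:

#include <stdio.h>
#include <stdint.h>

struct reg_info {
	uint32_t ofs;
	const char *name;
};

/* invented stand-in for the driver's MMIO read */
static uint32_t rd32_stub(uint32_t ofs)
{
	return 0xdeadbeefu ^ ofs;
}

static const struct reg_info tbl[] = {
	{0x0000, "CTRL"},
	{0x0008, "STATUS"},
	{0, NULL}	/* list terminator, like the {} entry in igb_reg_info_tbl */
};

int main(void)
{
	const struct reg_info *ri;

	/* same stop condition igb_dump() uses: a NULL name field */
	for (ri = tbl; ri->name; ri++)
		printf("%-15s %08x\n", ri->name, (unsigned int)rd32_stub(ri->ofs));
	return 0;
}

The zeroed terminator entry is what lets the dump loop run "for (reginfo = ...; reginfo->name; reginfo++)" without tracking an array length.
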
@@ -221,41 +557,15 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
 	return stamp;
 }
 
-#ifdef DEBUG
 /**
- * igb_get_hw_dev_name - return device name string
+ * igb_get_hw_dev - return device
  * used by hardware layer to print debugging information
  **/
-char *igb_get_hw_dev_name(struct e1000_hw *hw)
+struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
 {
 	struct igb_adapter *adapter = hw->back;
-	return adapter->netdev->name;
-}
-
-/**
- * igb_get_time_str - format current NIC and system time as string
- */
-static char *igb_get_time_str(struct igb_adapter *adapter,
-			      char buffer[160])
-{
-	cycle_t hw = adapter->cycles.read(&adapter->cycles);
-	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
-	struct timespec sys;
-	struct timespec delta;
-	getnstimeofday(&sys);
-
-	delta = timespec_sub(nic, sys);
-
-	sprintf(buffer,
-		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
-		hw,
-		(long)nic.tv_sec, nic.tv_nsec,
-		(long)sys.tv_sec, sys.tv_nsec,
-		(long)delta.tv_sec, delta.tv_nsec);
-
-	return buffer;
+	return adapter->netdev;
 }
-#endif
 
 /**
  * igb_init_module - Driver Registration Routine
@@ -326,6 +636,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
 	}
 	case e1000_82575:
 	case e1000_82580:
+	case e1000_i350:
 	default:
 		for (; i < adapter->num_rx_queues; i++)
 			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -369,7 +680,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 			goto err;
 		ring->count = adapter->tx_ring_count;
 		ring->queue_index = i;
-		ring->pdev = adapter->pdev;
+		ring->dev = &adapter->pdev->dev;
 		ring->netdev = adapter->netdev;
 		/* For 82575, context index must be unique per ring. */
 		if (adapter->hw.mac.type == e1000_82575)
@@ -383,7 +694,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 			goto err;
 		ring->count = adapter->rx_ring_count;
 		ring->queue_index = i;
-		ring->pdev = adapter->pdev;
+		ring->dev = &adapter->pdev->dev;
 		ring->netdev = adapter->netdev;
 		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
@@ -469,6 +780,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
 		q_vector->eims_value = 1 << msix_vector;
 		break;
 	case e1000_82580:
+	case e1000_i350:
 		/* 82580 uses the same table-based approach as 82576 but has fewer
 		   entries as a result we carry over for queues greater than 4. */
 		if (rx_queue > IGB_N0_QUEUE) {
@@ -549,6 +861,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 
 	case e1000_82576:
 	case e1000_82580:
+	case e1000_i350:
 		/* Turn on MSI-X capability first, or our settings
 		 * won't stick.  And it will take days to debug. */
 		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
@@ -688,7 +1001,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
 	/* start with one vector for every rx queue */
 	numvecs = adapter->num_rx_queues;
 
-	/* if tx handler is seperate add 1 for every tx queue */
+	/* if tx handler is separate add 1 for every tx queue */
 	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
 		numvecs += adapter->num_tx_queues;
 
@@ -1104,9 +1417,6 @@ static void igb_configure(struct igb_adapter *adapter)
 		struct igb_ring *ring = adapter->rx_ring[i];
 		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
 	}
-
-
-	adapter->tx_queue_len = netdev->tx_queue_len;
 }
 
 /**
@@ -1212,7 +1522,6 @@ void igb_down(struct igb_adapter *adapter)
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
-	netdev->tx_queue_len = adapter->tx_queue_len;
 	netif_carrier_off(netdev);
 
 	/* record the stats before reset*/
@@ -1255,6 +1564,7 @@ void igb_reset(struct igb_adapter *adapter)
 	 * To take effect CTRL.RST is required.
 	 */
 	switch (mac->type) {
+	case e1000_i350:
 	case e1000_82580:
 		pba = rd32(E1000_RXPBS);
 		pba = igb_rxpbs_adjust_82580(pba);
@@ -1418,15 +1728,15 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 		return err;
 
 	pci_using_dac = 0;
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	if (!err) {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		if (!err)
 			pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 			if (err) {
 				dev_err(&pdev->dev, "No usable DMA "
 					"configuration, aborting\n");
@@ -1614,6 +1924,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 		adapter->eeprom_wol = 0;
 		break;
 	case E1000_DEV_ID_82576_QUAD_COPPER:
+	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
 		/* if quad port adapter, disable WoL on all but port A */
 		if (global_quad_port_a != 0)
 			adapter->eeprom_wol = 0;
@@ -1657,6 +1968,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
 		 netdev->name,
 		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
 		  "unknown"),
 		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
 		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
@@ -1827,6 +2139,7 @@ static void igb_init_hw_timer(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 
 	switch (hw->mac.type) {
+	case e1000_i350:
 	case e1000_82580:
 		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
 		adapter->cycles.read = igb_read_clock;
@@ -2097,7 +2410,7 @@ static int igb_close(struct net_device *netdev)
 **/
 int igb_setup_tx_resources(struct igb_ring *tx_ring)
 {
-	struct pci_dev *pdev = tx_ring->pdev;
+	struct device *dev = tx_ring->dev;
 	int size;
 
 	size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -2110,9 +2423,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = pci_alloc_consistent(pdev,
-					     tx_ring->size,
-					     &tx_ring->dma);
+	tx_ring->desc = dma_alloc_coherent(dev,
+					   tx_ring->size,
+					   &tx_ring->dma,
+					   GFP_KERNEL);
 
 	if (!tx_ring->desc)
 		goto err;
@@ -2123,7 +2437,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
 
 err:
 	vfree(tx_ring->buffer_info);
-	dev_err(&pdev->dev,
+	dev_err(dev,
 		"Unable to allocate memory for the transmit descriptor ring\n");
 	return -ENOMEM;
 }
@@ -2247,7 +2561,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 **/
 int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
-	struct pci_dev *pdev = rx_ring->pdev;
+	struct device *dev = rx_ring->dev;
 	int size, desc_len;
 
 	size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2262,8 +2576,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 	rx_ring->size = rx_ring->count * desc_len;
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-					     &rx_ring->dma);
+	rx_ring->desc = dma_alloc_coherent(dev,
+					   rx_ring->size,
+					   &rx_ring->dma,
+					   GFP_KERNEL);
 
 	if (!rx_ring->desc)
 		goto err;
@@ -2276,8 +2592,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 err:
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
-	dev_err(&pdev->dev, "Unable to allocate memory for "
-		"the receive descriptor ring\n");
+	dev_err(dev, "Unable to allocate memory for the receive descriptor"
+		" ring\n");
 	return -ENOMEM;
 }
@@ -2340,6 +2656,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 	if (adapter->vfs_allocated_count) {
 		/* 82575 and 82576 supports 2 RSS queues for VMDq */
 		switch (hw->mac.type) {
+		case e1000_i350:
 		case e1000_82580:
 			num_rx_queues = 1;
 			shift = 0;
@@ -2591,6 +2908,8 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 			  E1000_SRRCTL_BSIZEPKT_SHIFT;
 		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
 	}
+	if (hw->mac.type == e1000_82580)
+		srrctl |= E1000_SRRCTL_TIMESTAMP;
 	/* Only set Drop Enable if we are supporting multiple queues */
 	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
 		srrctl |= E1000_SRRCTL_DROP_EN;
@@ -2650,8 +2969,8 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
 	if (!tx_ring->desc)
 		return;
 
-	pci_free_consistent(tx_ring->pdev, tx_ring->size,
-			    tx_ring->desc, tx_ring->dma);
+	dma_free_coherent(tx_ring->dev, tx_ring->size,
+			  tx_ring->desc, tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -2675,15 +2994,15 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
 {
 	if (buffer_info->dma) {
 		if (buffer_info->mapped_as_page)
-			pci_unmap_page(tx_ring->pdev,
+			dma_unmap_page(tx_ring->dev,
 				       buffer_info->dma,
 				       buffer_info->length,
-				       PCI_DMA_TODEVICE);
+				       DMA_TO_DEVICE);
 		else
-			pci_unmap_single(tx_ring->pdev,
+			dma_unmap_single(tx_ring->dev,
 					 buffer_info->dma,
 					 buffer_info->length,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		buffer_info->dma = 0;
 	}
 	if (buffer_info->skb) {
@@ -2754,8 +3073,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
 	if (!rx_ring->desc)
 		return;
 
-	pci_free_consistent(rx_ring->pdev, rx_ring->size,
-			    rx_ring->desc, rx_ring->dma);
+	dma_free_coherent(rx_ring->dev, rx_ring->size,
+			  rx_ring->desc, rx_ring->dma);
 
 	rx_ring->desc = NULL;
 }
@@ -2791,10 +3110,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 	for (i = 0; i < rx_ring->count; i++) {
 		buffer_info = &rx_ring->buffer_info[i];
 		if (buffer_info->dma) {
-			pci_unmap_single(rx_ring->pdev,
+			dma_unmap_single(rx_ring->dev,
 					 buffer_info->dma,
 					 rx_ring->rx_buffer_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			buffer_info->dma = 0;
 		}
 
@@ -2803,10 +3122,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 			buffer_info->skb = NULL;
 		}
 		if (buffer_info->page_dma) {
-			pci_unmap_page(rx_ring->pdev,
+			dma_unmap_page(rx_ring->dev,
 				       buffer_info->page_dma,
 				       PAGE_SIZE / 2,
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 			buffer_info->page_dma = 0;
 		}
 		if (buffer_info->page) {
@@ -2877,7 +3196,7 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	struct dev_mc_list *mc_ptr;
+	struct netdev_hw_addr *ha;
 	u8 *mta_list;
 	int i;
 
@@ -2894,8 +3213,8 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
 
 	/* The shared function expects a packed array of only addresses. */
 	i = 0;
-	netdev_for_each_mc_addr(mc_ptr, netdev)
-		memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
+	netdev_for_each_mc_addr(ha, netdev)
+		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
 
 	igb_update_mc_addr_list(hw, mta_list, i);
 	kfree(mta_list);
@@ -3105,17 +3424,13 @@ static void igb_watchdog_task(struct work_struct *work)
 			       ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
 			       ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));
 
-			/* tweak tx_queue_len according to speed/duplex and
-			 * adjust the timeout factor */
-			netdev->tx_queue_len = adapter->tx_queue_len;
+			/* adjust timeout factor according to speed/duplex */
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
 			case SPEED_10:
-				netdev->tx_queue_len = 10;
 				adapter->tx_timeout_factor = 14;
 				break;
 			case SPEED_100:
-				netdev->tx_queue_len = 100;
 				/* maybe add some timeout factor ? */
 				break;
 			}
@@ -3498,7 +3813,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
 				   struct sk_buff *skb, u32 tx_flags)
 {
 	struct e1000_adv_tx_context_desc *context_desc;
-	struct pci_dev *pdev = tx_ring->pdev;
+	struct device *dev = tx_ring->dev;
 	struct igb_buffer *buffer_info;
 	u32 info = 0, tu_cmd = 0;
 	unsigned int i;
@@ -3549,7 +3864,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
 			break;
 		default:
 			if (unlikely(net_ratelimit()))
-				dev_warn(&pdev->dev,
+				dev_warn(dev,
 					 "partial checksum but proto=%x!\n",
 					 skb->protocol);
 			break;
@@ -3583,59 +3898,61 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 				 unsigned int first)
 {
 	struct igb_buffer *buffer_info;
-	struct pci_dev *pdev = tx_ring->pdev;
-	unsigned int len = skb_headlen(skb);
+	struct device *dev = tx_ring->dev;
+	unsigned int hlen = skb_headlen(skb);
 	unsigned int count = 0, i;
 	unsigned int f;
+	u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
 
 	i = tx_ring->next_to_use;
 
 	buffer_info = &tx_ring->buffer_info[i];
-	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
-	buffer_info->length = len;
+	BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
+	buffer_info->length = hlen;
 	/* set time_stamp *before* dma to help avoid a possible race */
 	buffer_info->time_stamp = jiffies;
 	buffer_info->next_to_watch = i;
-	buffer_info->dma = pci_map_single(pdev, skb->data, len,
-					  PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(pdev, buffer_info->dma))
+	buffer_info->dma = dma_map_single(dev, skb->data, hlen,
+					  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, buffer_info->dma))
 		goto dma_error;
 
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
-		struct skb_frag_struct *frag;
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
+		unsigned int len = frag->size;
 
 		count++;
 		i++;
 		if (i == tx_ring->count)
 			i = 0;
 
-		frag = &skb_shinfo(skb)->frags[f];
-		len = frag->size;
-
 		buffer_info = &tx_ring->buffer_info[i];
 		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
 		buffer_info->length = len;
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
 		buffer_info->mapped_as_page = true;
-		buffer_info->dma = pci_map_page(pdev,
+		buffer_info->dma = dma_map_page(dev,
 						frag->page,
 						frag->page_offset,
 						len,
-						PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, buffer_info->dma))
 			goto dma_error;
 
 	}
 
 	tx_ring->buffer_info[i].skb = skb;
-	tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
+	tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags;
+	/* multiply data chunks by size of headers */
+	tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
+	tx_ring->buffer_info[i].gso_segs = gso_segs;
 	tx_ring->buffer_info[first].next_to_watch = i;
 
 	return ++count;
 
 dma_error:
-	dev_err(&pdev->dev, "TX DMA map failed\n");
+	dev_err(dev, "TX DMA map failed\n");
 
 	/* clear timestamp and dma mappings for failed buffer_info mapping */
 	buffer_info->dma = 0;
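
The rewritten mapping path above precomputes the on-wire byte count at map time, ((gso_segs - 1) * hlen) + skb->len, so the cleanup path can charge statistics without touching the skb: every TSO segment after the first carries its own copy of the headers held in the linear (hlen) part. A quick standalone check of that arithmetic (all values invented):

#include <stdio.h>

int main(void)
{
	unsigned int hlen = 66;		/* eth + ip + tcp headers in the skb head */
	unsigned int payload = 2800;	/* tcp payload carried by one skb */
	unsigned int mss = 1400;
	unsigned int skb_len = hlen + payload;
	unsigned int gso_segs = (payload + mss - 1) / mss;	/* 2 segments */
	unsigned int bytecount = (gso_segs - 1) * hlen + skb_len;

	/* expect 2 wire frames of 1400 + 66 bytes each: 2932 total */
	printf("gso_segs=%u bytecount=%u\n", gso_segs, bytecount);
	return 0;
}
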
@@ -3873,6 +4190,8 @@ static void igb_reset_task(struct work_struct *work)
 	struct igb_adapter *adapter;
 	adapter = container_of(work, struct igb_adapter, reset_task);
 
+	igb_dump(adapter);
+	netdev_err(adapter->netdev, "Reset adapter\n");
 	igb_reinit_locked(adapter);
 }
 
@@ -3925,6 +4244,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 	 * i.e. RXBUFFER_2048 --> size-4096 slab
 	 */
 
+	if (adapter->hw.mac.type == e1000_82580)
+		max_frame += IGB_TS_HDR_LEN;
+
 	if (max_frame <= IGB_RXBUFFER_1024)
 		rx_buffer_len = IGB_RXBUFFER_1024;
 	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
@@ -3932,6 +4254,14 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 	else
 		rx_buffer_len = IGB_RXBUFFER_128;
 
+	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
+	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
+		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;
+
+	if ((adapter->hw.mac.type == e1000_82580) &&
+	    (rx_buffer_len == IGB_RXBUFFER_128))
+		rx_buffer_len += IGB_RXBUFFER_64;
+
 	if (netif_running(netdev))
 		igb_down(adapter);
 
@@ -3962,7 +4292,7 @@ void igb_update_stats(struct igb_adapter *adapter)
 	struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
-	u32 rnbc, reg;
+	u32 reg, mpc;
 	u16 phy_tmp;
 	int i;
 	u64 bytes, packets;
@@ -4020,7 +4350,9 @@ void igb_update_stats(struct igb_adapter *adapter)
 	adapter->stats.symerrs += rd32(E1000_SYMERRS);
 	adapter->stats.sec += rd32(E1000_SEC);
 
-	adapter->stats.mpc += rd32(E1000_MPC);
+	mpc = rd32(E1000_MPC);
+	adapter->stats.mpc += mpc;
+	net_stats->rx_fifo_errors += mpc;
 	adapter->stats.scc += rd32(E1000_SCC);
 	adapter->stats.ecol += rd32(E1000_ECOL);
 	adapter->stats.mcc += rd32(E1000_MCC);
@@ -4035,9 +4367,7 @@ void igb_update_stats(struct igb_adapter *adapter)
 	adapter->stats.gptc += rd32(E1000_GPTC);
 	adapter->stats.gotc += rd32(E1000_GOTCL);
 	rd32(E1000_GOTCH); /* clear GOTCL */
-	rnbc = rd32(E1000_RNBC);
-	adapter->stats.rnbc += rnbc;
-	net_stats->rx_fifo_errors += rnbc;
+	adapter->stats.rnbc += rd32(E1000_RNBC);
 	adapter->stats.ruc += rd32(E1000_RUC);
 	adapter->stats.rfc += rd32(E1000_RFC);
 	adapter->stats.rjc += rd32(E1000_RJC);
@@ -4960,22 +5290,21 @@ static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
 /**
  * igb_tx_hwtstamp - utility function which checks for TX time stamp
  * @q_vector: pointer to q_vector containing needed info
- * @skb: packet that was just sent
+ * @buffer_info: pointer to igb_buffer structure
  *
  * If we were asked to do hardware stamping and such a time stamp is
  * available, then it must have been for this skb here because we only
  * allow one such packet into the queue.
  */
-static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
+static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
-	union skb_shared_tx *shtx = skb_tx(skb);
 	struct e1000_hw *hw = &adapter->hw;
 	struct skb_shared_hwtstamps shhwtstamps;
 	u64 regval;
 
 	/* if skb does not support hw timestamp or TX stamp not valid exit */
-	if (likely(!shtx->hardware) ||
+	if (likely(!buffer_info->shtx.hardware) ||
 	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
 		return;
 
@@ -4983,7 +5312,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
 	regval |= (u64)rd32(E1000_TXSTMPH) << 32;
 
 	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
-	skb_tstamp_tx(skb, &shhwtstamps);
+	skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
 }
 
 /**
@@ -4998,7 +5327,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 	struct net_device *netdev = tx_ring->netdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct igb_buffer *buffer_info;
-	struct sk_buff *skb;
 	union e1000_adv_tx_desc *tx_desc, *eop_desc;
 	unsigned int total_bytes = 0, total_packets = 0;
 	unsigned int i, eop, count = 0;
@@ -5014,19 +5342,12 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
 		buffer_info = &tx_ring->buffer_info[i];
 		cleaned = (i == eop);
-		skb = buffer_info->skb;
 
-		if (skb) {
-			unsigned int segs, bytecount;
+		if (buffer_info->skb) {
+			total_bytes += buffer_info->bytecount;
 			/* gso_segs is currently only valid for tcp */
-			segs = buffer_info->gso_segs;
-			/* multiply data chunks by size of headers */
-			bytecount = ((segs - 1) * skb_headlen(skb)) +
-				    skb->len;
-			total_packets += segs;
-			total_bytes += bytecount;
-
-			igb_tx_hwtstamp(q_vector, skb);
+			total_packets += buffer_info->gso_segs;
+			igb_tx_hwtstamp(q_vector, buffer_info);
 		}
 
 		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
@@ -5066,7 +5387,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 	    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
 
 		/* detected Tx unit hang */
-		dev_err(&tx_ring->pdev->dev,
+		dev_err(tx_ring->dev,
 			"Detected Tx Unit Hang\n"
 			"  Tx Queue             <%d>\n"
 			"  TDH                  <%x>\n"
@@ -5109,7 +5430,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
 {
 	struct igb_adapter *adapter = q_vector->adapter;
 
-	if (vlan_tag)
+	if (vlan_tag && adapter->vlgrp)
 		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
 				 vlan_tag, skb);
 	else
@@ -5145,10 +5466,10 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
 	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
+	dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
 }
 
-static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
-				   struct sk_buff *skb)
+static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
+			    struct sk_buff *skb)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
@@ -5166,13 +5487,18 @@ static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
 	 * If nothing went wrong, then it should have a skb_shared_tx that we
 	 * can turn into a skb_shared_hwtstamps.
 	 */
-	if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
-		return;
-	if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
-		return;
+	if (staterr & E1000_RXDADV_STAT_TSIP) {
+		u32 *stamp = (u32 *)skb->data;
+		regval = le32_to_cpu(*(stamp + 2));
+		regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
+		skb_pull(skb, IGB_TS_HDR_LEN);
+	} else {
+		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+			return;
 
-	regval = rd32(E1000_RXSTMPL);
-	regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+		regval = rd32(E1000_RXSTMPL);
+		regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+	}
 
 	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
 }
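
In the new TSIP branch above, the hardware has prepended the timestamp to the packet buffer itself: the driver reads two little-endian 32-bit words at *(stamp + 2) and *(stamp + 3) (byte offsets 8 and 12) and then skb_pull()s IGB_TS_HDR_LEN off the front. A standalone sketch of that extraction (buffer contents invented; on the little-endian host assumed here, le32_to_cpu() is a no-op, so plain loads stand in for it):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t data[16] = {0};	/* start of the rx buffer, as skb->data */
	uint32_t lo = 0x11223344, hi = 0x55667788;	/* invented SYSTIM halves */
	uint32_t stamp[4];
	uint64_t regval;

	memcpy(&data[8], &lo, sizeof(lo));	/* word the driver reads as *(stamp + 2) */
	memcpy(&data[12], &hi, sizeof(hi));	/* word the driver reads as *(stamp + 3) */

	memcpy(stamp, data, sizeof(stamp));
	regval = stamp[2];
	regval |= (uint64_t)stamp[3] << 32;

	printf("SYSTIM = %016llx\n", (unsigned long long)regval);	/* 5566778811223344 */
	return 0;
}
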
@@ -5195,7 +5521,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 {
 	struct igb_ring *rx_ring = q_vector->rx_ring;
 	struct net_device *netdev = rx_ring->netdev;
-	struct pci_dev *pdev = rx_ring->pdev;
+	struct device *dev = rx_ring->dev;
 	union e1000_adv_rx_desc *rx_desc , *next_rxd;
 	struct igb_buffer *buffer_info , *next_buffer;
 	struct sk_buff *skb;
@@ -5235,9 +5561,9 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 		cleaned_count++;
 
 		if (buffer_info->dma) {
-			pci_unmap_single(pdev, buffer_info->dma,
+			dma_unmap_single(dev, buffer_info->dma,
 					 rx_ring->rx_buffer_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			buffer_info->dma = 0;
 			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
 				skb_put(skb, length);
@@ -5247,11 +5573,11 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 		}
 
 		if (length) {
-			pci_unmap_page(pdev, buffer_info->page_dma,
-				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+			dma_unmap_page(dev, buffer_info->page_dma,
+				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 			buffer_info->page_dma = 0;
 
-			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 					   buffer_info->page,
 					   buffer_info->page_offset,
 					   length);
@@ -5280,7 +5606,8 @@ send_up:
 			goto next_desc;
 		}
 
-		igb_rx_hwtstamp(q_vector, staterr, skb);
+		if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
+			igb_rx_hwtstamp(q_vector, staterr, skb);
 		total_bytes += skb->len;
 		total_packets++;
 
@@ -5355,12 +5682,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 			buffer_info->page_offset ^= PAGE_SIZE / 2;
 		}
 		buffer_info->page_dma =
-			pci_map_page(rx_ring->pdev, buffer_info->page,
+			dma_map_page(rx_ring->dev, buffer_info->page,
 				     buffer_info->page_offset,
 				     PAGE_SIZE / 2,
-				     PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(rx_ring->pdev,
-					  buffer_info->page_dma)) {
+				     DMA_FROM_DEVICE);
+		if (dma_mapping_error(rx_ring->dev,
+				      buffer_info->page_dma)) {
 			buffer_info->page_dma = 0;
 			rx_ring->rx_stats.alloc_failed++;
 			goto no_buffers;
@@ -5378,12 +5705,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 			buffer_info->skb = skb;
 		}
 		if (!buffer_info->dma) {
-			buffer_info->dma = pci_map_single(rx_ring->pdev,
+			buffer_info->dma = dma_map_single(rx_ring->dev,
 							  skb->data,
 							  bufsz,
-							  PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(rx_ring->pdev,
-						  buffer_info->dma)) {
+							  DMA_FROM_DEVICE);
+			if (dma_mapping_error(rx_ring->dev,
+					      buffer_info->dma)) {
 				buffer_info->dma = 0;
 				rx_ring->rx_stats.alloc_failed++;
 				goto no_buffers;
@@ -5560,6 +5887,16 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
 		return 0;
 	}
 
+	/*
+	 * Per-packet timestamping only works if all packets are
+	 * timestamped, so enable timestamping in all packets as
+	 * long as one rx filter was configured.
+	 */
+	if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
+		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+	}
+
 	/* enable/disable TX */
 	regval = rd32(E1000_TSYNCTXCTL);
 	regval &= ~E1000_TSYNCTXCTL_ENABLED;
@@ -6136,19 +6473,25 @@ static void igb_vmm_control(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 reg;
 
-	/* replication is not supported for 82575 */
-	if (hw->mac.type == e1000_82575)
+	switch (hw->mac.type) {
+	case e1000_82575:
+	default:
+		/* replication is not supported for 82575 */
 		return;
-
-	/* enable replication vlan tag stripping */
-	reg = rd32(E1000_RPLOLR);
-	reg |= E1000_RPLOLR_STRVLAN;
-	wr32(E1000_RPLOLR, reg);
-
-	/* notify HW that the MAC is adding vlan tags */
-	reg = rd32(E1000_DTXCTL);
-	reg |= E1000_DTXCTL_VLAN_ADDED;
-	wr32(E1000_DTXCTL, reg);
+	case e1000_82576:
+		/* notify HW that the MAC is adding vlan tags */
+		reg = rd32(E1000_DTXCTL);
+		reg |= E1000_DTXCTL_VLAN_ADDED;
+		wr32(E1000_DTXCTL, reg);
+	case e1000_82580:
+		/* enable replication vlan tag stripping */
+		reg = rd32(E1000_RPLOLR);
+		reg |= E1000_RPLOLR_STRVLAN;
+		wr32(E1000_RPLOLR, reg);
+	case e1000_i350:
+		/* none of the above registers are supported by i350 */
+		break;
+	}
 
 	if (adapter->vfs_allocated_count) {
 		igb_vmdq_set_loopback_pf(hw, true);