Diffstat (limited to 'drivers/net/igb/igb_main.c')
-rw-r--r--	drivers/net/igb/igb_main.c	613
1 file changed, 479 insertions(+), 134 deletions(-)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index c9baa2aa98cd..3881918f5382 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -62,6 +62,10 @@ static const struct e1000_info *igb_info_tbl[] = {
 };
 
 static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -197,6 +201,336 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+struct igb_reg_info {
+	u32 ofs;
+	char *name;
+};
+
+static const struct igb_reg_info igb_reg_info_tbl[] = {
+
+	/* General Registers */
+	{E1000_CTRL, "CTRL"},
+	{E1000_STATUS, "STATUS"},
+	{E1000_CTRL_EXT, "CTRL_EXT"},
+
+	/* Interrupt Registers */
+	{E1000_ICR, "ICR"},
+
+	/* RX Registers */
+	{E1000_RCTL, "RCTL"},
+	{E1000_RDLEN(0), "RDLEN"},
+	{E1000_RDH(0), "RDH"},
+	{E1000_RDT(0), "RDT"},
+	{E1000_RXDCTL(0), "RXDCTL"},
+	{E1000_RDBAL(0), "RDBAL"},
+	{E1000_RDBAH(0), "RDBAH"},
+
+	/* TX Registers */
+	{E1000_TCTL, "TCTL"},
+	{E1000_TDBAL(0), "TDBAL"},
+	{E1000_TDBAH(0), "TDBAH"},
+	{E1000_TDLEN(0), "TDLEN"},
+	{E1000_TDH(0), "TDH"},
+	{E1000_TDT(0), "TDT"},
+	{E1000_TXDCTL(0), "TXDCTL"},
+	{E1000_TDFH, "TDFH"},
+	{E1000_TDFT, "TDFT"},
+	{E1000_TDFHS, "TDFHS"},
+	{E1000_TDFPC, "TDFPC"},
+
+	/* List Terminator */
+	{}
+};
+
+/*
+ * igb_regdump - register printout routine
+ */
+static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
+{
+	int n = 0;
+	char rname[16];
+	u32 regs[8];
+
+	switch (reginfo->ofs) {
+	case E1000_RDLEN(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDLEN(n));
+		break;
+	case E1000_RDH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDH(n));
+		break;
+	case E1000_RDT(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDT(n));
+		break;
+	case E1000_RXDCTL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RXDCTL(n));
+		break;
+	case E1000_RDBAL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDBAL(n));
+		break;
+	case E1000_RDBAH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDBAH(n));
+		break;
+	case E1000_TDBAL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDBAL(n));
+		break;
+	case E1000_TDBAH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDBAH(n));
+		break;
+	case E1000_TDLEN(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDLEN(n));
+		break;
+	case E1000_TDH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDH(n));
+		break;
+	case E1000_TDT(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDT(n));
+		break;
+	case E1000_TXDCTL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TXDCTL(n));
+		break;
+	default:
+		printk(KERN_INFO "%-15s %08x\n",
+			reginfo->name, rd32(reginfo->ofs));
+		return;
+	}
+
+	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
+	printk(KERN_INFO "%-15s ", rname);
+	for (n = 0; n < 4; n++)
+		printk(KERN_CONT "%08x ", regs[n]);
+	printk(KERN_CONT "\n");
+}
+
+/*
+ * igb_dump - Print registers, tx-rings and rx-rings
+ */
+static void igb_dump(struct igb_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	struct igb_reg_info *reginfo;
+	int n = 0;
+	struct igb_ring *tx_ring;
+	union e1000_adv_tx_desc *tx_desc;
+	struct my_u0 { u64 a; u64 b; } *u0;
+	struct igb_buffer *buffer_info;
+	struct igb_ring *rx_ring;
+	union e1000_adv_rx_desc *rx_desc;
+	u32 staterr;
+	int i = 0;
+
+	if (!netif_msg_hw(adapter))
+		return;
+
+	/* Print netdevice Info */
+	if (netdev) {
+		dev_info(&adapter->pdev->dev, "Net device Info\n");
+		printk(KERN_INFO "Device Name     state            "
+			"trans_start      last_rx\n");
+		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+			netdev->name,
+			netdev->state,
+			netdev->trans_start,
+			netdev->last_rx);
+	}
+
+	/* Print Registers */
+	dev_info(&adapter->pdev->dev, "Register Dump\n");
+	printk(KERN_INFO " Register Name   Value\n");
+	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
+	     reginfo->name; reginfo++) {
+		igb_regdump(hw, reginfo);
+	}
+
+	/* Print TX Ring Summary */
+	if (!netdev || !netif_running(netdev))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
+		" leng ntw timestamp\n");
+	for (n = 0; n < adapter->num_tx_queues; n++) {
+		tx_ring = adapter->tx_ring[n];
+		buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+			   n, tx_ring->next_to_use, tx_ring->next_to_clean,
+			   (u64)buffer_info->dma,
+			   buffer_info->length,
+			   buffer_info->next_to_watch,
+			   (u64)buffer_info->time_stamp);
+	}
+
+	/* Print TX Rings */
+	if (!netif_msg_tx_done(adapter))
+		goto rx_ring_summary;
+
+	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+	/* Transmit Descriptor Formats
+	 *
+	 * Advanced Transmit Descriptor
+	 *   +--------------------------------------------------------------+
+	 * 0 |         Buffer Address [63:0]                                |
+	 *   +--------------------------------------------------------------+
+	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
+	 *   +--------------------------------------------------------------+
+	 *   63      46 45    40 39 38 36 35 32 31   24             15     0
+	 */
+
+	for (n = 0; n < adapter->num_tx_queues; n++) {
+		tx_ring = adapter->tx_ring[n];
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "T [desc]     [address 63:0  ] "
+			"[PlPOCIStDDM Ln] [bi->dma       ] "
+			"leng  ntw timestamp        bi->skb\n");
+
+		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
+			buffer_info = &tx_ring->buffer_info[i];
+			u0 = (struct my_u0 *)tx_desc;
+			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
+				" %04X  %3X %016llX %p", i,
+				le64_to_cpu(u0->a),
+				le64_to_cpu(u0->b),
+				(u64)buffer_info->dma,
+				buffer_info->length,
+				buffer_info->next_to_watch,
+				(u64)buffer_info->time_stamp,
+				buffer_info->skb);
+			if (i == tx_ring->next_to_use &&
+				i == tx_ring->next_to_clean)
+				printk(KERN_CONT " NTC/U\n");
+			else if (i == tx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == tx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+
+			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+				print_hex_dump(KERN_INFO, "",
+					DUMP_PREFIX_ADDRESS,
+					16, 1, phys_to_virt(buffer_info->dma),
+					buffer_info->length, true);
+		}
+	}
+
+	/* Print RX Rings Summary */
+rx_ring_summary:
+	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC]\n");
+	for (n = 0; n < adapter->num_rx_queues; n++) {
+		rx_ring = adapter->rx_ring[n];
+		printk(KERN_INFO " %5d %5X %5X\n", n,
+			   rx_ring->next_to_use, rx_ring->next_to_clean);
+	}
+
+	/* Print RX Rings */
+	if (!netif_msg_rx_status(adapter))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+	/* Advanced Receive Descriptor (Read) Format
+	 *    63                                           1        0
+	 *    +-----------------------------------------------------+
+	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
+	 *    +----------------------------------------------+------+
+	 *  8 |       Header Buffer Address [63:1]           |  DD  |
+	 *    +-----------------------------------------------------+
+	 *
+	 *
+	 * Advanced Receive Descriptor (Write-Back) Format
+	 *
+	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
+	 *   +------------------------------------------------------+
+	 * 0 | Packet   IP     |SPH| HDR_LEN   | RSV|Packet|  RSS   |
+	 *   | Checksum Ident  |   |           |    | Type | Type   |
+	 *   +------------------------------------------------------+
+	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+	 *   +------------------------------------------------------+
+	 *   63       48 47    32 31            20 19               0
+	 */
+
+	for (n = 0; n < adapter->num_rx_queues; n++) {
+		rx_ring = adapter->rx_ring[n];
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
+			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
+			"<-- Adv Rx Read format\n");
+		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
+			"[vl er S cks ln] ---------------- [bi->skb] "
+			"<-- Adv Rx Write-Back format\n");
+
+		for (i = 0; i < rx_ring->count; i++) {
+			buffer_info = &rx_ring->buffer_info[i];
+			rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
+			u0 = (struct my_u0 *)rx_desc;
+			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+			if (staterr & E1000_RXD_STAT_DD) {
+				/* Descriptor Done */
+				printk(KERN_INFO "RWB[0x%03X]     %016llX "
+					"%016llX ---------------- %p", i,
+					le64_to_cpu(u0->a),
+					le64_to_cpu(u0->b),
+					buffer_info->skb);
+			} else {
+				printk(KERN_INFO "R  [0x%03X]     %016llX "
+					"%016llX %016llX %p", i,
+					le64_to_cpu(u0->a),
+					le64_to_cpu(u0->b),
+					(u64)buffer_info->dma,
+					buffer_info->skb);
+
+				if (netif_msg_pktdata(adapter)) {
+					print_hex_dump(KERN_INFO, "",
+						DUMP_PREFIX_ADDRESS,
+						16, 1,
+						phys_to_virt(buffer_info->dma),
+						rx_ring->rx_buffer_len, true);
+					if (rx_ring->rx_buffer_len
+						< IGB_RXBUFFER_1024)
+						print_hex_dump(KERN_INFO, "",
+						  DUMP_PREFIX_ADDRESS,
+						  16, 1,
+						  phys_to_virt(
+						    buffer_info->page_dma +
+						    buffer_info->page_offset),
+						  PAGE_SIZE/2, true);
+				}
+			}
+
+			if (i == rx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == rx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+
+		}
+	}
+
+exit:
+	return;
+}
+
+
 /**
  * igb_read_clock - read raw cycle counter (to be used by time counter)
  */
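All of the dump paths added above are gated by the netif_msg_*() helpers, which test bits in adapter->msg_enable. As a hedged sketch (the `debug` parameter and default mask below are illustrative, not part of this patch), a driver typically seeds that field from a module parameter:

	static int debug = -1;	/* -1: fall back to the default bits below */
	module_param(debug, int, 0);
	MODULE_PARM_DESC(debug, "Debug level (0=none, ..., 16=all)");

	/* hypothetically, in igb_probe(): */
	adapter->msg_enable = netif_msg_init(debug,
					     NETIF_MSG_DRV | NETIF_MSG_PROBE);

With that wiring, setting the NETIF_MSG_HW bit (0x2000) via `ethtool -s ethN msglvl` should be all it takes for igb_dump() to print when the reset task fires.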
@@ -223,41 +557,15 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
 	return stamp;
 }
 
-#ifdef DEBUG
 /**
- * igb_get_hw_dev_name - return device name string
+ * igb_get_hw_dev - return device
  * used by hardware layer to print debugging information
  **/
-char *igb_get_hw_dev_name(struct e1000_hw *hw)
+struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
 {
 	struct igb_adapter *adapter = hw->back;
-	return adapter->netdev->name;
-}
-
-/**
- * igb_get_time_str - format current NIC and system time as string
- */
-static char *igb_get_time_str(struct igb_adapter *adapter,
-			      char buffer[160])
-{
-	cycle_t hw = adapter->cycles.read(&adapter->cycles);
-	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
-	struct timespec sys;
-	struct timespec delta;
-	getnstimeofday(&sys);
-
-	delta = timespec_sub(nic, sys);
-
-	sprintf(buffer,
-		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
-		hw,
-		(long)nic.tv_sec, nic.tv_nsec,
-		(long)sys.tv_sec, sys.tv_nsec,
-		(long)delta.tv_sec, delta.tv_nsec);
-
-	return buffer;
+	return adapter->netdev;
 }
-#endif
 
 /**
  * igb_init_module - Driver Registration Routine
@@ -328,6 +636,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
 	}
 	case e1000_82575:
 	case e1000_82580:
+	case e1000_i350:
 	default:
 		for (; i < adapter->num_rx_queues; i++)
 			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -371,7 +680,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 			goto err;
 		ring->count = adapter->tx_ring_count;
 		ring->queue_index = i;
-		ring->pdev = adapter->pdev;
+		ring->dev = &adapter->pdev->dev;
 		ring->netdev = adapter->netdev;
 		/* For 82575, context index must be unique per ring. */
 		if (adapter->hw.mac.type == e1000_82575)
@@ -385,7 +694,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 			goto err;
 		ring->count = adapter->rx_ring_count;
 		ring->queue_index = i;
-		ring->pdev = adapter->pdev;
+		ring->dev = &adapter->pdev->dev;
 		ring->netdev = adapter->netdev;
 		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
@@ -471,6 +780,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
 		q_vector->eims_value = 1 << msix_vector;
 		break;
 	case e1000_82580:
+	case e1000_i350:
 		/* 82580 uses the same table-based approach as 82576 but has fewer
 		   entries as a result we carry over for queues greater than 4. */
 		if (rx_queue > IGB_N0_QUEUE) {
@@ -551,6 +861,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 
 	case e1000_82576:
 	case e1000_82580:
+	case e1000_i350:
 		/* Turn on MSI-X capability first, or our settings
 		 * won't stick.  And it will take days to debug. */
 		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
@@ -743,7 +1054,6 @@ msi_only:
 out:
 	/* Notify the stack of the (possibly) reduced Tx Queue count. */
 	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
-	return;
 }
 
 /**
@@ -1253,6 +1563,7 @@ void igb_reset(struct igb_adapter *adapter)
 	 * To take effect CTRL.RST is required.
 	 */
 	switch (mac->type) {
+	case e1000_i350:
 	case e1000_82580:
 		pba = rd32(E1000_RXPBS);
 		pba = igb_rxpbs_adjust_82580(pba);
@@ -1416,15 +1727,15 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 		return err;
 
 	pci_using_dac = 0;
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	if (!err) {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		if (!err)
 			pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 			if (err) {
 				dev_err(&pdev->dev, "No usable DMA "
 					"configuration, aborting\n");
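This hunk is part of the patch-wide move from the pci_* DMA wrappers to the generic DMA API, which takes a struct device rather than a struct pci_dev. A minimal, self-contained sketch of the same probe-time pattern (the function name is invented for illustration): try a 64-bit mask first and fall back to 32-bit, remembering whether DAC addressing is usable:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int example_set_dma_masks(struct pci_dev *pdev, int *using_dac)
	{
		int err;

		*using_dac = 0;
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err) {
			/* 64-bit streaming mask OK; try coherent as well */
			if (!dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))
				*using_dac = 1;
			return 0;
		}

		/* fall back to 32 bits for both mask types */
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			return err;
		return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}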
@@ -1656,6 +1967,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
 		 netdev->name,
 		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
 		  "unknown"),
 		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
 		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
@@ -1826,6 +2138,7 @@ static void igb_init_hw_timer(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 
 	switch (hw->mac.type) {
+	case e1000_i350:
 	case e1000_82580:
 		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
 		adapter->cycles.read = igb_read_clock;
@@ -2096,7 +2409,7 @@ static int igb_close(struct net_device *netdev)
 **/
 int igb_setup_tx_resources(struct igb_ring *tx_ring)
 {
-	struct pci_dev *pdev = tx_ring->pdev;
+	struct device *dev = tx_ring->dev;
 	int size;
 
 	size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -2109,9 +2422,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = pci_alloc_consistent(pdev,
+	tx_ring->desc = dma_alloc_coherent(dev,
 					   tx_ring->size,
-					   &tx_ring->dma);
+					   &tx_ring->dma,
+					   GFP_KERNEL);
 
 	if (!tx_ring->desc)
 		goto err;
@@ -2122,7 +2436,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
 
 err:
 	vfree(tx_ring->buffer_info);
-	dev_err(&pdev->dev,
+	dev_err(dev,
 		"Unable to allocate memory for the transmit descriptor ring\n");
 	return -ENOMEM;
 }
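dma_alloc_coherent() differs from the old pci_alloc_consistent() in exactly the two ways this hunk shows: it takes a struct device, and the GFP flags become explicit. A minimal sketch of the allocate/free pairing (the struct and function names are illustrative):

	#include <linux/dma-mapping.h>

	struct example_ring {
		struct device *dev;	/* &pdev->dev, as stored in ring->dev */
		void *desc;		/* CPU address of the descriptor block */
		dma_addr_t dma;		/* bus address handed to the NIC */
		unsigned int size;	/* bytes, ALIGNed to 4096 as above */
	};

	static int example_ring_alloc(struct example_ring *r)
	{
		r->desc = dma_alloc_coherent(r->dev, r->size, &r->dma,
					     GFP_KERNEL);
		return r->desc ? 0 : -ENOMEM;
	}

	static void example_ring_free(struct example_ring *r)
	{
		dma_free_coherent(r->dev, r->size, r->desc, r->dma);
		r->desc = NULL;
	}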
@@ -2246,7 +2560,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 **/
 int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
-	struct pci_dev *pdev = rx_ring->pdev;
+	struct device *dev = rx_ring->dev;
 	int size, desc_len;
 
 	size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2261,8 +2575,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 	rx_ring->size = rx_ring->count * desc_len;
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-					     &rx_ring->dma);
+	rx_ring->desc = dma_alloc_coherent(dev,
+					   rx_ring->size,
+					   &rx_ring->dma,
+					   GFP_KERNEL);
 
 	if (!rx_ring->desc)
 		goto err;
@@ -2275,8 +2591,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 err:
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
-	dev_err(&pdev->dev, "Unable to allocate memory for "
-		"the receive descriptor ring\n");
+	dev_err(dev, "Unable to allocate memory for the receive descriptor"
+		" ring\n");
 	return -ENOMEM;
 }
 
@@ -2339,6 +2655,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 	if (adapter->vfs_allocated_count) {
 		/* 82575 and 82576 supports 2 RSS queues for VMDq */
 		switch (hw->mac.type) {
+		case e1000_i350:
 		case e1000_82580:
 			num_rx_queues = 1;
 			shift = 0;
@@ -2590,6 +2907,8 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 			 E1000_SRRCTL_BSIZEPKT_SHIFT;
 		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
 	}
+	if (hw->mac.type == e1000_82580)
+		srrctl |= E1000_SRRCTL_TIMESTAMP;
 	/* Only set Drop Enable if we are supporting multiple queues */
 	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
 		srrctl |= E1000_SRRCTL_DROP_EN;
@@ -2649,8 +2968,8 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
 	if (!tx_ring->desc)
 		return;
 
-	pci_free_consistent(tx_ring->pdev, tx_ring->size,
+	dma_free_coherent(tx_ring->dev, tx_ring->size,
 			  tx_ring->desc, tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -2674,15 +2993,15 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
 {
 	if (buffer_info->dma) {
 		if (buffer_info->mapped_as_page)
-			pci_unmap_page(tx_ring->pdev,
+			dma_unmap_page(tx_ring->dev,
 				       buffer_info->dma,
 				       buffer_info->length,
-				       PCI_DMA_TODEVICE);
+				       DMA_TO_DEVICE);
 		else
-			pci_unmap_single(tx_ring->pdev,
+			dma_unmap_single(tx_ring->dev,
 					 buffer_info->dma,
 					 buffer_info->length,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		buffer_info->dma = 0;
 	}
 	if (buffer_info->skb) {
@@ -2753,8 +3072,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
 	if (!rx_ring->desc)
 		return;
 
-	pci_free_consistent(rx_ring->pdev, rx_ring->size,
+	dma_free_coherent(rx_ring->dev, rx_ring->size,
 			  rx_ring->desc, rx_ring->dma);
 
 	rx_ring->desc = NULL;
 }
@@ -2790,10 +3109,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 	for (i = 0; i < rx_ring->count; i++) {
 		buffer_info = &rx_ring->buffer_info[i];
 		if (buffer_info->dma) {
-			pci_unmap_single(rx_ring->pdev,
+			dma_unmap_single(rx_ring->dev,
 					 buffer_info->dma,
 					 rx_ring->rx_buffer_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			buffer_info->dma = 0;
 		}
 
@@ -2802,10 +3121,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 			buffer_info->skb = NULL;
 		}
 		if (buffer_info->page_dma) {
-			pci_unmap_page(rx_ring->pdev,
+			dma_unmap_page(rx_ring->dev,
 				       buffer_info->page_dma,
 				       PAGE_SIZE / 2,
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 			buffer_info->page_dma = 0;
 		}
 		if (buffer_info->page) {
@@ -2876,7 +3195,7 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	struct dev_mc_list *mc_ptr;
+	struct netdev_hw_addr *ha;
 	u8 *mta_list;
 	int i;
 
@@ -2893,8 +3212,8 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
 
 	/* The shared function expects a packed array of only addresses. */
 	i = 0;
-	netdev_for_each_mc_addr(mc_ptr, netdev)
-		memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
+	netdev_for_each_mc_addr(ha, netdev)
+		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
 
 	igb_update_mc_addr_list(hw, mta_list, i);
 	kfree(mta_list);
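struct dev_mc_list and its ->dmi_addr field were replaced in this kernel generation by struct netdev_hw_addr and ->addr, which is all this hunk adapts to. A small sketch of the new iterator, assuming nothing beyond what the hunk shows (the function name is invented):

	#include <linux/netdevice.h>

	/* count multicast entries by hand; netdev_mc_count() does the same */
	static int example_count_mc_addrs(struct net_device *netdev)
	{
		struct netdev_hw_addr *ha;
		int n = 0;

		netdev_for_each_mc_addr(ha, netdev)
			n++;	/* ha->addr holds the ETH_ALEN-byte address */
		return n;
	}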
@@ -3397,8 +3716,6 @@ set_itr_now:
 		q_vector->itr_val = new_itr;
 		q_vector->set_itr = 1;
 	}
-
-	return;
 }
 
 #define IGB_TX_FLAGS_CSUM		0x00000001
@@ -3493,7 +3810,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
 			struct sk_buff *skb, u32 tx_flags)
 {
 	struct e1000_adv_tx_context_desc *context_desc;
-	struct pci_dev *pdev = tx_ring->pdev;
+	struct device *dev = tx_ring->dev;
 	struct igb_buffer *buffer_info;
 	u32 info = 0, tu_cmd = 0;
 	unsigned int i;
@@ -3544,7 +3861,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
 			break;
 		default:
 			if (unlikely(net_ratelimit()))
-				dev_warn(&pdev->dev,
+				dev_warn(dev,
 					 "partial checksum but proto=%x!\n",
 					 skb->protocol);
 			break;
@@ -3578,59 +3895,61 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 			   unsigned int first)
 {
 	struct igb_buffer *buffer_info;
-	struct pci_dev *pdev = tx_ring->pdev;
-	unsigned int len = skb_headlen(skb);
+	struct device *dev = tx_ring->dev;
+	unsigned int hlen = skb_headlen(skb);
 	unsigned int count = 0, i;
 	unsigned int f;
+	u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
 
 	i = tx_ring->next_to_use;
 
 	buffer_info = &tx_ring->buffer_info[i];
-	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
-	buffer_info->length = len;
+	BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
+	buffer_info->length = hlen;
 	/* set time_stamp *before* dma to help avoid a possible race */
 	buffer_info->time_stamp = jiffies;
 	buffer_info->next_to_watch = i;
-	buffer_info->dma = pci_map_single(pdev, skb->data, len,
-					  PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(pdev, buffer_info->dma))
+	buffer_info->dma = dma_map_single(dev, skb->data, hlen,
+					  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, buffer_info->dma))
 		goto dma_error;
 
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
-		struct skb_frag_struct *frag;
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
+		unsigned int len = frag->size;
 
 		count++;
 		i++;
 		if (i == tx_ring->count)
 			i = 0;
 
-		frag = &skb_shinfo(skb)->frags[f];
-		len = frag->size;
-
 		buffer_info = &tx_ring->buffer_info[i];
 		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
 		buffer_info->length = len;
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
 		buffer_info->mapped_as_page = true;
-		buffer_info->dma = pci_map_page(pdev,
+		buffer_info->dma = dma_map_page(dev,
 						frag->page,
 						frag->page_offset,
 						len,
-						PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, buffer_info->dma))
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, buffer_info->dma))
 			goto dma_error;
 
 	}
 
 	tx_ring->buffer_info[i].skb = skb;
-	tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
+	tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags;
+	/* multiply data chunks by size of headers */
+	tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
+	tx_ring->buffer_info[i].gso_segs = gso_segs;
 	tx_ring->buffer_info[first].next_to_watch = i;
 
 	return ++count;
 
 dma_error:
-	dev_err(&pdev->dev, "TX DMA map failed\n");
+	dev_err(dev, "TX DMA map failed\n");
 
 	/* clear timestamp and dma mappings for failed buffer_info mapping */
 	buffer_info->dma = 0;
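Alongside dma_map_single()/dma_map_page(), the conversion swaps pci_dma_mapping_error() for dma_mapping_error(), which takes the same struct device that performed the mapping. A hedged sketch of the map/check/unmap lifecycle for a TX buffer (names are illustrative):

	#include <linux/dma-mapping.h>

	/* Map a linear buffer for device reads; 0 on success, sets *dma. */
	static int example_map_for_tx(struct device *dev, void *buf,
				      size_t len, dma_addr_t *dma)
	{
		*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *dma))
			return -ENOMEM;
		return 0;
	}

	/* ...and the matching teardown once the descriptor is cleaned: */
	static void example_unmap_tx(struct device *dev, dma_addr_t dma,
				     size_t len)
	{
		dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	}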
@@ -3868,6 +4187,8 @@ static void igb_reset_task(struct work_struct *work)
 	struct igb_adapter *adapter;
 	adapter = container_of(work, struct igb_adapter, reset_task);
 
+	igb_dump(adapter);
+	netdev_err(adapter->netdev, "Reset adapter\n");
 	igb_reinit_locked(adapter);
 }
 
@@ -3920,6 +4241,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 	 * i.e. RXBUFFER_2048 --> size-4096 slab
 	 */
 
+	if (adapter->hw.mac.type == e1000_82580)
+		max_frame += IGB_TS_HDR_LEN;
+
 	if (max_frame <= IGB_RXBUFFER_1024)
 		rx_buffer_len = IGB_RXBUFFER_1024;
 	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
@@ -3927,6 +4251,14 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 	else
 		rx_buffer_len = IGB_RXBUFFER_128;
 
+	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
+	    (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
+		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;
+
+	if ((adapter->hw.mac.type == e1000_82580) &&
+	    (rx_buffer_len == IGB_RXBUFFER_128))
+		rx_buffer_len += IGB_RXBUFFER_64;
+
 	if (netif_running(netdev))
 		igb_down(adapter);
 
@@ -4955,22 +5287,21 @@ static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
 /**
  * igb_tx_hwtstamp - utility function which checks for TX time stamp
  * @q_vector: pointer to q_vector containing needed info
- * @skb: packet that was just sent
+ * @buffer: pointer to igb_buffer structure
  *
  * If we were asked to do hardware stamping and such a time stamp is
  * available, then it must have been for this skb here because we only
  * allow only one such packet into the queue.
  */
-static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
+static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
-	union skb_shared_tx *shtx = skb_tx(skb);
 	struct e1000_hw *hw = &adapter->hw;
 	struct skb_shared_hwtstamps shhwtstamps;
 	u64 regval;
 
 	/* if skb does not support hw timestamp or TX stamp not valid exit */
-	if (likely(!shtx->hardware) ||
+	if (likely(!buffer_info->shtx.hardware) ||
 	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
 		return;
 
@@ -4978,7 +5309,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
 	regval |= (u64)rd32(E1000_TXSTMPH) << 32;
 
 	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
-	skb_tstamp_tx(skb, &shhwtstamps);
+	skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
 }
 
 /**
@@ -4993,7 +5324,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 	struct net_device *netdev = tx_ring->netdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct igb_buffer *buffer_info;
-	struct sk_buff *skb;
 	union e1000_adv_tx_desc *tx_desc, *eop_desc;
 	unsigned int total_bytes = 0, total_packets = 0;
 	unsigned int i, eop, count = 0;
@@ -5009,19 +5339,12 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
 		buffer_info = &tx_ring->buffer_info[i];
 		cleaned = (i == eop);
-		skb = buffer_info->skb;
 
-		if (skb) {
-			unsigned int segs, bytecount;
+		if (buffer_info->skb) {
+			total_bytes += buffer_info->bytecount;
 			/* gso_segs is currently only valid for tcp */
-			segs = buffer_info->gso_segs;
-			/* multiply data chunks by size of headers */
-			bytecount = ((segs - 1) * skb_headlen(skb)) +
-				    skb->len;
-			total_packets += segs;
-			total_bytes += bytecount;
-
-			igb_tx_hwtstamp(q_vector, skb);
+			total_packets += buffer_info->gso_segs;
+			igb_tx_hwtstamp(q_vector, buffer_info);
 		}
 
 		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
@@ -5061,7 +5384,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
 
 		/* detected Tx unit hang */
-		dev_err(&tx_ring->pdev->dev,
+		dev_err(tx_ring->dev,
 			"Detected Tx Unit Hang\n"
 			"  Tx Queue             <%d>\n"
 			"  TDH                  <%x>\n"
@@ -5140,10 +5463,10 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
 	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
+	dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
 }
 
-static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
+static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
 				   struct sk_buff *skb)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
@@ -5161,13 +5484,18 @@ static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
 	 * If nothing went wrong, then it should have a skb_shared_tx that we
 	 * can turn into a skb_shared_hwtstamps.
 	 */
-	if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
-		return;
-	if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
-		return;
+	if (staterr & E1000_RXDADV_STAT_TSIP) {
+		u32 *stamp = (u32 *)skb->data;
+		regval = le32_to_cpu(*(stamp + 2));
+		regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
+		skb_pull(skb, IGB_TS_HDR_LEN);
+	} else {
+		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+			return;
 
 		regval = rd32(E1000_RXSTMPL);
 		regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+	}
 
 	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
 }
@@ -5190,7 +5518,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 {
 	struct igb_ring *rx_ring = q_vector->rx_ring;
 	struct net_device *netdev = rx_ring->netdev;
-	struct pci_dev *pdev = rx_ring->pdev;
+	struct device *dev = rx_ring->dev;
 	union e1000_adv_rx_desc *rx_desc , *next_rxd;
 	struct igb_buffer *buffer_info , *next_buffer;
 	struct sk_buff *skb;
@@ -5230,9 +5558,9 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 		cleaned_count++;
 
 		if (buffer_info->dma) {
-			pci_unmap_single(pdev, buffer_info->dma,
+			dma_unmap_single(dev, buffer_info->dma,
 					 rx_ring->rx_buffer_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			buffer_info->dma = 0;
 			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
 				skb_put(skb, length);
@@ -5242,11 +5570,11 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 		}
 
 		if (length) {
-			pci_unmap_page(pdev, buffer_info->page_dma,
-				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+			dma_unmap_page(dev, buffer_info->page_dma,
+				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 			buffer_info->page_dma = 0;
 
-			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 					   buffer_info->page,
 					   buffer_info->page_offset,
 					   length);
@@ -5275,7 +5603,8 @@ send_up:
 			goto next_desc;
 		}
 
-		igb_rx_hwtstamp(q_vector, staterr, skb);
+		if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
+			igb_rx_hwtstamp(q_vector, staterr, skb);
 		total_bytes += skb->len;
 		total_packets++;
 
@@ -5350,12 +5679,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 			buffer_info->page_offset ^= PAGE_SIZE / 2;
 		}
 		buffer_info->page_dma =
-			pci_map_page(rx_ring->pdev, buffer_info->page,
+			dma_map_page(rx_ring->dev, buffer_info->page,
 				     buffer_info->page_offset,
 				     PAGE_SIZE / 2,
-				     PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(rx_ring->pdev,
+				     DMA_FROM_DEVICE);
+		if (dma_mapping_error(rx_ring->dev,
 				      buffer_info->page_dma)) {
 			buffer_info->page_dma = 0;
 			rx_ring->rx_stats.alloc_failed++;
 			goto no_buffers;
@@ -5373,12 +5702,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 			buffer_info->skb = skb;
 		}
 		if (!buffer_info->dma) {
-			buffer_info->dma = pci_map_single(rx_ring->pdev,
+			buffer_info->dma = dma_map_single(rx_ring->dev,
 							  skb->data,
 							  bufsz,
-							  PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(rx_ring->pdev,
+							  DMA_FROM_DEVICE);
+			if (dma_mapping_error(rx_ring->dev,
 					      buffer_info->dma)) {
 				buffer_info->dma = 0;
 				rx_ring->rx_stats.alloc_failed++;
 				goto no_buffers;
@@ -5555,6 +5884,16 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
 		return 0;
 	}
 
+	/*
+	 * Per-packet timestamping only works if all packets are
+	 * timestamped, so enable timestamping in all packets as
+	 * long as one rx filter was configured.
+	 */
+	if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
+		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+	}
+
 	/* enable/disable TX */
 	regval = rd32(E1000_TSYNCTXCTL);
 	regval &= ~E1000_TSYNCTXCTL_ENABLED;
@@ -6131,19 +6470,25 @@ static void igb_vmm_control(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 reg;
 
-	/* replication is not supported for 82575 */
-	if (hw->mac.type == e1000_82575)
+	switch (hw->mac.type) {
+	case e1000_82575:
+	default:
+		/* replication is not supported for 82575 */
 		return;
-
-	/* enable replication vlan tag stripping */
-	reg = rd32(E1000_RPLOLR);
-	reg |= E1000_RPLOLR_STRVLAN;
-	wr32(E1000_RPLOLR, reg);
-
-	/* notify HW that the MAC is adding vlan tags */
-	reg = rd32(E1000_DTXCTL);
-	reg |= E1000_DTXCTL_VLAN_ADDED;
-	wr32(E1000_DTXCTL, reg);
+	case e1000_82576:
+		/* notify HW that the MAC is adding vlan tags */
+		reg = rd32(E1000_DTXCTL);
+		reg |= E1000_DTXCTL_VLAN_ADDED;
+		wr32(E1000_DTXCTL, reg);
+	case e1000_82580:
+		/* enable replication vlan tag stripping */
+		reg = rd32(E1000_RPLOLR);
+		reg |= E1000_RPLOLR_STRVLAN;
+		wr32(E1000_RPLOLR, reg);
+	case e1000_i350:
+		/* none of the above registers are supported by i350 */
+		break;
+	}
 
 	if (adapter->vfs_allocated_count) {
 		igb_vmdq_set_loopback_pf(hw, true);
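The rewritten igb_vmm_control() above leans on deliberate case fall-through: 82576 programs DTXCTL and then falls into the 82580 case, which programs RPLOLR and falls into i350, which needs neither register. A hypothetical sketch of that shape (all names invented for illustration):

	switch (mac_type) {
	case MAC_OLDEST:
	default:
		return;		/* feature not supported at all */
	case MAC_MIDDLE:
		enable_vlan_tag_notify();	/* MAC_MIDDLE-only step */
		/* fall through: it also needs the setting below */
	case MAC_NEWER:
		enable_vlan_strip_replication();
		/* fall through */
	case MAC_NEWEST:
		break;		/* newest part needs none of the above */
	}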