author	David S. Miller <davem@davemloft.net>	2011-08-20 20:25:36 -0400
committer	David S. Miller <davem@davemloft.net>	2011-08-20 20:25:36 -0400
commit	ca1ba7caa68520864e4b9227e67f3bbc6fed373b (patch)
tree	84010e15b506f0506c15dbf03794dd8b776074ea
parent	6461be3a54f802e00d5dcba3537271f92a90eaf3 (diff)
parent	66f32a8b97f11ad73d2e7b8c192c55febb20b425 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/jkirsher/net-next
Conflicts:
	drivers/net/ethernet/intel/e1000e/netdev.c
-rw-r--r--	drivers/net/ethernet/intel/e1000e/e1000.h	|   3
-rw-r--r--	drivers/net/ethernet/intel/e1000e/ethtool.c	|   9
-rw-r--r--	drivers/net/ethernet/intel/e1000e/netdev.c	| 199
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe.h	|  27
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c	|   2
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c	|  10
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c	| 554
7 files changed, 445 insertions, 359 deletions
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 8533ad7f3559..fa72052a0031 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -461,8 +461,9 @@ struct e1000_info {
 
 #define E1000_RX_DESC_PS(R, i)	    \
	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+#define E1000_RX_DESC_EXT(R, i)	    \
+	(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
 #define E1000_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
-#define E1000_RX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_rx_desc)
 #define E1000_TX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_tx_desc)
 #define E1000_CONTEXT_DESC(R, i)	E1000_GET_DESC(R, i, e1000_context_desc)
 
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 6a0526a59a8a..e0cbd6a0bde8 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1195,7 +1195,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 		goto err_nomem;
 	}
 
-	rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
+	rx_ring->size = rx_ring->count * sizeof(union e1000_rx_desc_extended);
 	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
 					   &rx_ring->dma, GFP_KERNEL);
 	if (!rx_ring->desc) {
@@ -1221,7 +1221,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	ew32(RCTL, rctl);
 
 	for (i = 0; i < rx_ring->count; i++) {
-		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
+		union e1000_rx_desc_extended *rx_desc;
 		struct sk_buff *skb;
 
 		skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL);
@@ -1239,8 +1239,9 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 			ret_val = 8;
 			goto err_nomem;
 		}
-		rx_desc->buffer_addr =
-			cpu_to_le64(rx_ring->buffer_info[i].dma);
+		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+		rx_desc->read.buffer_addr =
+			cpu_to_le64(rx_ring->buffer_info[i].dma);
 		memset(skb->data, 0x00, skb->len);
 	}
 
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index b1f925bfb8b6..9742bc603cad 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -56,7 +56,7 @@
 
 #define DRV_EXTRAVERSION "-k"
 
-#define DRV_VERSION "1.4.4" DRV_EXTRAVERSION
+#define DRV_VERSION "1.5.1" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -192,7 +192,7 @@ static void e1000e_dump(struct e1000_adapter *adapter)
 	struct e1000_buffer *buffer_info;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
 	union e1000_rx_desc_packet_split *rx_desc_ps;
-	struct e1000_rx_desc *rx_desc;
+	union e1000_rx_desc_extended *rx_desc;
 	struct my_u1 {
 		u64 a;
 		u64 b;
@@ -399,41 +399,70 @@ rx_ring_summary:
 		break;
 	default:
 	case 0:
-		/* Legacy Receive Descriptor Format
+		/* Extended Receive Descriptor (Read) Format
 		 *
 		 * +-----------------------------------------------------+
-		 * | Buffer Address [63:0]                               |
+		 * 0 | Buffer Address [63:0]                             |
 		 * +-----------------------------------------------------+
-		 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
+		 * 8 | Reserved                                          |
 		 * +-----------------------------------------------------+
-		 * 63       48 47    40 39      32 31         16 15    0
 		 */
-		printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
-			"[vl er S cks ln] [bi->dma       ] [bi->skb] "
-			"<-- Legacy format\n");
-		for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
-			rx_desc = E1000_RX_DESC(*rx_ring, i);
+		printk(KERN_INFO "R  [desc]      [buf addr 63:0 ] "
+		       "[reserved 63:0 ] [bi->dma       ] "
+		       "[bi->skb] <-- Ext (Read) format\n");
+		/* Extended Receive Descriptor (Write-Back) Format
+		 *
+		 *   63       48 47    32 31  24 23            4 3        0
+		 *   +------------------------------------------------------+
+		 *   |     RSS Hash      |      |               |           |
+		 * 0 +-------------------+ Rsvd |   Reserved    | MRQ RSS   |
+		 *   | Packet   | IP     |      |               |  Type     |
+		 *   | Checksum | Ident  |      |               |           |
+		 *   +------------------------------------------------------+
+		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+		 *   +------------------------------------------------------+
+		 *   63       48 47    32 31            20 19               0
+		 */
+		printk(KERN_INFO "RWB[desc]      [cs ipid    mrq] "
+		       "[vt   ln xe  xs] "
+		       "[bi->skb] <-- Ext (Write-Back) format\n");
+
+		for (i = 0; i < rx_ring->count; i++) {
 			buffer_info = &rx_ring->buffer_info[i];
-			u0 = (struct my_u0 *)rx_desc;
-			printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
-				"%016llX %p", i,
-				(unsigned long long)le64_to_cpu(u0->a),
-				(unsigned long long)le64_to_cpu(u0->b),
-				(unsigned long long)buffer_info->dma,
-				buffer_info->skb);
+			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+			u1 = (struct my_u1 *)rx_desc;
+			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+			if (staterr & E1000_RXD_STAT_DD) {
+				/* Descriptor Done */
+				printk(KERN_INFO "RWB[0x%03X]     %016llX "
+				       "%016llX ---------------- %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       buffer_info->skb);
+			} else {
+				printk(KERN_INFO "R  [0x%03X]     %016llX "
+				       "%016llX %016llX %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       (unsigned long long)buffer_info->dma,
+				       buffer_info->skb);
+
+				if (netif_msg_pktdata(adapter))
+					print_hex_dump(KERN_INFO, "",
+						       DUMP_PREFIX_ADDRESS, 16,
+						       1,
+						       phys_to_virt
+						       (buffer_info->dma),
+						       adapter->rx_buffer_len,
+						       true);
+			}
+
 			if (i == rx_ring->next_to_use)
 				printk(KERN_CONT " NTU\n");
 			else if (i == rx_ring->next_to_clean)
 				printk(KERN_CONT " NTC\n");
 			else
 				printk(KERN_CONT "\n");
-
-			if (netif_msg_pktdata(adapter))
-				print_hex_dump(KERN_INFO, "",
-					       DUMP_PREFIX_ADDRESS,
-					       16, 1,
-					       phys_to_virt(buffer_info->dma),
-					       adapter->rx_buffer_len, true);
 		}
 	}
 
@@ -576,7 +605,7 @@ static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
 }
 
 /**
- * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
+ * e1000_alloc_rx_buffers - Replace used receive buffers
  * @adapter: address of board private structure
  **/
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
@@ -585,7 +614,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
-	struct e1000_rx_desc *rx_desc;
+	union e1000_rx_desc_extended *rx_desc;
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
@@ -619,8 +648,8 @@ map_skb:
 			break;
 		}
 
-		rx_desc = E1000_RX_DESC(*rx_ring, i);
-		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
 
 		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
 			/*
@@ -761,7 +790,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
-	struct e1000_rx_desc *rx_desc;
+	union e1000_rx_desc_extended *rx_desc;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
@@ -802,8 +831,8 @@ check_page:
 						  PAGE_SIZE,
 						  DMA_FROM_DEVICE);
 
-		rx_desc = E1000_RX_DESC(*rx_ring, i);
-		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
 
 		if (unlikely(++i == rx_ring->count))
 			i = 0;
@@ -841,28 +870,27 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
-	struct e1000_rx_desc *rx_desc, *next_rxd;
+	union e1000_rx_desc_extended *rx_desc, *next_rxd;
 	struct e1000_buffer *buffer_info, *next_buffer;
-	u32 length;
+	u32 length, staterr;
 	unsigned int i;
 	int cleaned_count = 0;
 	bool cleaned = 0;
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
 	i = rx_ring->next_to_clean;
-	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	buffer_info = &rx_ring->buffer_info[i];
 
-	while (rx_desc->status & E1000_RXD_STAT_DD) {
+	while (staterr & E1000_RXD_STAT_DD) {
 		struct sk_buff *skb;
-		u8 status;
 
 		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
 		rmb();	/* read descriptor and rx_buffer_info after status DD */
 
-		status = rx_desc->status;
 		skb = buffer_info->skb;
 		buffer_info->skb = NULL;
 
@@ -871,7 +899,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
-		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
 		prefetch(next_rxd);
 
 		next_buffer = &rx_ring->buffer_info[i];
@@ -884,7 +912,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 				 DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
-		length = le16_to_cpu(rx_desc->length);
+		length = le16_to_cpu(rx_desc->wb.upper.length);
 
 		/*
 		 * !EOP means multiple descriptors were used to store a single
@@ -893,7 +921,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		 * next frame that _does_ have the EOP bit set, as it is by
 		 * definition only a frame fragment
 		 */
-		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
 			adapter->flags2 |= FLAG2_IS_DISCARDING;
 
 		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
@@ -901,12 +929,12 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 			e_dbg("Receive packet consumed multiple buffers\n");
 			/* recycle */
 			buffer_info->skb = skb;
-			if (status & E1000_RXD_STAT_EOP)
+			if (staterr & E1000_RXD_STAT_EOP)
 				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 			goto next_desc;
 		}
 
-		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
+		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
 			/* recycle */
 			buffer_info->skb = skb;
 			goto next_desc;
@@ -944,15 +972,15 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		skb_put(skb, length);
 
 		/* Receive Checksum Offload */
-		e1000_rx_checksum(adapter,
-				  (u32)(status) |
-				  ((u32)(rx_desc->errors) << 24),
-				  le16_to_cpu(rx_desc->csum), skb);
+		e1000_rx_checksum(adapter, staterr,
+				  le16_to_cpu(rx_desc->wb.lower.hi_dword.
+					      csum_ip.csum), skb);
 
-		e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special);
+		e1000_receive_skb(adapter, netdev, skb, staterr,
+				  rx_desc->wb.upper.vlan);
 
 next_desc:
-		rx_desc->status = 0;
+		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
 
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
@@ -964,6 +992,8 @@ next_desc:
 		/* use prefetched values */
 		rx_desc = next_rxd;
 		buffer_info = next_buffer;
+
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	}
 	rx_ring->next_to_clean = i;
 
@@ -1347,35 +1377,34 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
-	struct e1000_rx_desc *rx_desc, *next_rxd;
+	union e1000_rx_desc_extended *rx_desc, *next_rxd;
 	struct e1000_buffer *buffer_info, *next_buffer;
-	u32 length;
+	u32 length, staterr;
 	unsigned int i;
 	int cleaned_count = 0;
 	bool cleaned = false;
 	unsigned int total_rx_bytes=0, total_rx_packets=0;
 
 	i = rx_ring->next_to_clean;
-	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	buffer_info = &rx_ring->buffer_info[i];
 
-	while (rx_desc->status & E1000_RXD_STAT_DD) {
+	while (staterr & E1000_RXD_STAT_DD) {
 		struct sk_buff *skb;
-		u8 status;
 
 		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
 		rmb();	/* read descriptor and rx_buffer_info after status DD */
 
-		status = rx_desc->status;
 		skb = buffer_info->skb;
 		buffer_info->skb = NULL;
 
 		++i;
 		if (i == rx_ring->count)
 			i = 0;
-		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
 		prefetch(next_rxd);
 
 		next_buffer = &rx_ring->buffer_info[i];
@@ -1386,23 +1415,22 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 					       DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
-		length = le16_to_cpu(rx_desc->length);
+		length = le16_to_cpu(rx_desc->wb.upper.length);
 
 		/* errors is only valid for DD + EOP descriptors */
-		if (unlikely((status & E1000_RXD_STAT_EOP) &&
-		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
+		if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
+		    (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK))) {
 			/* recycle both page and skb */
 			buffer_info->skb = skb;
-			/* an error means any chain goes out the window
-			 * too */
-			if (rx_ring->rx_skb_top)
-				dev_kfree_skb_irq(rx_ring->rx_skb_top);
-			rx_ring->rx_skb_top = NULL;
-			goto next_desc;
+			/* an error means any chain goes out the window too */
+			if (rx_ring->rx_skb_top)
+				dev_kfree_skb_irq(rx_ring->rx_skb_top);
+			rx_ring->rx_skb_top = NULL;
+			goto next_desc;
 		}
 
 #define rxtop (rx_ring->rx_skb_top)
-		if (!(status & E1000_RXD_STAT_EOP)) {
+		if (!(staterr & E1000_RXD_STAT_EOP)) {
 			/* this descriptor is only the beginning (or middle) */
 			if (!rxtop) {
 				/* this is the beginning of a chain */
@@ -1457,10 +1485,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 		}
 
 		/* Receive Checksum Offload XXX recompute due to CRC strip? */
-		e1000_rx_checksum(adapter,
-				  (u32)(status) |
-				  ((u32)(rx_desc->errors) << 24),
-				  le16_to_cpu(rx_desc->csum), skb);
+		e1000_rx_checksum(adapter, staterr,
+				  le16_to_cpu(rx_desc->wb.lower.hi_dword.
+					      csum_ip.csum), skb);
 
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
@@ -1473,11 +1500,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 			goto next_desc;
 		}
 
-		e1000_receive_skb(adapter, netdev, skb, status,
-				  rx_desc->special);
+		e1000_receive_skb(adapter, netdev, skb, staterr,
+				  rx_desc->wb.upper.vlan);
 
 next_desc:
-		rx_desc->status = 0;
+		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
 
 		/* return some buffers to hardware, one at a time is too slow */
 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
@@ -1489,6 +1516,8 @@ next_desc:
 		/* use prefetched values */
 		rx_desc = next_rxd;
 		buffer_info = next_buffer;
+
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	}
 	rx_ring->next_to_clean = i;
 
@@ -2887,6 +2916,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 		break;
 	}
 
+	/* Enable Extended Status in all Receive Descriptors */
+	rfctl = er32(RFCTL);
+	rfctl |= E1000_RFCTL_EXTEN;
+
 	/*
 	 * 82571 and greater support packet-split where the protocol
 	 * header is placed in skb->data and the packet data is
@@ -2912,9 +2945,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 	if (adapter->rx_ps_pages) {
 		u32 psrctl = 0;
 
-		/* Configure extra packet-split registers */
-		rfctl = er32(RFCTL);
-		rfctl |= E1000_RFCTL_EXTEN;
 		/*
 		 * disable packet split support for IPv6 extension headers,
 		 * because some malformed IPv6 headers can hang the Rx
@@ -2922,8 +2952,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
 			  E1000_RFCTL_NEW_IPV6_EXT_DIS);
 
-		ew32(RFCTL, rfctl);
-
 		/* Enable Packet split descriptors */
 		rctl |= E1000_RCTL_DTYP_PS;
 
@@ -2946,6 +2974,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 		ew32(PSRCTL, psrctl);
 	}
 
+	ew32(RFCTL, rfctl);
 	ew32(RCTL, rctl);
 	/* just started the receive unit, no need to restart */
 	adapter->flags &= ~FLAG_RX_RESTART_NOW;
@@ -2971,11 +3000,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 		adapter->clean_rx = e1000_clean_rx_irq_ps;
 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
 	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
-		rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
+		rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
 		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
 		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
 	} else {
-		rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
+		rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
 		adapter->clean_rx = e1000_clean_rx_irq;
 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
 	}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index e04a8e49e6dc..378ce46a7f92 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -91,13 +91,16 @@
 #define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */
 
 #define IXGBE_TX_FLAGS_CSUM		(u32)(1)
-#define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1)
-#define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
-#define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
-#define IXGBE_TX_FLAGS_FCOE		(u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FSO		(u32)(1 << 5)
+#define IXGBE_TX_FLAGS_HW_VLAN		(u32)(1 << 1)
+#define IXGBE_TX_FLAGS_SW_VLAN		(u32)(1 << 2)
+#define IXGBE_TX_FLAGS_TSO		(u32)(1 << 3)
+#define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FCOE		(u32)(1 << 5)
+#define IXGBE_TX_FLAGS_FSO		(u32)(1 << 6)
+#define IXGBE_TX_FLAGS_MAPPED_AS_PAGE	(u32)(1 << 7)
 #define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
-#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT	29
 #define IXGBE_TX_FLAGS_VLAN_SHIFT	16
 
 #define IXGBE_MAX_RSC_INT_RATE          162760
@@ -141,14 +144,14 @@ struct vf_macvlans {
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbe_tx_buffer {
-	struct sk_buff *skb;
-	dma_addr_t dma;
+	union ixgbe_adv_tx_desc *next_to_watch;
 	unsigned long time_stamp;
-	u16 length;
-	u16 next_to_watch;
-	unsigned int bytecount;
+	dma_addr_t dma;
+	u32 length;
+	u32 tx_flags;
+	struct sk_buff *skb;
+	u32 bytecount;
 	u16 gso_segs;
-	u8 mapped_as_page;
 };
 
 struct ixgbe_rx_buffer {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 0ace6ce1d0b4..da6d53e7af99 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -414,7 +414,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 	u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};
 	int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
-#ifdef CONFIG_FCOE
+#ifdef IXGBE_FCOE
 	if (adapter->netdev->features & NETIF_F_FCOE_MTU)
 		max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
 #endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 824edae77865..e9b992fe5e46 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -241,10 +241,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	 */
 	if (lastsize == bufflen) {
 		if (j >= IXGBE_BUFFCNT_MAX) {
-			e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
-			      "not enough user buffers. We need an extra "
-			      "buffer because lastsize is bufflen.\n",
-			      xid, i, j, dmacount, (u64)addr);
+			printk_once("Will NOT use DDP since there are not "
+				    "enough user buffers. We need an extra "
+				    "buffer because lastsize is bufflen. "
+				    "xid=%x:%d,%d,%d:addr=%llx\n",
+				    xid, i, j, dmacount, (u64)addr);
+
 			goto out_noddp_free;
 		}
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index b73194c1c44a..e8aad76fa530 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -385,7 +385,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 		tx_ring = adapter->tx_ring[n];
 		tx_buffer_info =
 			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
-		pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
+		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
 			n, tx_ring->next_to_use, tx_ring->next_to_clean,
 			(u64)tx_buffer_info->dma,
 			tx_buffer_info->length,
@@ -424,7 +424,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
 			u0 = (struct my_u0 *)tx_desc;
 			pr_info("T [0x%03X]    %016llX %016llX %016llX"
-				" %04X  %3X %016llX %p", i,
+				" %04X  %p %016llX %p", i,
 				le64_to_cpu(u0->a),
 				le64_to_cpu(u0->b),
 				(u64)tx_buffer_info->dma,
@@ -643,27 +643,31 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
 	}
 }
 
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
-				      struct ixgbe_tx_buffer *tx_buffer_info)
+static inline void ixgbe_unmap_tx_resource(struct ixgbe_ring *ring,
+					   struct ixgbe_tx_buffer *tx_buffer)
 {
-	if (tx_buffer_info->dma) {
-		if (tx_buffer_info->mapped_as_page)
-			dma_unmap_page(tx_ring->dev,
-				       tx_buffer_info->dma,
-				       tx_buffer_info->length,
+	if (tx_buffer->dma) {
+		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE)
+			dma_unmap_page(ring->dev,
+				       tx_buffer->dma,
+				       tx_buffer->length,
 				       DMA_TO_DEVICE);
 		else
-			dma_unmap_single(tx_ring->dev,
-					 tx_buffer_info->dma,
-					 tx_buffer_info->length,
+			dma_unmap_single(ring->dev,
+					 tx_buffer->dma,
+					 tx_buffer->length,
 					 DMA_TO_DEVICE);
-		tx_buffer_info->dma = 0;
 	}
-	if (tx_buffer_info->skb) {
+	tx_buffer->dma = 0;
+}
+
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
+				      struct ixgbe_tx_buffer *tx_buffer_info)
+{
+	ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
+	if (tx_buffer_info->skb)
 		dev_kfree_skb_any(tx_buffer_info->skb);
 	tx_buffer_info->skb = NULL;
-	}
-	tx_buffer_info->time_stamp = 0;
 	/* tx_buffer_info must be completely set up in the transmit path */
 }
 
@@ -797,56 +801,72 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 			       struct ixgbe_ring *tx_ring)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
-	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
-	struct ixgbe_tx_buffer *tx_buffer_info;
+	struct ixgbe_tx_buffer *tx_buffer;
+	union ixgbe_adv_tx_desc *tx_desc;
 	unsigned int total_bytes = 0, total_packets = 0;
-	u16 i, eop, count = 0;
+	u16 i = tx_ring->next_to_clean;
+	u16 count;
 
-	i = tx_ring->next_to_clean;
-	eop = tx_ring->tx_buffer_info[i].next_to_watch;
-	eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+	tx_buffer = &tx_ring->tx_buffer_info[i];
+	tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 
-	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
-	       (count < q_vector->tx.work_limit)) {
-		bool cleaned = false;
-		rmb(); /* read buffer_info after eop_desc */
-		for ( ; !cleaned; count++) {
-			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
-			tx_buffer_info = &tx_ring->tx_buffer_info[i];
+	for (count = 0; count < q_vector->tx.work_limit; count++) {
+		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+		/* if next_to_watch is not set then there is no work pending */
+		if (!eop_desc)
+			break;
+
+		/* if DD is not set pending work has not been completed */
+		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+			break;
 
+		/* count the packet as being completed */
+		tx_ring->tx_stats.completed++;
+
+		/* clear next_to_watch to prevent false hangs */
+		tx_buffer->next_to_watch = NULL;
+
+		/* prevent any other reads prior to eop_desc being verified */
+		rmb();
+
+		do {
+			ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
 			tx_desc->wb.status = 0;
-			cleaned = (i == eop);
+			if (likely(tx_desc == eop_desc)) {
+				eop_desc = NULL;
+				dev_kfree_skb_any(tx_buffer->skb);
+				tx_buffer->skb = NULL;
+
+				total_bytes += tx_buffer->bytecount;
+				total_packets += tx_buffer->gso_segs;
+			}
 
+			tx_buffer++;
+			tx_desc++;
 			i++;
-			if (i == tx_ring->count)
+			if (unlikely(i == tx_ring->count)) {
 				i = 0;
 
-			if (cleaned && tx_buffer_info->skb) {
-				total_bytes += tx_buffer_info->bytecount;
-				total_packets += tx_buffer_info->gso_segs;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
 			}
 
-			ixgbe_unmap_and_free_tx_resource(tx_ring,
-							 tx_buffer_info);
-		}
-
-		tx_ring->tx_stats.completed++;
-		eop = tx_ring->tx_buffer_info[i].next_to_watch;
-		eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+		} while (eop_desc);
 	}
 
 	tx_ring->next_to_clean = i;
+	u64_stats_update_begin(&tx_ring->syncp);
 	tx_ring->stats.bytes += total_bytes;
 	tx_ring->stats.packets += total_packets;
-	u64_stats_update_begin(&tx_ring->syncp);
+	u64_stats_update_end(&tx_ring->syncp);
 	q_vector->tx.total_bytes += total_bytes;
 	q_vector->tx.total_packets += total_packets;
-	u64_stats_update_end(&tx_ring->syncp);
 
 	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
 		/* schedule immediate reset if we believe we hung */
 		struct ixgbe_hw *hw = &adapter->hw;
-		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 		e_err(drv, "Detected Tx Unit Hang\n"
 			"  Tx Queue             <%d>\n"
 			"  TDH, TDT             <%x>, <%x>\n"
@@ -858,8 +878,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 			tx_ring->queue_index,
 			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
 			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
-			tx_ring->next_to_use, eop,
-			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+			tx_ring->next_to_use, i,
+			tx_ring->tx_buffer_info[i].time_stamp, jiffies);
 
 		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
@@ -3597,7 +3617,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 
 	/* reconfigure the hardware */
 	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
-#ifdef CONFIG_FCOE
+#ifdef IXGBE_FCOE
 		if (adapter->netdev->features & NETIF_F_FCOE_MTU)
 			max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
 #endif
@@ -6351,7 +6371,7 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
 	u32 type_tucmd = 0;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
-		if (!(tx_flags & IXGBE_TX_FLAGS_VLAN))
+		if (!(tx_flags & IXGBE_TX_FLAGS_HW_VLAN))
 			return false;
 	} else {
 		u8 l4_hdr = 0;
@@ -6408,185 +6428,179 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
 	return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
 
-static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
-			struct ixgbe_ring *tx_ring,
-			struct sk_buff *skb, u32 tx_flags,
-			unsigned int first, const u8 hdr_len)
+static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 {
-	struct device *dev = tx_ring->dev;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	unsigned int len;
-	unsigned int total = skb->len;
-	unsigned int offset = 0, size, count = 0;
-	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
-	unsigned int f;
-	unsigned int bytecount = skb->len;
-	u16 gso_segs = 1;
-	u16 i;
+	/* set type for advanced descriptor with frame checksum insertion */
+	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+				      IXGBE_ADVTXD_DCMD_IFCS |
+				      IXGBE_ADVTXD_DCMD_DEXT);
 
-	i = tx_ring->next_to_use;
+	/* set HW vlan bit if vlan is present */
+	if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
+		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
 
-	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
-		/* excluding fcoe_crc_eof for FCoE */
-		total -= sizeof(struct fcoe_crc_eof);
+	/* set segmentation enable bits for TSO/FSO */
+#ifdef IXGBE_FCOE
+	if ((tx_flags & IXGBE_TX_FLAGS_TSO) || (tx_flags & IXGBE_TX_FLAGS_FSO))
+#else
+	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+#endif
+		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
 
-	len = min(skb_headlen(skb), total);
-	while (len) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
-
-		tx_buffer_info->length = size;
-		tx_buffer_info->mapped_as_page = false;
-		tx_buffer_info->dma = dma_map_single(dev,
-						     skb->data + offset,
-						     size, DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, tx_buffer_info->dma))
-			goto dma_error;
-		tx_buffer_info->time_stamp = jiffies;
-		tx_buffer_info->next_to_watch = i;
+	return cmd_type;
+}
 
-		len -= size;
-		total -= size;
-		offset += size;
-		count++;
+static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
+{
+	__le32 olinfo_status =
+		cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
-		if (len) {
-			i++;
-			if (i == tx_ring->count)
-				i = 0;
-		}
+	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM |
+					     (1 << IXGBE_ADVTXD_IDX_SHIFT));
+		/* enble IPv4 checksum for TSO */
+		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+			olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
 	}
 
-	for (f = 0; f < nr_frags; f++) {
-		struct skb_frag_struct *frag;
+	/* enable L4 checksum for TSO and TX checksum offload */
+	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
 
-		frag = &skb_shinfo(skb)->frags[f];
-		len = min((unsigned int)frag->size, total);
-		offset = frag->page_offset;
+#ifdef IXGBE_FCOE
+	/* use index 1 context for FCOE/FSO */
+	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC |
+					     (1 << IXGBE_ADVTXD_IDX_SHIFT));
 
-		while (len) {
-			i++;
-			if (i == tx_ring->count)
-				i = 0;
+#endif
+	return olinfo_status;
+}
 
-			tx_buffer_info = &tx_ring->tx_buffer_info[i];
-			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
+		       IXGBE_TXD_CMD_RS)
 
-			tx_buffer_info->length = size;
-			tx_buffer_info->dma = dma_map_page(dev,
-							   frag->page,
-							   offset, size,
-							   DMA_TO_DEVICE);
-			tx_buffer_info->mapped_as_page = true;
-			if (dma_mapping_error(dev, tx_buffer_info->dma))
-				goto dma_error;
-			tx_buffer_info->time_stamp = jiffies;
-			tx_buffer_info->next_to_watch = i;
-
-			len -= size;
-			total -= size;
-			offset += size;
-			count++;
+static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
+			 struct sk_buff *skb,
+			 struct ixgbe_tx_buffer *first,
+			 u32 tx_flags,
+			 const u8 hdr_len)
+{
+	struct device *dev = tx_ring->dev;
+	struct ixgbe_tx_buffer *tx_buffer_info;
+	union ixgbe_adv_tx_desc *tx_desc;
+	dma_addr_t dma;
+	__le32 cmd_type, olinfo_status;
+	struct skb_frag_struct *frag;
+	unsigned int f = 0;
+	unsigned int data_len = skb->data_len;
+	unsigned int size = skb_headlen(skb);
+	u32 offset = 0;
+	u32 paylen = skb->len - hdr_len;
+	u16 i = tx_ring->next_to_use;
+	u16 gso_segs;
+
+#ifdef IXGBE_FCOE
+	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+		if (data_len >= sizeof(struct fcoe_crc_eof)) {
+			data_len -= sizeof(struct fcoe_crc_eof);
+		} else {
+			size -= sizeof(struct fcoe_crc_eof) - data_len;
+			data_len = 0;
 		}
-		if (total == 0)
-			break;
 	}
 
-	if (tx_flags & IXGBE_TX_FLAGS_TSO)
-		gso_segs = skb_shinfo(skb)->gso_segs;
-#ifdef IXGBE_FCOE
-	/* adjust for FCoE Sequence Offload */
-	else if (tx_flags & IXGBE_TX_FLAGS_FSO)
-		gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
-					skb_shinfo(skb)->gso_size);
-#endif /* IXGBE_FCOE */
-	bytecount += (gso_segs - 1) * hdr_len;
+#endif
+	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma))
+		goto dma_error;
 
-	/* multiply data chunks by size of headers */
-	tx_ring->tx_buffer_info[i].bytecount = bytecount;
-	tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
-	tx_ring->tx_buffer_info[i].skb = skb;
-	tx_ring->tx_buffer_info[first].next_to_watch = i;
+	cmd_type = ixgbe_tx_cmd_type(tx_flags);
+	olinfo_status = ixgbe_tx_olinfo_status(tx_flags, paylen);
 
-	return count;
+	tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 
-dma_error:
-	e_dev_err("TX DMA map failed\n");
+	for (;;) {
+		while (size > IXGBE_MAX_DATA_PER_TXD) {
+			tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+			tx_desc->read.cmd_type_len =
+				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+			tx_desc->read.olinfo_status = olinfo_status;
 
-	/* clear timestamp and dma mappings for failed tx_buffer_info map */
-	tx_buffer_info->dma = 0;
-	tx_buffer_info->time_stamp = 0;
-	tx_buffer_info->next_to_watch = 0;
-	if (count)
-		count--;
+			offset += IXGBE_MAX_DATA_PER_TXD;
+			size -= IXGBE_MAX_DATA_PER_TXD;
 
-	/* clear timestamp and dma mappings for remaining portion of packet */
-	while (count--) {
-		if (i == 0)
-			i += tx_ring->count;
-		i--;
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-	}
+			tx_desc++;
+			i++;
+			if (i == tx_ring->count) {
+				tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
+				i = 0;
+			}
+		}
 
-	return 0;
-}
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		tx_buffer_info->length = offset + size;
+		tx_buffer_info->tx_flags = tx_flags;
+		tx_buffer_info->dma = dma;
 
-static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
-			   int tx_flags, int count, u32 paylen, u8 hdr_len)
-{
-	union ixgbe_adv_tx_desc *tx_desc = NULL;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	u32 olinfo_status = 0, cmd_type_len = 0;
-	unsigned int i;
-	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+		tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+		tx_desc->read.olinfo_status = olinfo_status;
 
-	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+		if (!data_len)
+			break;
 
-	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+		frag = &skb_shinfo(skb)->frags[f];
+#ifdef IXGBE_FCOE
+		size = min_t(unsigned int, data_len, frag->size);
+#else
+		size = frag->size;
+#endif
+		data_len -= size;
+		f++;
 
-	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+		offset = 0;
+		tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE;
 
-	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
-		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+		dma = dma_map_page(dev, frag->page, frag->page_offset,
+				   size, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma))
+			goto dma_error;
 
-		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
-				 IXGBE_ADVTXD_POPTS_SHIFT;
+		tx_desc++;
+		i++;
+		if (i == tx_ring->count) {
+			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
+			i = 0;
+		}
+	}
 
-		/* use index 1 context for tso */
-		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
-			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
-					 IXGBE_ADVTXD_POPTS_SHIFT;
+	tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
 
-	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
-		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
-				 IXGBE_ADVTXD_POPTS_SHIFT;
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
 
-	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
-		olinfo_status |= IXGBE_ADVTXD_CC;
-		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-		if (tx_flags & IXGBE_TX_FLAGS_FSO)
-			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
-	}
+	tx_ring->next_to_use = i;
 
-	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+		gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+	/* adjust for FCoE Sequence Offload */
+	else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+		gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+					skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+	else
+		gso_segs = 1;
 
-	i = tx_ring->next_to_use;
-	while (count--) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
-		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
-		tx_desc->read.cmd_type_len =
-			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
-		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-	}
+	/* multiply data chunks by size of headers */
+	tx_buffer_info->bytecount = paylen + (gso_segs * hdr_len);
+	tx_buffer_info->gso_segs = gso_segs;
+	tx_buffer_info->skb = skb;
 
-	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+	/* set the timestamp */
+	first->time_stamp = jiffies;
 
 	/*
 	 * Force memory writes to complete before letting h/w
@@ -6596,8 +6610,30 @@ static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
 	 */
 	wmb();
 
-	tx_ring->next_to_use = i;
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	/* notify HW of packet */
 	writel(i, tx_ring->tail);
+
+	return;
+dma_error:
+	dev_err(dev, "TX DMA map failed\n");
+
+	/* clear dma mappings for failed tx_buffer_info map */
+	for (;;) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
+		if (tx_buffer_info == first)
+			break;
+		if (i == 0)
+			i = tx_ring->count;
+		i--;
+	}
+
+	dev_kfree_skb_any(skb);
+
+	tx_ring->next_to_use = i;
 }
 
 static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
@@ -6636,8 +6672,8 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
 
 	th = tcp_hdr(skb);
 
-	/* skip this packet since the socket is closing */
-	if (th->fin)
+	/* skip this packet since it is invalid or the socket is closing */
+	if (!th || th->fin)
 		return;
 
 	/* sample on all syn packets or once every atr sample count */
@@ -6662,7 +6698,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
 	 * since src port and flex bytes occupy the same word XOR them together
 	 * and write the value to source port portion of compressed dword
 	 */
-	if (vlan_id)
+	if (tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
 		common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
 	else
 		common.port.src ^= th->dest ^ protocol;
@@ -6744,14 +6780,14 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 			  struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring)
 {
+	struct ixgbe_tx_buffer *first;
 	int tso;
 	u32 tx_flags = 0;
 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
 	unsigned short f;
 #endif
-	u16 first;
 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
-	__be16 protocol;
+	__be16 protocol = skb->protocol;
 	u8 hdr_len = 0;
 
 	/*
@@ -6772,68 +6808,82 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
-	protocol = vlan_get_protocol(skb);
-
+	/* if we have a HW VLAN tag being added default to the HW one */
 	if (vlan_tx_tag_present(skb)) {
-		tx_flags |= vlan_tx_tag_get(skb);
-		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
-			tx_flags |= tx_ring->dcb_tc << 13;
+		tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+		tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
+	/* else if it is a SW VLAN check the next protocol and store the tag */
+	} else if (protocol == __constant_htons(ETH_P_8021Q)) {
+		struct vlan_hdr *vhdr, _vhdr;
+		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+		if (!vhdr)
+			goto out_drop;
+
+		protocol = vhdr->h_vlan_encapsulated_proto;
+		tx_flags |= ntohs(vhdr->h_vlan_TCI) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+		tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
+	}
+
+	if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+	    skb->priority != TC_PRIO_CONTROL) {
+		tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
+		tx_flags |= tx_ring->dcb_tc <<
+			    IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
+		if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
+			struct vlan_ethhdr *vhdr;
+			if (skb_header_cloned(skb) &&
+			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+				goto out_drop;
+			vhdr = (struct vlan_ethhdr *)skb->data;
+			vhdr->h_vlan_TCI = htons(tx_flags >>
+						 IXGBE_TX_FLAGS_VLAN_SHIFT);
+		} else {
+			tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
 		}
-		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
-		tx_flags |= IXGBE_TX_FLAGS_VLAN;
-	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
-		   skb->priority != TC_PRIO_CONTROL) {
-		tx_flags |= tx_ring->dcb_tc << 13;
-		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
-		tx_flags |= IXGBE_TX_FLAGS_VLAN;
 	}
 
-#ifdef IXGBE_FCOE
-	/* for FCoE with DCB, we force the priority to what
-	 * was specified by the switch */
-	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
-	    (protocol == htons(ETH_P_FCOE)))
-		tx_flags |= IXGBE_TX_FLAGS_FCOE;
-
-#endif
 	/* record the location of the first descriptor for this packet */
-	first = tx_ring->next_to_use;
+	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
 
-	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
 #ifdef IXGBE_FCOE
 	/* setup tx offload for FCoE */
+	if ((protocol == __constant_htons(ETH_P_FCOE)) &&
+	    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
 		tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
 		if (tso < 0)
 			goto out_drop;
 		else if (tso)
-			tx_flags |= IXGBE_TX_FLAGS_FSO;
-#endif /* IXGBE_FCOE */
-	} else {
-		if (protocol == htons(ETH_P_IP))
-			tx_flags |= IXGBE_TX_FLAGS_IPV4;
-		tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
-		if (tso < 0)
-			goto out_drop;
-		else if (tso)
-			tx_flags |= IXGBE_TX_FLAGS_TSO;
-		else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
-			tx_flags |= IXGBE_TX_FLAGS_CSUM;
+			tx_flags |= IXGBE_TX_FLAGS_FSO |
+				    IXGBE_TX_FLAGS_FCOE;
+		else
+			tx_flags |= IXGBE_TX_FLAGS_FCOE;
+
+		goto xmit_fcoe;
 	}
 
-	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
-	if (count) {
-		/* add the ATR filter if ATR is on */
-		if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
-			ixgbe_atr(tx_ring, skb, tx_flags, protocol);
-		ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
-		ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
+#endif /* IXGBE_FCOE */
+	/* setup IPv4/IPv6 offloads */
+	if (protocol == __constant_htons(ETH_P_IP))
+		tx_flags |= IXGBE_TX_FLAGS_IPV4;
 
-	} else {
-		tx_ring->tx_buffer_info[first].time_stamp = 0;
-		tx_ring->next_to_use = first;
+	tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+	if (tso < 0)
 		goto out_drop;
-	}
+	else if (tso)
+		tx_flags |= IXGBE_TX_FLAGS_TSO;
+	else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
+		tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+	/* add the ATR filter if ATR is on */
+	if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
+		ixgbe_atr(tx_ring, skb, tx_flags, protocol);
+
+#ifdef IXGBE_FCOE
+xmit_fcoe:
+#endif /* IXGBE_FCOE */
+	ixgbe_tx_map(tx_ring, skb, first, tx_flags, hdr_len);
+
+	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	return NETDEV_TX_OK;
 