-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h   |   3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c |   9
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c  | 197
3 files changed, 120 insertions(+), 89 deletions(-)
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 638d175792cf..cbbbff4627ac 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -456,8 +456,9 @@ struct e1000_info {
 
 #define E1000_RX_DESC_PS(R, i)	    \
 	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+#define E1000_RX_DESC_EXT(R, i)	    \
+	(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
 #define E1000_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
-#define E1000_RX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_rx_desc)
 #define E1000_TX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_tx_desc)
 #define E1000_CONTEXT_DESC(R, i)	E1000_GET_DESC(R, i, e1000_context_desc)
 
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 06d88f316dce..8d3ca85ae039 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1195,7 +1195,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 		goto err_nomem;
 	}
 
-	rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
+	rx_ring->size = rx_ring->count * sizeof(union e1000_rx_desc_extended);
 	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
 					   &rx_ring->dma, GFP_KERNEL);
 	if (!rx_ring->desc) {
@@ -1220,7 +1220,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	ew32(RCTL, rctl);
 
 	for (i = 0; i < rx_ring->count; i++) {
-		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
+		union e1000_rx_desc_extended *rx_desc;
 		struct sk_buff *skb;
 
 		skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL);
@@ -1238,8 +1238,9 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 			ret_val = 8;
 			goto err_nomem;
 		}
-		rx_desc->buffer_addr =
-			cpu_to_le64(rx_ring->buffer_info[i].dma);
+		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+		rx_desc->read.buffer_addr =
+			cpu_to_le64(rx_ring->buffer_info[i].dma);
 		memset(skb->data, 0x00, skb->len);
 	}
 
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d0fdb512e849..55c3cc1d6834 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -192,7 +192,7 @@ static void e1000e_dump(struct e1000_adapter *adapter)
 	struct e1000_buffer *buffer_info;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
 	union e1000_rx_desc_packet_split *rx_desc_ps;
-	struct e1000_rx_desc *rx_desc;
+	union e1000_rx_desc_extended *rx_desc;
 	struct my_u1 {
 		u64 a;
 		u64 b;
@@ -399,41 +399,70 @@ rx_ring_summary:
 		break;
 	default:
 	case 0:
-		/* Legacy Receive Descriptor Format
-		 *
-		 * +-----------------------------------------------------+
-		 * |                Buffer Address [63:0]                |
-		 * +-----------------------------------------------------+
-		 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
-		 * +-----------------------------------------------------+
-		 * 63       48 47    40 39      32 31         16 15      0
-		 */
-		printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
-			"[vl er S cks ln] [bi->dma       ] [bi->skb] "
-			"<-- Legacy format\n");
-		for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
-			rx_desc = E1000_RX_DESC(*rx_ring, i);
+		/* Extended Receive Descriptor (Read) Format
+		 *
+		 *   +-----------------------------------------------------+
+		 * 0 |                Buffer Address [63:0]                |
+		 *   +-----------------------------------------------------+
+		 * 8 |                      Reserved                       |
+		 *   +-----------------------------------------------------+
+		 */
+		printk(KERN_INFO "R  [desc]      [buf addr 63:0 ] "
+			"[reserved 63:0 ] [bi->dma       ] "
+			"[bi->skb] <-- Ext (Read) format\n");
+		/* Extended Receive Descriptor (Write-Back) Format
+		 *
+		 *   63       48 47    32 31    24 23            4 3        0
+		 *   +------------------------------------------------------+
+		 *   |     RSS Hash      |        |               |         |
+		 * 0 +-------------------+  Rsvd  |   Reserved    | MRQ RSS |
+		 *   |  Packet   | IP    |        |               |  Type   |
+		 *   | Checksum  | Ident |        |               |         |
+		 *   +------------------------------------------------------+
+		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+		 *   +------------------------------------------------------+
+		 *   63       48 47    32 31            20 19               0
+		 */
+		printk(KERN_INFO "RWB[desc]      [cs ipid    mrq] "
+			"[vt   ln xe  xs] "
+			"[bi->skb] <-- Ext (Write-Back) format\n");
+
+		for (i = 0; i < rx_ring->count; i++) {
 			buffer_info = &rx_ring->buffer_info[i];
-			u0 = (struct my_u0 *)rx_desc;
-			printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
-				"%016llX %p", i,
-				(unsigned long long)le64_to_cpu(u0->a),
-				(unsigned long long)le64_to_cpu(u0->b),
-				(unsigned long long)buffer_info->dma,
-				buffer_info->skb);
+			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+			u1 = (struct my_u1 *)rx_desc;
+			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+			if (staterr & E1000_RXD_STAT_DD) {
+				/* Descriptor Done */
+				printk(KERN_INFO "RWB[0x%03X]     %016llX "
+					"%016llX ---------------- %p", i,
+					(unsigned long long)le64_to_cpu(u1->a),
+					(unsigned long long)le64_to_cpu(u1->b),
+					buffer_info->skb);
+			} else {
+				printk(KERN_INFO "R  [0x%03X]     %016llX "
+					"%016llX %016llX %p", i,
+					(unsigned long long)le64_to_cpu(u1->a),
+					(unsigned long long)le64_to_cpu(u1->b),
+					(unsigned long long)buffer_info->dma,
+					buffer_info->skb);
+
+				if (netif_msg_pktdata(adapter))
+					print_hex_dump(KERN_INFO, "",
+						       DUMP_PREFIX_ADDRESS, 16,
+						       1,
+						       phys_to_virt
+						       (buffer_info->dma),
+						       adapter->rx_buffer_len,
+						       true);
+			}
+
 			if (i == rx_ring->next_to_use)
 				printk(KERN_CONT " NTU\n");
 			else if (i == rx_ring->next_to_clean)
 				printk(KERN_CONT " NTC\n");
 			else
 				printk(KERN_CONT "\n");
-
-			if (netif_msg_pktdata(adapter))
-				print_hex_dump(KERN_INFO, "",
-					       DUMP_PREFIX_ADDRESS,
-					       16, 1,
-					       phys_to_virt(buffer_info->dma),
-					       adapter->rx_buffer_len, true);
 		}
 	}
 
@@ -519,7 +548,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 }
 
 /**
- * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
+ * e1000_alloc_rx_buffers - Replace used receive buffers
  * @adapter: address of board private structure
  **/
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
@@ -528,7 +557,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
-	struct e1000_rx_desc *rx_desc;
+	union e1000_rx_desc_extended *rx_desc;
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
@@ -562,8 +591,8 @@ map_skb:
 			break;
 		}
 
-		rx_desc = E1000_RX_DESC(*rx_ring, i);
-		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
 
 		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
 			/*
@@ -697,7 +726,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
-	struct e1000_rx_desc *rx_desc;
+	union e1000_rx_desc_extended *rx_desc;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
@@ -738,8 +767,8 @@ check_page:
 						  PAGE_SIZE,
 						  DMA_FROM_DEVICE);
 
-		rx_desc = E1000_RX_DESC(*rx_ring, i);
-		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
 
 		if (unlikely(++i == rx_ring->count))
 			i = 0;
@@ -774,28 +803,27 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
-	struct e1000_rx_desc *rx_desc, *next_rxd;
+	union e1000_rx_desc_extended *rx_desc, *next_rxd;
 	struct e1000_buffer *buffer_info, *next_buffer;
-	u32 length;
+	u32 length, staterr;
 	unsigned int i;
 	int cleaned_count = 0;
 	bool cleaned = 0;
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
 	i = rx_ring->next_to_clean;
-	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	buffer_info = &rx_ring->buffer_info[i];
 
-	while (rx_desc->status & E1000_RXD_STAT_DD) {
+	while (staterr & E1000_RXD_STAT_DD) {
 		struct sk_buff *skb;
-		u8 status;
 
 		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
 		rmb();	/* read descriptor and rx_buffer_info after status DD */
 
-		status = rx_desc->status;
 		skb = buffer_info->skb;
 		buffer_info->skb = NULL;
 
@@ -804,7 +832,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
-		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
 		prefetch(next_rxd);
 
 		next_buffer = &rx_ring->buffer_info[i];
@@ -817,7 +845,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 				 DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
-		length = le16_to_cpu(rx_desc->length);
+		length = le16_to_cpu(rx_desc->wb.upper.length);
 
 		/*
 		 * !EOP means multiple descriptors were used to store a single
@@ -826,7 +854,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		 * next frame that _does_ have the EOP bit set, as it is by
 		 * definition only a frame fragment
 		 */
-		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
 			adapter->flags2 |= FLAG2_IS_DISCARDING;
 
 		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
@@ -834,12 +862,12 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 			e_dbg("Receive packet consumed multiple buffers\n");
 			/* recycle */
 			buffer_info->skb = skb;
-			if (status & E1000_RXD_STAT_EOP)
+			if (staterr & E1000_RXD_STAT_EOP)
 				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 			goto next_desc;
 		}
 
-		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
+		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
 			/* recycle */
 			buffer_info->skb = skb;
 			goto next_desc;
@@ -877,15 +905,15 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		skb_put(skb, length);
 
 		/* Receive Checksum Offload */
-		e1000_rx_checksum(adapter,
-				  (u32)(status) |
-				  ((u32)(rx_desc->errors) << 24),
-				  le16_to_cpu(rx_desc->csum), skb);
+		e1000_rx_checksum(adapter, staterr,
+				  le16_to_cpu(rx_desc->wb.lower.hi_dword.
+					      csum_ip.csum), skb);
 
-		e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special);
+		e1000_receive_skb(adapter, netdev, skb, staterr,
+				  rx_desc->wb.upper.vlan);
 
 next_desc:
-		rx_desc->status = 0;
+		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
 
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
@@ -897,6 +925,8 @@ next_desc:
 		/* use prefetched values */
 		rx_desc = next_rxd;
 		buffer_info = next_buffer;
+
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	}
 	rx_ring->next_to_clean = i;
 
@@ -1280,35 +1310,34 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
-	struct e1000_rx_desc *rx_desc, *next_rxd;
+	union e1000_rx_desc_extended *rx_desc, *next_rxd;
 	struct e1000_buffer *buffer_info, *next_buffer;
-	u32 length;
+	u32 length, staterr;
 	unsigned int i;
 	int cleaned_count = 0;
 	bool cleaned = false;
 	unsigned int total_rx_bytes=0, total_rx_packets=0;
 
 	i = rx_ring->next_to_clean;
-	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	buffer_info = &rx_ring->buffer_info[i];
 
-	while (rx_desc->status & E1000_RXD_STAT_DD) {
+	while (staterr & E1000_RXD_STAT_DD) {
 		struct sk_buff *skb;
-		u8 status;
 
 		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
 		rmb();	/* read descriptor and rx_buffer_info after status DD */
 
-		status = rx_desc->status;
 		skb = buffer_info->skb;
 		buffer_info->skb = NULL;
 
 		++i;
 		if (i == rx_ring->count)
 			i = 0;
-		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
 		prefetch(next_rxd);
 
 		next_buffer = &rx_ring->buffer_info[i];
@@ -1319,23 +1348,22 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 				 DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
-		length = le16_to_cpu(rx_desc->length);
+		length = le16_to_cpu(rx_desc->wb.upper.length);
 
 		/* errors is only valid for DD + EOP descriptors */
-		if (unlikely((status & E1000_RXD_STAT_EOP) &&
-			     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
+		if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
+			     (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK))) {
 			/* recycle both page and skb */
 			buffer_info->skb = skb;
-			/* an error means any chain goes out the window
-			 * too */
-			if (rx_ring->rx_skb_top)
-				dev_kfree_skb_irq(rx_ring->rx_skb_top);
-			rx_ring->rx_skb_top = NULL;
-			goto next_desc;
+			/* an error means any chain goes out the window too */
+			if (rx_ring->rx_skb_top)
+				dev_kfree_skb_irq(rx_ring->rx_skb_top);
+			rx_ring->rx_skb_top = NULL;
+			goto next_desc;
 		}
 
 #define rxtop (rx_ring->rx_skb_top)
-		if (!(status & E1000_RXD_STAT_EOP)) {
+		if (!(staterr & E1000_RXD_STAT_EOP)) {
 			/* this descriptor is only the beginning (or middle) */
 			if (!rxtop) {
 				/* this is the beginning of a chain */
@@ -1390,10 +1418,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 		}
 
 		/* Receive Checksum Offload XXX recompute due to CRC strip? */
-		e1000_rx_checksum(adapter,
-				  (u32)(status) |
-				  ((u32)(rx_desc->errors) << 24),
-				  le16_to_cpu(rx_desc->csum), skb);
+		e1000_rx_checksum(adapter, staterr,
+				  le16_to_cpu(rx_desc->wb.lower.hi_dword.
+					      csum_ip.csum), skb);
 
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
@@ -1406,11 +1433,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 			goto next_desc;
 		}
 
-		e1000_receive_skb(adapter, netdev, skb, status,
-				  rx_desc->special);
+		e1000_receive_skb(adapter, netdev, skb, staterr,
+				  rx_desc->wb.upper.vlan);
 
 next_desc:
-		rx_desc->status = 0;
+		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
 
 		/* return some buffers to hardware, one at a time is too slow */
 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
@@ -1422,6 +1449,8 @@ next_desc:
 		/* use prefetched values */
 		rx_desc = next_rxd;
 		buffer_info = next_buffer;
+
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	}
 	rx_ring->next_to_clean = i;
 
@@ -2820,6 +2849,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 		break;
 	}
 
+	/* Enable Extended Status in all Receive Descriptors */
+	rfctl = er32(RFCTL);
+	rfctl |= E1000_RFCTL_EXTEN;
+
 	/*
 	 * 82571 and greater support packet-split where the protocol
 	 * header is placed in skb->data and the packet data is
@@ -2845,9 +2878,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 	if (adapter->rx_ps_pages) {
 		u32 psrctl = 0;
 
-		/* Configure extra packet-split registers */
-		rfctl = er32(RFCTL);
-		rfctl |= E1000_RFCTL_EXTEN;
 		/*
 		 * disable packet split support for IPv6 extension headers,
 		 * because some malformed IPv6 headers can hang the Rx
@@ -2855,8 +2885,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
 			  E1000_RFCTL_NEW_IPV6_EXT_DIS);
 
-		ew32(RFCTL, rfctl);
-
 		/* Enable Packet split descriptors */
 		rctl |= E1000_RCTL_DTYP_PS;
 
@@ -2879,6 +2907,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 		ew32(PSRCTL, psrctl);
 	}
 
+	ew32(RFCTL, rfctl);
 	ew32(RCTL, rctl);
 	/* just started the receive unit, no need to restart */
 	adapter->flags &= ~FLAG_RX_RESTART_NOW;
@@ -2904,11 +2933,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 		adapter->clean_rx = e1000_clean_rx_irq_ps;
 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
 	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
-		rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
+		rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
 		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
 		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
 	} else {
-		rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
+		rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
 		adapter->clean_rx = e1000_clean_rx_irq;
 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
 	}