Diffstat (limited to 'drivers/net/e1000e/netdev.c')
-rw-r--r--	drivers/net/e1000e/netdev.c	425
1 file changed, 72 insertions(+), 353 deletions(-)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 033e124d1c1f..4fd2e23720b6 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -245,37 +245,36 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 
 		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
-			ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
-						     + j];
-			if (j < adapter->rx_ps_pages) {
-				if (!ps_page->page) {
-					ps_page->page = alloc_page(GFP_ATOMIC);
-					if (!ps_page->page) {
-						adapter->alloc_rx_buff_failed++;
-						goto no_buffers;
-					}
-					ps_page->dma = pci_map_page(pdev,
-							   ps_page->page,
-							   0, PAGE_SIZE,
-							   PCI_DMA_FROMDEVICE);
-					if (pci_dma_mapping_error(
-							ps_page->dma)) {
-						dev_err(&adapter->pdev->dev,
-						  "RX DMA page map failed\n");
-						adapter->rx_dma_failed++;
-						goto no_buffers;
-					}
-				}
-				/*
-				 * Refresh the desc even if buffer_addrs
-				 * didn't change because each write-back
-				 * erases this info.
-				 */
-				rx_desc->read.buffer_addr[j+1] =
-					cpu_to_le64(ps_page->dma);
-			} else {
-				rx_desc->read.buffer_addr[j+1] = ~0;
-			}
+			ps_page = &buffer_info->ps_pages[j];
+			if (j >= adapter->rx_ps_pages) {
+				/* all unused desc entries get hw null ptr */
+				rx_desc->read.buffer_addr[j+1] = ~0;
+				continue;
+			}
+			if (!ps_page->page) {
+				ps_page->page = alloc_page(GFP_ATOMIC);
+				if (!ps_page->page) {
+					adapter->alloc_rx_buff_failed++;
+					goto no_buffers;
+				}
+				ps_page->dma = pci_map_page(pdev,
+						   ps_page->page,
+						   0, PAGE_SIZE,
+						   PCI_DMA_FROMDEVICE);
+				if (pci_dma_mapping_error(ps_page->dma)) {
+					dev_err(&adapter->pdev->dev,
+						"RX DMA page map failed\n");
+					adapter->rx_dma_failed++;
+					goto no_buffers;
+				}
+			}
+			/*
+			 * Refresh the desc even if buffer_addrs
+			 * didn't change because each write-back
+			 * erases this info.
+			 */
+			rx_desc->read.buffer_addr[j+1] =
+				cpu_to_le64(ps_page->dma);
 		}
 
 		skb = netdev_alloc_skb(netdev,
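The hunk above makes two coordinated changes: the flat per-ring ps_pages array, indexed as i * PS_PAGE_BUFFERS + j, becomes a small per-buffer array reached as buffer_info->ps_pages[j], and the if/else nesting is flattened by handling unused descriptor slots with an early continue. A standalone C sketch of the layout change, with simplified stand-in structs rather than the driver's own types:

#include <stdio.h>
#include <stdlib.h>

#define PS_PAGE_BUFFERS 4	/* pages per packet-split descriptor */

struct ps_page { void *page; };
struct buffer  { struct ps_page *ps_pages; };	/* new: per-entry array */

int main(void)
{
	unsigned int count = 8, i, j;

	/* old layout: one flat allocation covering the whole ring */
	struct ps_page *flat = calloc(count * PS_PAGE_BUFFERS, sizeof(*flat));

	/* new layout: each ring entry owns its own small array */
	struct buffer *ring = calloc(count, sizeof(*ring));
	for (i = 0; i < count; i++)
		ring[i].ps_pages = calloc(PS_PAGE_BUFFERS,
					  sizeof(struct ps_page));

	/* the same logical slot, reached two ways */
	i = 3; j = 2;
	printf("flat slot:      %p\n", (void *)&flat[i * PS_PAGE_BUFFERS + j]);
	printf("per-entry slot: %p\n", (void *)&ring[i].ps_pages[j]);

	for (i = 0; i < count; i++)
		free(ring[i].ps_pages);
	free(ring);
	free(flat);
	return 0;
}

Both layouts hold one ps_page per (descriptor, page) pair; the per-entry form keeps each entry's pages with their owner and drops the multiplication from every lookup.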
@@ -334,94 +333,6 @@ no_buffers:
 }
 
 /**
- * e1000_alloc_rx_buffers_jumbo - Replace used jumbo receive buffers
- *
- * @adapter: address of board private structure
- * @cleaned_count: number of buffers to allocate this pass
- **/
-static void e1000_alloc_rx_buffers_jumbo(struct e1000_adapter *adapter,
-					 int cleaned_count)
-{
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
-	struct e1000_ring *rx_ring = adapter->rx_ring;
-	struct e1000_rx_desc *rx_desc;
-	struct e1000_buffer *buffer_info;
-	struct sk_buff *skb;
-	unsigned int i;
-	unsigned int bufsz = 256 -
-			     16 /* for skb_reserve */ -
-			     NET_IP_ALIGN;
-
-	i = rx_ring->next_to_use;
-	buffer_info = &rx_ring->buffer_info[i];
-
-	while (cleaned_count--) {
-		skb = buffer_info->skb;
-		if (skb) {
-			skb_trim(skb, 0);
-			goto check_page;
-		}
-
-		skb = netdev_alloc_skb(netdev, bufsz);
-		if (!skb) {
-			/* Better luck next round */
-			adapter->alloc_rx_buff_failed++;
-			break;
-		}
-
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
-		buffer_info->skb = skb;
-check_page:
-		/* allocate a new page if necessary */
-		if (!buffer_info->page) {
-			buffer_info->page = alloc_page(GFP_ATOMIC);
-			if (!buffer_info->page) {
-				adapter->alloc_rx_buff_failed++;
-				break;
-			}
-		}
-
-		if (!buffer_info->dma)
-			buffer_info->dma = pci_map_page(pdev,
-							buffer_info->page, 0,
-							PAGE_SIZE,
-							PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(buffer_info->dma)) {
-			dev_err(&adapter->pdev->dev, "RX DMA page map failed\n");
-			adapter->rx_dma_failed++;
-			break;
-		}
-
-		rx_desc = E1000_RX_DESC(*rx_ring, i);
-		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
-
-		i++;
-		if (i == rx_ring->count)
-			i = 0;
-		buffer_info = &rx_ring->buffer_info[i];
-	}
-
-	if (rx_ring->next_to_use != i) {
-		rx_ring->next_to_use = i;
-		if (i-- == 0)
-			i = (rx_ring->count - 1);
-
-		/* Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch.  (Only
-		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64). */
-		wmb();
-		writel(i, adapter->hw.hw_addr + rx_ring->tail);
-	}
-}
-
-/**
  * e1000_clean_rx_irq - Send received data up the network stack; legacy
  * @adapter: board private structure
  *
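With e1000_alloc_rx_buffers_jumbo() removed, jumbo MTUs fall to the packet-split allocator above. One detail of the deleted tail update worth keeping in mind: the tail register gets the index of the last filled descriptor, i.e. next_to_use - 1 with wrap-around, written only after wmb() has ordered the descriptor writes. A standalone sketch of just that index arithmetic (the ring size is illustrative):

#include <stdio.h>

#define RING_COUNT 256	/* illustrative ring size */

/* tail = the slot just before next_to_use, wrapping below index 0 */
static unsigned int tail_for(unsigned int next_to_use)
{
	unsigned int i = next_to_use;

	if (i-- == 0)		/* post-decrement, as in the removed code */
		i = RING_COUNT - 1;
	return i;
}

int main(void)
{
	printf("next_to_use = 5 -> tail = %u\n", tail_for(5));	/* 4 */
	printf("next_to_use = 0 -> tail = %u\n", tail_for(0));	/* 255 */
	return 0;
}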
@@ -495,10 +406,6 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 			goto next_desc;
 		}
 
-		/* adjust length to remove Ethernet CRC */
-		length -= 4;
-
-		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += length;
 		total_rx_packets++;
 
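The manual CRC adjustment can go because a later hunk in this same patch sets E1000_RCTL_SECRC, so the MAC strips the 4-byte FCS before DMA and the descriptor length already excludes it. A small sketch of the accounting, assuming a standard 1518-byte maximum frame:

#include <stdio.h>

#define ETH_FCS_LEN 4	/* Ethernet frame check sequence, bytes */

int main(void)
{
	unsigned int wire_len = 1518;	/* frame on the wire, incl. FCS */

	/* before: the FCS is DMA'd and the driver subtracts it by hand */
	unsigned int old_payload = wire_len - ETH_FCS_LEN;

	/* after: SECRC strips the FCS in hardware, length arrives correct */
	unsigned int new_dma_len = wire_len - ETH_FCS_LEN;

	printf("old: dma=%u, payload=%u\n", wire_len, old_payload);
	printf("new: dma=%u, payload=%u\n", new_dma_len, new_dma_len);
	return 0;
}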
@@ -554,15 +461,6 @@ next_desc:
 	return cleaned;
 }
 
-static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
-			       u16 length)
-{
-	bi->page = NULL;
-	skb->len += length;
-	skb->data_len += length;
-	skb->truesize += length;
-}
-
 static void e1000_put_txbuf(struct e1000_adapter *adapter,
 			    struct e1000_buffer *buffer_info)
 {
@@ -699,174 +597,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 }
 
 /**
- * e1000_clean_rx_irq_jumbo - Send received data up the network stack; legacy
- * @adapter: board private structure
- *
- * the return value indicates whether actual cleaning was done, there
- * is no guarantee that everything was cleaned
- **/
-static bool e1000_clean_rx_irq_jumbo(struct e1000_adapter *adapter,
-				     int *work_done, int work_to_do)
-{
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
-	struct e1000_ring *rx_ring = adapter->rx_ring;
-	struct e1000_rx_desc *rx_desc, *next_rxd;
-	struct e1000_buffer *buffer_info, *next_buffer;
-	u32 length;
-	unsigned int i;
-	int cleaned_count = 0;
-	bool cleaned = 0;
-	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-
-	i = rx_ring->next_to_clean;
-	rx_desc = E1000_RX_DESC(*rx_ring, i);
-	buffer_info = &rx_ring->buffer_info[i];
-
-	while (rx_desc->status & E1000_RXD_STAT_DD) {
-		struct sk_buff *skb;
-		u8 status;
-
-		if (*work_done >= work_to_do)
-			break;
-		(*work_done)++;
-
-		status = rx_desc->status;
-		skb = buffer_info->skb;
-		buffer_info->skb = NULL;
-
-		i++;
-		if (i == rx_ring->count)
-			i = 0;
-		next_rxd = E1000_RX_DESC(*rx_ring, i);
-		prefetch(next_rxd);
-
-		next_buffer = &rx_ring->buffer_info[i];
-
-		cleaned = 1;
-		cleaned_count++;
-		pci_unmap_page(pdev,
-			       buffer_info->dma,
-			       PAGE_SIZE,
-			       PCI_DMA_FROMDEVICE);
-		buffer_info->dma = 0;
-
-		length = le16_to_cpu(rx_desc->length);
-
-		/* errors is only valid for DD + EOP descriptors */
-		if ((status & E1000_RXD_STAT_EOP) &&
-		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
-			/* recycle both page and skb */
-			buffer_info->skb = skb;
-			/* an error means any chain goes out the window too */
-			if (rx_ring->rx_skb_top)
-				dev_kfree_skb(rx_ring->rx_skb_top);
-			rx_ring->rx_skb_top = NULL;
-			goto next_desc;
-		}
-
-#define rxtop rx_ring->rx_skb_top
-		if (!(status & E1000_RXD_STAT_EOP)) {
-			/* this descriptor is only the beginning (or middle) */
-			if (!rxtop) {
-				/* this is the beginning of a chain */
-				rxtop = skb;
-				skb_fill_page_desc(rxtop, 0, buffer_info->page,
-						   0, length);
-			} else {
-				/* this is the middle of a chain */
-				skb_fill_page_desc(rxtop,
-						   skb_shinfo(rxtop)->nr_frags,
-						   buffer_info->page, 0,
-						   length);
-				/* re-use the skb, only consumed the page */
-				buffer_info->skb = skb;
-			}
-			e1000_consume_page(buffer_info, rxtop, length);
-			goto next_desc;
-		} else {
-			if (rxtop) {
-				/* end of the chain */
-				skb_fill_page_desc(rxtop,
-						   skb_shinfo(rxtop)->nr_frags,
-						   buffer_info->page, 0, length);
-				/* re-use the current skb, we only consumed the
-				 * page */
-				buffer_info->skb = skb;
-				skb = rxtop;
-				rxtop = NULL;
-				e1000_consume_page(buffer_info, skb, length);
-			} else {
-				/* no chain, got EOP, this buf is the packet
-				 * copybreak to save the put_page/alloc_page */
-				if (length <= copybreak &&
-				    skb_tailroom(skb) >= length) {
-					u8 *vaddr;
-					vaddr = kmap_atomic(buffer_info->page,
-							    KM_SKB_DATA_SOFTIRQ);
-					memcpy(skb_tail_pointer(skb),
-					       vaddr, length);
-					kunmap_atomic(vaddr,
-						      KM_SKB_DATA_SOFTIRQ);
-					/* re-use the page, so don't erase
-					 * buffer_info->page */
-					skb_put(skb, length);
-				} else {
-					skb_fill_page_desc(skb, 0,
-							   buffer_info->page, 0,
-							   length);
-					e1000_consume_page(buffer_info, skb,
-							   length);
-				}
-			}
-		}
-
-		/* Receive Checksum Offload XXX recompute due to CRC strip? */
-		e1000_rx_checksum(adapter,
-				  (u32)(status) |
-				  ((u32)(rx_desc->errors) << 24),
-				  le16_to_cpu(rx_desc->csum), skb);
-
-		pskb_trim(skb, skb->len - 4);
-
-		/* probably a little skewed due to removing CRC */
-		total_rx_bytes += skb->len;
-		total_rx_packets++;
-
-		/* eth type trans needs skb->data to point to something */
-		if (!pskb_may_pull(skb, ETH_HLEN)) {
-			ndev_err(netdev, "__pskb_pull_tail failed.\n");
-			dev_kfree_skb(skb);
-			goto next_desc;
-		}
-
-		e1000_receive_skb(adapter, netdev, skb, status, rx_desc->special);
-
-next_desc:
-		rx_desc->status = 0;
-
-		/* return some buffers to hardware, one at a time is too slow */
-		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
-			adapter->alloc_rx_buf(adapter, cleaned_count);
-			cleaned_count = 0;
-		}
-
-		/* use prefetched values */
-		rx_desc = next_rxd;
-		buffer_info = next_buffer;
-	}
-	rx_ring->next_to_clean = i;
-
-	cleaned_count = e1000_desc_unused(rx_ring);
-	if (cleaned_count)
-		adapter->alloc_rx_buf(adapter, cleaned_count);
-
-	adapter->total_rx_packets += total_rx_packets;
-	adapter->total_rx_bytes += total_rx_bytes;
-	return cleaned;
-}
-
-/**
  * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
  * @adapter: board private structure
  *
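The deleted jumbo cleanup routine accumulated page-sized fragments on a single skb (rx_ring->rx_skb_top) until EOP, with a copybreak shortcut: frames that fit in the skb's tailroom were memcpy'd so the DMA page could be recycled rather than reallocated. A standalone sketch of that heuristic (256 is the driver's copybreak default; the tailroom figures below are illustrative):

#include <stdio.h>

static unsigned int copybreak = 256;	/* module parameter in the driver */

/* copy into the skb head only when it is cheap and there is room */
static int should_copy(unsigned int length, unsigned int tailroom)
{
	return length <= copybreak && tailroom >= length;
}

int main(void)
{
	printf("  64-byte frame, 192 tailroom: %s\n",
	       should_copy(64, 192) ? "copy" : "attach page fragment");
	printf("1500-byte frame, 192 tailroom: %s\n",
	       should_copy(1500, 192) ? "copy" : "attach page fragment");
	return 0;
}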
@@ -953,7 +683,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		    ((length + l1) <= adapter->rx_ps_bsize0)) {
 			u8 *vaddr;
 
-			ps_page = &rx_ring->ps_pages[i * PS_PAGE_BUFFERS];
+			ps_page = &buffer_info->ps_pages[0];
 
 			/* there is no documentation about how to call
 			 * kmap_atomic, so we can't hold the mapping
@@ -965,8 +695,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
 			pci_dma_sync_single_for_device(pdev, ps_page->dma,
 						       PAGE_SIZE, PCI_DMA_FROMDEVICE);
-			/* remove the CRC */
-			l1 -= 4;
+
 			skb_put(skb, l1);
 			goto copydone;
 		} /* if */
@@ -977,7 +706,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		if (!length)
 			break;
 
-		ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + j];
+		ps_page = &buffer_info->ps_pages[j];
 		pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
 			       PCI_DMA_FROMDEVICE);
 		ps_page->dma = 0;
@@ -988,10 +717,6 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 			skb->truesize += length;
 		}
 
-		/* strip the ethernet crc, problem is we're using pages now so
-		 * this whole operation can get a little cpu intensive */
-		pskb_trim(skb, skb->len - 4);
-
 copydone:
 		total_rx_bytes += skb->len;
 		total_rx_packets++;
@@ -1043,7 +768,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 	struct e1000_buffer *buffer_info;
 	struct e1000_ps_page *ps_page;
 	struct pci_dev *pdev = adapter->pdev;
-	unsigned long size;
 	unsigned int i, j;
 
 	/* Free all the Rx ring sk_buffs */
@@ -1054,9 +778,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 			pci_unmap_single(pdev, buffer_info->dma,
 					 adapter->rx_buffer_len,
 					 PCI_DMA_FROMDEVICE);
-		else if (adapter->clean_rx == e1000_clean_rx_irq_jumbo)
-			pci_unmap_page(pdev, buffer_info->dma,
-				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
 		else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
 			pci_unmap_single(pdev, buffer_info->dma,
 					 adapter->rx_ps_bsize0,
@@ -1064,19 +785,13 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 			buffer_info->dma = 0;
 		}
 
-		if (buffer_info->page) {
-			put_page(buffer_info->page);
-			buffer_info->page = NULL;
-		}
-
 		if (buffer_info->skb) {
 			dev_kfree_skb(buffer_info->skb);
 			buffer_info->skb = NULL;
 		}
 
 		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
-			ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
-						     + j];
+			ps_page = &buffer_info->ps_pages[j];
 			if (!ps_page->page)
 				break;
 			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
@@ -1093,12 +808,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 		rx_ring->rx_skb_top = NULL;
 	}
 
-	size = sizeof(struct e1000_buffer) * rx_ring->count;
-	memset(rx_ring->buffer_info, 0, size);
-	size = sizeof(struct e1000_ps_page)
-	       * (rx_ring->count * PS_PAGE_BUFFERS);
-	memset(rx_ring->ps_pages, 0, size);
-
 	/* Zero out the descriptor ring */
 	memset(rx_ring->desc, 0, rx_ring->size);
 
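The wholesale memsets are dropped rather than moved, presumably because each buffer_info entry now owns a kcalloc'd ps_pages pointer (allocated in e1000e_setup_rx_resources() below): zeroing the arrays would leak those allocations, so the function relies on the per-field cleanup above. A minimal stand-in for the hazard:

#include <stdlib.h>

struct entry { void *owned; };	/* stand-in for buffer_info->ps_pages */

int main(void)
{
	struct entry *tbl = calloc(4, sizeof(*tbl));

	tbl[0].owned = malloc(32);

	/* memset(tbl, 0, 4 * sizeof(*tbl)); -- would lose tbl[0].owned */

	free(tbl[0].owned);	/* field-by-field cleanup keeps ownership */
	free(tbl);
	return 0;
}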
@@ -1421,7 +1130,8 @@ err:
 int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
 {
 	struct e1000_ring *rx_ring = adapter->rx_ring;
-	int size, desc_len, err = -ENOMEM;
+	struct e1000_buffer *buffer_info;
+	int i, size, desc_len, err = -ENOMEM;
 
 	size = sizeof(struct e1000_buffer) * rx_ring->count;
 	rx_ring->buffer_info = vmalloc(size);
@@ -1429,11 +1139,14 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
 		goto err;
 	memset(rx_ring->buffer_info, 0, size);
 
-	rx_ring->ps_pages = kcalloc(rx_ring->count * PS_PAGE_BUFFERS,
-				    sizeof(struct e1000_ps_page),
-				    GFP_KERNEL);
-	if (!rx_ring->ps_pages)
-		goto err;
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
+						sizeof(struct e1000_ps_page),
+						GFP_KERNEL);
+		if (!buffer_info->ps_pages)
+			goto err_pages;
+	}
 
 	desc_len = sizeof(union e1000_rx_desc_packet_split);
 
@@ -1443,16 +1156,21 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
 
 	err = e1000_alloc_ring_dma(adapter, rx_ring);
 	if (err)
-		goto err;
+		goto err_pages;
 
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 	rx_ring->rx_skb_top = NULL;
 
 	return 0;
+
+err_pages:
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		kfree(buffer_info->ps_pages);
+	}
 err:
 	vfree(rx_ring->buffer_info);
-	kfree(rx_ring->ps_pages);
 	ndev_err(adapter->netdev,
 	"Unable to allocate memory for the transmit descriptor ring\n");
 	return err;
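Since allocation is now per ring entry, the error path gains a matching err_pages unwind that sweeps the whole ring: entries never reached are still NULL from the earlier memset, and kfree(NULL) is a no-op, so no failure cursor is needed. The same pattern in standalone C (free(NULL) is equally harmless):

#include <stdlib.h>

struct buf { int *pages; };

static int setup(struct buf *ring, unsigned int count, unsigned int per)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		ring[i].pages = calloc(per, sizeof(int));
		if (!ring[i].pages)
			goto err_pages;
	}
	return 0;

err_pages:
	for (i = 0; i < count; i++)
		free(ring[i].pages);	/* free(NULL) is a no-op */
	return -1;
}

int main(void)
{
	struct buf ring[8] = { { 0 } };	/* zeroed, like the vmalloc+memset */
	unsigned int i;

	if (setup(ring, 8, 4))
		return 1;
	for (i = 0; i < 8; i++)
		free(ring[i].pages);
	return 0;
}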
@@ -1518,15 +1236,17 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
+	int i;
 
 	e1000_clean_rx_ring(adapter);
 
+	for (i = 0; i < rx_ring->count; i++) {
+		kfree(rx_ring->buffer_info[i].ps_pages);
+	}
+
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
 
-	kfree(rx_ring->ps_pages);
-	rx_ring->ps_pages = NULL;
-
 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
 			  rx_ring->dma);
 	rx_ring->desc = NULL;
@@ -2032,9 +1752,11 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 
 	ew32(RFCTL, rfctl);
 
-	/* disable the stripping of CRC because it breaks
-	 * BMC firmware connected over SMBUS */
-	rctl |= E1000_RCTL_DTYP_PS /* | E1000_RCTL_SECRC */;
+	/* Enable Packet split descriptors */
+	rctl |= E1000_RCTL_DTYP_PS;
+
+	/* Enable hardware CRC frame stripping */
+	rctl |= E1000_RCTL_SECRC;
 
 	psrctl |= adapter->rx_ps_bsize0 >>
 		E1000_PSRCTL_BSIZE0_SHIFT;
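This is the hunk that makes the earlier CRC deletions safe: SECRC moves FCS stripping into hardware, replacing the old commented-out enable. A sketch of the two register bits involved, using the values as defined in the e1000 hardware headers:

#include <stdio.h>

#define E1000_RCTL_DTYP_PS 0x00000400	/* packet-split descriptor type */
#define E1000_RCTL_SECRC   0x04000000	/* strip Ethernet CRC in hardware */

int main(void)
{
	unsigned int rctl = 0;

	rctl |= E1000_RCTL_DTYP_PS;	/* enable packet split descriptors */
	rctl |= E1000_RCTL_SECRC;	/* enable hardware CRC stripping */

	printf("RCTL bits: 0x%08x\n", rctl);
	return 0;
}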
@@ -2077,11 +1799,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 			sizeof(union e1000_rx_desc_packet_split);
 		adapter->clean_rx = e1000_clean_rx_irq_ps;
 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
-	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + VLAN_HLEN + 4) {
-		rdlen = rx_ring->count *
-			sizeof(struct e1000_rx_desc);
-		adapter->clean_rx = e1000_clean_rx_irq_jumbo;
-		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_jumbo;
 	} else {
 		rdlen = rx_ring->count *
 			sizeof(struct e1000_rx_desc);
@@ -2326,8 +2043,11 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	struct e1000_mac_info *mac = &adapter->hw.mac;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 tx_space, min_tx_space, min_rx_space;
+	u32 pba;
 	u16 hwm;
 
+	ew32(PBA, adapter->pba);
+
 	if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
 		/* To maintain wire speed transmits, the Tx FIFO should be
 		 * large enough to accommodate two full transmit packets,
@@ -2335,11 +2055,11 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		 * the Rx FIFO should be large enough to accommodate at least
 		 * one full receive packet and is similarly rounded up and
 		 * expressed in KB. */
-		adapter->pba = er32(PBA);
+		pba = er32(PBA);
 		/* upper 16 bits has Tx packet buffer allocation size in KB */
-		tx_space = adapter->pba >> 16;
+		tx_space = pba >> 16;
 		/* lower 16 bits has Rx packet buffer allocation size in KB */
-		adapter->pba &= 0xffff;
+		pba &= 0xffff;
 		/* the tx fifo also stores 16 bytes of information about the tx
 		 * but don't include ethernet FCS because hardware appends it */
 		min_tx_space = (mac->max_frame_size +
@@ -2355,20 +2075,21 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		/* If current Tx allocation is less than the min Tx FIFO size,
 		 * and the min Tx FIFO size is less than the current Rx FIFO
 		 * allocation, take space away from current Rx allocation */
-		if (tx_space < min_tx_space &&
-		    ((min_tx_space - tx_space) < adapter->pba)) {
-			adapter->pba -= - (min_tx_space - tx_space);
+		if ((tx_space < min_tx_space) &&
+		    ((min_tx_space - tx_space) < pba)) {
+			pba -= min_tx_space - tx_space;
 
 			/* if short on rx space, rx wins and must trump tx
 			 * adjustment or use Early Receive if available */
-			if ((adapter->pba < min_rx_space) &&
+			if ((pba < min_rx_space) &&
 			    (!(adapter->flags & FLAG_HAS_ERT)))
 				/* ERT enabled in e1000_configure_rx */
-				adapter->pba = min_rx_space;
+				pba = min_rx_space;
 		}
+
+		ew32(PBA, pba);
 	}
 
-	ew32(PBA, adapter->pba);
 
 	/* flow control settings */
 	/* The high water mark must be low enough to fit one full frame
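Two things happen above: the rebalancing math moves onto a local pba so adapter->pba keeps its original value (the register is written once at entry and again after rebalancing), and the removed line's "-= -" double negative, which grew the allocation instead of shrinking it, becomes a plain subtraction. PBA packs the Tx buffer size in KB in its upper 16 bits and the Rx size in the lower 16. A standalone sketch of the arithmetic with illustrative numbers (the FLAG_HAS_ERT check is omitted):

#include <stdio.h>

int main(void)
{
	unsigned int pba = (8u << 16) | 24u;	/* 8 KB Tx, 24 KB Rx */
	unsigned int tx_space = pba >> 16;	/* upper 16 bits: Tx KB */
	unsigned int min_tx_space = 12;		/* e.g. demanded by a large MTU */
	unsigned int min_rx_space = 10;

	pba &= 0xffff;				/* lower 16 bits: Rx KB */

	/* steal the Tx shortfall from the Rx allocation ... */
	if (tx_space < min_tx_space && (min_tx_space - tx_space) < pba) {
		pba -= min_tx_space - tx_space;

		/* ... but never shrink Rx below its own minimum */
		if (pba < min_rx_space)
			pba = min_rx_space;
	}

	printf("Rx allocation after rebalance: %u KB\n", pba);	/* 20 */
	return 0;
}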
@@ -3624,9 +3345,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 	 * means we reserve 2 more, this pushes us to allocate from the next
 	 * larger slab size.
-	 * i.e. RXBUFFER_2048 --> size-4096 slab
-	 * however with the new *_jumbo* routines, jumbo receives will use
-	 * fragmented skbs */
+	 * i.e. RXBUFFER_2048 --> size-4096 slab */
 
 	if (max_frame <= 256)
 		adapter->rx_buffer_len = 256;