author		Auke Kok <auke-jan.h.kok@intel.com>	2007-10-25 16:58:03 -0400
committer	Jeff Garzik <jeff@garzik.org>	2007-10-29 05:47:08 -0400
commit		f920c186be09718542dfa77f6ebe1814be7d35cb (patch)
tree		f6409a308a8cc379964e7459e86f49da0f26b31d /drivers/net/e1000e
parent		140a74802894e9db57e5cd77ccff77e590ece5f3 (diff)
e1000e: Remove legacy jumbo frame receive code
The legacy jumbo frame receive code is no longer needed since all hardware
can do packet split and we're no longer offering a bypass kernel config
option to disable packet split. Remove the unused code.

Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
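The net effect is easiest to see in the e1000_configure_rx() hunk near the end of
the diff: with the jumbo branch gone, the driver only ever chooses between the
packet-split receive routines and the plain legacy ones, and jumbo MTUs simply
ride the packet-split path. Below is a minimal, self-contained C sketch of that
two-way selection; the struct layout, the ps_supported flag and the stub handler
names are hypothetical stand-ins for illustration, not the driver's actual types
or selection condition.

/* Illustrative model only: after this patch, rx-path selection is a
 * two-way choice (packet split vs. plain legacy), with no jumbo branch. */
#include <stdbool.h>
#include <stdio.h>

struct rx_adapter {
	bool ps_supported;	/* stand-in for "packet split is usable" */
	void (*clean_rx)(struct rx_adapter *adapter);
	void (*alloc_rx_buf)(struct rx_adapter *adapter, int count);
};

static void clean_rx_irq_ps(struct rx_adapter *adapter)
{
	(void)adapter;
	puts("cleaning with packet-split descriptors");
}

static void alloc_rx_buffers_ps(struct rx_adapter *adapter, int count)
{
	(void)adapter;
	printf("allocating %d packet-split buffers\n", count);
}

static void clean_rx_irq(struct rx_adapter *adapter)
{
	(void)adapter;
	puts("cleaning with legacy descriptors");
}

static void alloc_rx_buffers(struct rx_adapter *adapter, int count)
{
	(void)adapter;
	printf("allocating %d legacy buffers\n", count);
}

static void configure_rx(struct rx_adapter *adapter)
{
	if (adapter->ps_supported) {
		/* packet split also covers jumbo MTUs, so no third branch */
		adapter->clean_rx = clean_rx_irq_ps;
		adapter->alloc_rx_buf = alloc_rx_buffers_ps;
	} else {
		adapter->clean_rx = clean_rx_irq;
		adapter->alloc_rx_buf = alloc_rx_buffers;
	}
}

int main(void)
{
	struct rx_adapter adapter = { .ps_supported = true };

	configure_rx(&adapter);
	adapter.alloc_rx_buf(&adapter, 16);
	adapter.clean_rx(&adapter);
	return 0;
}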
Diffstat (limited to 'drivers/net/e1000e')
-rw-r--r--	drivers/net/e1000e/e1000.h	1
-rw-r--r--	drivers/net/e1000e/netdev.c	282
2 files changed, 1 insertion, 282 deletions
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 811eada595a1..473f78de4be0 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -122,7 +122,6 @@ struct e1000_buffer {
 			u16 next_to_watch;
 		};
 		/* RX */
-		struct page *page;
 		/* arrays of page information for packet split */
 		struct e1000_ps_page *ps_pages;
 	};
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 03fcc70e0198..4fd2e23720b6 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -333,94 +333,6 @@ no_buffers:
 }

 /**
- * e1000_alloc_rx_buffers_jumbo - Replace used jumbo receive buffers
- *
- * @adapter: address of board private structure
- * @cleaned_count: number of buffers to allocate this pass
- **/
-static void e1000_alloc_rx_buffers_jumbo(struct e1000_adapter *adapter,
-					 int cleaned_count)
-{
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
-	struct e1000_ring *rx_ring = adapter->rx_ring;
-	struct e1000_rx_desc *rx_desc;
-	struct e1000_buffer *buffer_info;
-	struct sk_buff *skb;
-	unsigned int i;
-	unsigned int bufsz = 256 -
-			     16 /*for skb_reserve */ -
-			     NET_IP_ALIGN;
-
-	i = rx_ring->next_to_use;
-	buffer_info = &rx_ring->buffer_info[i];
-
-	while (cleaned_count--) {
-		skb = buffer_info->skb;
-		if (skb) {
-			skb_trim(skb, 0);
-			goto check_page;
-		}
-
-		skb = netdev_alloc_skb(netdev, bufsz);
-		if (!skb) {
-			/* Better luck next round */
-			adapter->alloc_rx_buff_failed++;
-			break;
-		}
-
-		/* Make buffer alignment 2 beyond a 16 byte boundary
-		 * this will result in a 16 byte aligned IP header after
-		 * the 14 byte MAC header is removed
-		 */
-		skb_reserve(skb, NET_IP_ALIGN);
-
-		buffer_info->skb = skb;
-check_page:
-		/* allocate a new page if necessary */
-		if (!buffer_info->page) {
-			buffer_info->page = alloc_page(GFP_ATOMIC);
-			if (!buffer_info->page) {
-				adapter->alloc_rx_buff_failed++;
-				break;
-			}
-		}
-
-		if (!buffer_info->dma)
-			buffer_info->dma = pci_map_page(pdev,
-							buffer_info->page, 0,
-							PAGE_SIZE,
-							PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(buffer_info->dma)) {
-			dev_err(&adapter->pdev->dev, "RX DMA page map failed\n");
-			adapter->rx_dma_failed++;
-			break;
-		}
-
-		rx_desc = E1000_RX_DESC(*rx_ring, i);
-		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
-
-		i++;
-		if (i == rx_ring->count)
-			i = 0;
-		buffer_info = &rx_ring->buffer_info[i];
-	}
-
-	if (rx_ring->next_to_use != i) {
-		rx_ring->next_to_use = i;
-		if (i-- == 0)
-			i = (rx_ring->count - 1);
-
-		/* Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch. (Only
-		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64). */
-		wmb();
-		writel(i, adapter->hw.hw_addr + rx_ring->tail);
-	}
-}
-
-/**
  * e1000_clean_rx_irq - Send received data up the network stack; legacy
  * @adapter: board private structure
  *
@@ -549,15 +461,6 @@ next_desc:
 	return cleaned;
 }

-static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
-			       u16 length)
-{
-	bi->page = NULL;
-	skb->len += length;
-	skb->data_len += length;
-	skb->truesize += length;
-}
-
 static void e1000_put_txbuf(struct e1000_adapter *adapter,
 			     struct e1000_buffer *buffer_info)
 {
@@ -694,174 +597,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 }

 /**
- * e1000_clean_rx_irq_jumbo - Send received data up the network stack; legacy
- * @adapter: board private structure
- *
- * the return value indicates whether actual cleaning was done, there
- * is no guarantee that everything was cleaned
- **/
-static bool e1000_clean_rx_irq_jumbo(struct e1000_adapter *adapter,
-				     int *work_done, int work_to_do)
-{
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
-	struct e1000_ring *rx_ring = adapter->rx_ring;
-	struct e1000_rx_desc *rx_desc, *next_rxd;
-	struct e1000_buffer *buffer_info, *next_buffer;
-	u32 length;
-	unsigned int i;
-	int cleaned_count = 0;
-	bool cleaned = 0;
-	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-
-	i = rx_ring->next_to_clean;
-	rx_desc = E1000_RX_DESC(*rx_ring, i);
-	buffer_info = &rx_ring->buffer_info[i];
-
-	while (rx_desc->status & E1000_RXD_STAT_DD) {
-		struct sk_buff *skb;
-		u8 status;
-
-		if (*work_done >= work_to_do)
-			break;
-		(*work_done)++;
-
-		status = rx_desc->status;
-		skb = buffer_info->skb;
-		buffer_info->skb = NULL;
-
-		i++;
-		if (i == rx_ring->count)
-			i = 0;
-		next_rxd = E1000_RX_DESC(*rx_ring, i);
-		prefetch(next_rxd);
-
-		next_buffer = &rx_ring->buffer_info[i];
-
-		cleaned = 1;
-		cleaned_count++;
-		pci_unmap_page(pdev,
-			       buffer_info->dma,
-			       PAGE_SIZE,
-			       PCI_DMA_FROMDEVICE);
-		buffer_info->dma = 0;
-
-		length = le16_to_cpu(rx_desc->length);
-
-		/* errors is only valid for DD + EOP descriptors */
-		if ((status & E1000_RXD_STAT_EOP) &&
-		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
-			/* recycle both page and skb */
-			buffer_info->skb = skb;
-			/* an error means any chain goes out the window too */
-			if (rx_ring->rx_skb_top)
-				dev_kfree_skb(rx_ring->rx_skb_top);
-			rx_ring->rx_skb_top = NULL;
-			goto next_desc;
-		}
-
-#define rxtop rx_ring->rx_skb_top
-		if (!(status & E1000_RXD_STAT_EOP)) {
-			/* this descriptor is only the beginning (or middle) */
-			if (!rxtop) {
-				/* this is the beginning of a chain */
-				rxtop = skb;
-				skb_fill_page_desc(rxtop, 0, buffer_info->page,
-						   0, length);
-			} else {
-				/* this is the middle of a chain */
-				skb_fill_page_desc(rxtop,
-						   skb_shinfo(rxtop)->nr_frags,
-						   buffer_info->page, 0,
-						   length);
-				/* re-use the skb, only consumed the page */
-				buffer_info->skb = skb;
-			}
-			e1000_consume_page(buffer_info, rxtop, length);
-			goto next_desc;
-		} else {
-			if (rxtop) {
-				/* end of the chain */
-				skb_fill_page_desc(rxtop,
-						   skb_shinfo(rxtop)->nr_frags,
-						   buffer_info->page, 0, length);
-				/* re-use the current skb, we only consumed the
-				 * page */
-				buffer_info->skb = skb;
-				skb = rxtop;
-				rxtop = NULL;
-				e1000_consume_page(buffer_info, skb, length);
-			} else {
-				/* no chain, got EOP, this buf is the packet
-				 * copybreak to save the put_page/alloc_page */
-				if (length <= copybreak &&
-				    skb_tailroom(skb) >= length) {
-					u8 *vaddr;
-					vaddr = kmap_atomic(buffer_info->page,
-							    KM_SKB_DATA_SOFTIRQ);
-					memcpy(skb_tail_pointer(skb),
-					       vaddr, length);
-					kunmap_atomic(vaddr,
-						      KM_SKB_DATA_SOFTIRQ);
-					/* re-use the page, so don't erase
-					 * buffer_info->page */
-					skb_put(skb, length);
-				} else {
-					skb_fill_page_desc(skb, 0,
-							   buffer_info->page, 0,
-							   length);
-					e1000_consume_page(buffer_info, skb,
-							   length);
-				}
-			}
-		}
-
-		/* Receive Checksum Offload XXX recompute due to CRC strip? */
-		e1000_rx_checksum(adapter,
-				  (u32)(status) |
-				  ((u32)(rx_desc->errors) << 24),
-				  le16_to_cpu(rx_desc->csum), skb);
-
-		pskb_trim(skb, skb->len - 4);
-
-		/* probably a little skewed due to removing CRC */
-		total_rx_bytes += skb->len;
-		total_rx_packets++;
-
-		/* eth type trans needs skb->data to point to something */
-		if (!pskb_may_pull(skb, ETH_HLEN)) {
-			ndev_err(netdev, "__pskb_pull_tail failed.\n");
-			dev_kfree_skb(skb);
-			goto next_desc;
-		}
-
-		e1000_receive_skb(adapter, netdev, skb, status, rx_desc->special);
-
-next_desc:
-		rx_desc->status = 0;
-
-		/* return some buffers to hardware, one at a time is too slow */
-		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
-			adapter->alloc_rx_buf(adapter, cleaned_count);
-			cleaned_count = 0;
-		}
-
-		/* use prefetched values */
-		rx_desc = next_rxd;
-		buffer_info = next_buffer;
-	}
-	rx_ring->next_to_clean = i;
-
-	cleaned_count = e1000_desc_unused(rx_ring);
-	if (cleaned_count)
-		adapter->alloc_rx_buf(adapter, cleaned_count);
-
-	adapter->total_rx_packets += total_rx_packets;
-	adapter->total_rx_bytes += total_rx_bytes;
-	return cleaned;
-}
-
-/**
  * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
  * @adapter: board private structure
  *
@@ -1043,9 +778,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 				pci_unmap_single(pdev, buffer_info->dma,
 						 adapter->rx_buffer_len,
 						 PCI_DMA_FROMDEVICE);
-			else if (adapter->clean_rx == e1000_clean_rx_irq_jumbo)
-				pci_unmap_page(pdev, buffer_info->dma,
-					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
 			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
 				pci_unmap_single(pdev, buffer_info->dma,
 						 adapter->rx_ps_bsize0,
@@ -1053,11 +785,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 			buffer_info->dma = 0;
 		}

-		if (buffer_info->page) {
-			put_page(buffer_info->page);
-			buffer_info->page = NULL;
-		}
-
 		if (buffer_info->skb) {
 			dev_kfree_skb(buffer_info->skb);
 			buffer_info->skb = NULL;
@@ -2072,11 +1799,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 			sizeof(union e1000_rx_desc_packet_split);
 		adapter->clean_rx = e1000_clean_rx_irq_ps;
 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
-	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + VLAN_HLEN + 4) {
-		rdlen = rx_ring->count *
-			sizeof(struct e1000_rx_desc);
-		adapter->clean_rx = e1000_clean_rx_irq_jumbo;
-		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_jumbo;
 	} else {
 		rdlen = rx_ring->count *
 			sizeof(struct e1000_rx_desc);
@@ -3623,9 +3345,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 	 * means we reserve 2 more, this pushes us to allocate from the next
 	 * larger slab size.
-	 * i.e. RXBUFFER_2048 --> size-4096 slab
-	 * however with the new *_jumbo* routines, jumbo receives will use
-	 * fragmented skbs */
+	 * i.e. RXBUFFER_2048 --> size-4096 slab */

 	if (max_frame <= 256)
 		adapter->rx_buffer_len = 256;