author		Alexander Duyck <alexander.h.duyck@intel.com>	2010-11-16 22:26:48 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2010-11-16 22:26:48 -0500
commit		d5f398ed73522b9f76861af6553775c5851de0d0 (patch)
tree		63bc695c221d15332c4ff9ec69f2a6e66c903563 /drivers/net/ixgbe/ixgbe_main.c
parent		8ad494b0e59950e2b4e587c32cb67a2452795ea0 (diff)
ixgbe: cleanup ixgbe_alloc_rx_buffers
This change re-orders ixgbe_alloc_rx_buffers to make better use of the
packet split enabled flag. The new layout should require less branching,
since the per-descriptor logic is now down to a single if/else: either we
are handling packet split or we aren't.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
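For orientation, the control-flow shape the patch moves to can be modeled in
a few lines of plain C. This is only a user-space sketch of the new ordering
(skb first, then its mapping, then one packet-split branch per descriptor):
mock_buffer, refill, and the malloc stand-ins are illustrative inventions,
not ixgbe structures or kernel API; the real driver maps DMA addresses and
checks mapping errors as shown in the diff below.

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define RING_SIZE 8u

	/* Illustrative stand-in for struct ixgbe_rx_buffer. */
	struct mock_buffer {
		void *skb;      /* models bi->skb */
		void *dma;      /* models bi->dma */
		void *page_dma; /* models bi->page_dma (packet split only) */
	};

	/*
	 * Refill `count` descriptors starting at *next_to_use.  Mirrors the
	 * patched ordering: skb first, then its mapping, then one if/else on
	 * the packet-split flag.  Returns the number prepared before any
	 * failure (the driver's "goto no_buffers" path).
	 */
	static unsigned int refill(struct mock_buffer *ring,
				   unsigned int *next_to_use,
				   unsigned int count, bool ps_enabled)
	{
		unsigned int i = *next_to_use, done = 0;

		while (count--) {
			struct mock_buffer *bi = &ring[i];

			if (!bi->skb && !(bi->skb = malloc(64)))
				break;             /* alloc_rx_buff_failed++ */
			if (!bi->dma)
				bi->dma = bi->skb; /* models dma_map_single() */

			/* single branch per descriptor: packet split or not */
			if (ps_enabled && !bi->page_dma &&
			    !(bi->page_dma = malloc(2048)))
				break;             /* alloc_rx_page_failed++ */

			i = (i + 1) % RING_SIZE;
			done++;
		}
		*next_to_use = i;
		return done;
	}

	int main(void)
	{
		struct mock_buffer ring[RING_SIZE] = { 0 };
		unsigned int ntu = 0;

		printf("refilled %u descriptors\n",
		       refill(ring, &ntu, 5, true));
		return 0;
	}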
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	81
1 file changed, 42 insertions(+), 39 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 480f0b0f038a..e838479d2d95 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1010,63 +1010,70 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
  **/
 void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 			    struct ixgbe_ring *rx_ring,
-			    int cleaned_count)
+			    u16 cleaned_count)
 {
-	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbe_rx_buffer *bi;
-	unsigned int i;
-	unsigned int bufsz = rx_ring->rx_buf_len;
-
-	i = rx_ring->next_to_use;
-	bi = &rx_ring->rx_buffer_info[i];
+	struct sk_buff *skb;
+	u16 i = rx_ring->next_to_use;
 
 	while (cleaned_count--) {
 		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+		bi = &rx_ring->rx_buffer_info[i];
+		skb = bi->skb;
 
-		if (!bi->page_dma &&
-		    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
-			if (!bi->page) {
-				bi->page = netdev_alloc_page(netdev);
-				if (!bi->page) {
-					adapter->alloc_rx_page_failed++;
-					goto no_buffers;
-				}
-				bi->page_offset = 0;
-			} else {
-				/* use a half page if we're re-using */
-				bi->page_offset ^= (PAGE_SIZE / 2);
-			}
-
-			bi->page_dma = dma_map_page(&pdev->dev, bi->page,
-						    bi->page_offset,
-						    (PAGE_SIZE / 2),
-						    DMA_FROM_DEVICE);
-		}
-
-		if (!bi->skb) {
-			struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
-									bufsz);
-			bi->skb = skb;
-
+		if (!skb) {
+			skb = netdev_alloc_skb_ip_align(adapter->netdev,
+							rx_ring->rx_buf_len);
 			if (!skb) {
 				adapter->alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
 			/* initialize queue mapping */
 			skb_record_rx_queue(skb, rx_ring->queue_index);
+			bi->skb = skb;
 		}
 
 		if (!bi->dma) {
 			bi->dma = dma_map_single(&pdev->dev,
-						 bi->skb->data,
+						 skb->data,
 						 rx_ring->rx_buf_len,
 						 DMA_FROM_DEVICE);
+			if (dma_mapping_error(&pdev->dev, bi->dma)) {
+				adapter->alloc_rx_buff_failed++;
+				bi->dma = 0;
+				goto no_buffers;
+			}
 		}
-		/* Refresh the desc even if buffer_addrs didn't change because
-		 * each write-back erases this info. */
+
 		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+			if (!bi->page) {
+				bi->page = netdev_alloc_page(adapter->netdev);
+				if (!bi->page) {
+					adapter->alloc_rx_page_failed++;
+					goto no_buffers;
+				}
+			}
+
+			if (!bi->page_dma) {
+				/* use a half page if we're re-using */
+				bi->page_offset ^= PAGE_SIZE / 2;
+				bi->page_dma = dma_map_page(&pdev->dev,
+							    bi->page,
+							    bi->page_offset,
+							    PAGE_SIZE / 2,
+							    DMA_FROM_DEVICE);
+				if (dma_mapping_error(&pdev->dev,
+						      bi->page_dma)) {
+					adapter->alloc_rx_page_failed++;
+					bi->page_dma = 0;
+					goto no_buffers;
+				}
+			}
+
+			/* Refresh the desc even if buffer_addrs didn't change
+			 * because each write-back erases this info. */
 			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
 			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
 		} else {
@@ -1077,15 +1084,11 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
-		bi = &rx_ring->rx_buffer_info[i];
 	}
 
 no_buffers:
 	if (rx_ring->next_to_use != i) {
 		rx_ring->next_to_use = i;
-		if (i-- == 0)
-			i = (rx_ring->count - 1);
-
 		ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
 	}
 }