 drivers/net/ixgb/ixgb.h      |  1 +
 drivers/net/ixgb/ixgb_main.c | 74 ++++++++++++++++++++++-----------------
 2 files changed, 45 insertions(+), 30 deletions(-)
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index b9c37fdc8418..bdbaf5acccee 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -200,6 +200,7 @@ struct ixgb_adapter {
 	struct ixgb_hw hw;
 	u16 msg_enable;
 	struct ixgb_hw_stats stats;
+	uint32_t alloc_rx_buff_failed;
 #ifdef CONFIG_PCI_MSI
 	boolean_t have_msi;
 #endif
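
The header change adds one statistics field. A minimal userspace sketch of
the role it plays in the refill path further down (malloc() stands in for
dev_alloc_skb(); everything except the field name is illustrative):

#include <stdlib.h>

struct rx_stats {
	unsigned long alloc_rx_buff_failed;	/* mirrors the new field */
};

static void *alloc_rx_buffer(size_t len, struct rx_stats *stats)
{
	void *buf = malloc(len);

	if (!buf)
		stats->alloc_rx_buff_failed++;	/* leave a trace, then give up */
	return buf;
}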
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 0905a8240939..27034b3ba80b 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -929,17 +929,20 @@ ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
 			struct ixgb_buffer *buffer_info)
 {
 	struct pci_dev *pdev = adapter->pdev;
-	if(buffer_info->dma) {
-		pci_unmap_page(pdev,
-			       buffer_info->dma,
-			       buffer_info->length,
-			       PCI_DMA_TODEVICE);
-		buffer_info->dma = 0;
-	}
-	if(buffer_info->skb) {
+
+	if (buffer_info->dma)
+		pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
+			       PCI_DMA_TODEVICE);
+
+	if (buffer_info->skb)
 		dev_kfree_skb_any(buffer_info->skb);
-		buffer_info->skb = NULL;
-	}
+
+	buffer_info->skb = NULL;
+	buffer_info->dma = 0;
+	buffer_info->time_stamp = 0;
+	/* these fields must always be initialized in tx
+	 * buffer_info->length = 0;
+	 * buffer_info->next_to_watch = 0; */
 }
 
 /**
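
The TX cleanup helper is restructured so that releasing resources and
resetting bookkeeping become separate steps, with the reset done
unconditionally; a slot can no longer end up half-cleared. A compilable
userspace sketch of the same pattern, with stand-in types (free() in
place of dev_kfree_skb_any(), the DMA unmap reduced to a comment):

#include <stdlib.h>

struct tx_slot {
	void *skb;		/* stand-in for the sk_buff pointer */
	unsigned long dma;	/* stand-in for the DMA handle */
	unsigned long time_stamp;
};

static void reset_tx_slot(struct tx_slot *s)
{
	if (s->dma)
		;		/* driver: pci_unmap_page(...) */
	if (s->skb)
		free(s->skb);	/* driver: dev_kfree_skb_any(...) */

	/* always reset, as the patch does; length and next_to_watch are
	 * left alone because the TX map path rewrites them every time */
	s->skb = NULL;
	s->dma = 0;
	s->time_stamp = 0;
}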
@@ -1314,6 +1317,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 				       size,
 				       PCI_DMA_TODEVICE);
 		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = 0;
 
 		len -= size;
 		offset += size;
@@ -1345,6 +1349,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 				       size,
 				       PCI_DMA_TODEVICE);
 		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = 0;
 
 		len -= size;
 		offset += size;
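
Both mapping loops now zero next_to_watch for every buffer they fill. In
this driver family, TX cleanup polls the descriptor named by a packet's
next_to_watch for its Done (DD) bit, so a stale index surviving in a
reused slot could make cleanup look at the wrong descriptor. A simplified
sketch of that bookkeeping (the types and the DD-bit layout are
assumptions made for illustration):

struct tx_buf {
	unsigned int next_to_watch;	/* descriptor index to poll for DD */
};

struct tx_desc {
	unsigned char status;		/* bit 0: descriptor done (DD) */
};

/* nonzero once hardware has finished the packet ending at the watched
 * descriptor; a stale next_to_watch would make this test meaningless */
static int tx_packet_done(const struct tx_desc *ring, const struct tx_buf *buf)
{
	return ring[buf->next_to_watch].status & 1;
}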
@@ -1940,6 +1945,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 #endif
 		status = rx_desc->status;
 		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
 
 		prefetch(skb->data);
 
@@ -2013,7 +2019,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 rxdesc_done:
 		/* clean up descriptor, might be written over by hw */
 		rx_desc->status = 0;
-		buffer_info->skb = NULL;
 
 		/* use prefetched values */
 		rx_desc = next_rxd;
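
Taken together, the two RX hunks move the point where a slot's skb
pointer is cleared: it is detached as soon as the clean loop reads it,
rather than at rxdesc_done. After this change a non-NULL
buffer_info->skb means the buffer was never handed up, which is exactly
the invariant the refill path below relies on. A compilable sketch of
the ownership move (simplified types; free() stands in for the stack
consuming the buffer):

#include <stdlib.h>

struct rx_slot {
	void *skb;
};

static void clean_one(struct rx_slot *slot)
{
	void *skb = slot->skb;

	slot->skb = NULL;	/* detach now, not at the end of processing */
	if (skb)
		free(skb);	/* driver: hand the skb to the receive path */
}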
@@ -2053,12 +2058,18 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
 
 	/* leave three descriptors unused */
 	while(--cleancount > 2) {
-		rx_desc = IXGB_RX_DESC(*rx_ring, i);
-
-		skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
+		/* recycle! its good for you */
+		if (!(skb = buffer_info->skb))
+			skb = dev_alloc_skb(adapter->rx_buffer_len
+					    + NET_IP_ALIGN);
+		else {
+			skb_trim(skb, 0);
+			goto map_skb;
+		}
 
-		if(unlikely(!skb)) {
+		if (unlikely(!skb)) {
 			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
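
With that invariant in place, the refill loop can recycle: an skb still
attached to the slot is trimmed back to zero length and re-mapped instead
of allocating a fresh one, and a genuine allocation failure now bumps
alloc_rx_buff_failed instead of vanishing silently. A userspace analogue
of the recycle-or-allocate step (malloc() for dev_alloc_skb(), a length
field for skb_trim(skb, 0); the names are illustrative):

#include <stdlib.h>

struct rx_buf {
	char *data;
	size_t len;			/* logical length, like skb->len */
};

static char *get_rx_buffer(struct rx_buf *slot, size_t buf_len)
{
	if (slot->data) {
		slot->len = 0;		/* analogue of skb_trim(skb, 0) */
		return slot->data;	/* recycle the untouched buffer */
	}
	slot->data = malloc(buf_len);	/* otherwise allocate fresh */
	slot->len = 0;
	return slot->data;		/* NULL: better luck next round */
}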
@@ -2072,33 +2083,36 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
 
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
-		buffer_info->dma =
-			pci_map_single(pdev,
-				       skb->data,
-				       adapter->rx_buffer_len,
-				       PCI_DMA_FROMDEVICE);
+map_skb:
+		buffer_info->dma = pci_map_single(pdev,
+						  skb->data,
+						  adapter->rx_buffer_len,
+						  PCI_DMA_FROMDEVICE);
 
+		rx_desc = IXGB_RX_DESC(*rx_ring, i);
 		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
 		/* guarantee DD bit not set now before h/w gets descriptor
 		 * this is the rest of the workaround for h/w double
 		 * writeback. */
 		rx_desc->status = 0;
 
-		if((i & ~(num_group_tail_writes- 1)) == i) {
-			/* Force memory writes to complete before letting h/w
-			 * know there are new descriptors to fetch. (Only
-			 * applicable for weak-ordered memory model archs,
-			 * such as IA-64). */
-			wmb();
-
-			IXGB_WRITE_REG(&adapter->hw, RDT, i);
-		}
 
 		if(++i == rx_ring->count) i = 0;
 		buffer_info = &rx_ring->buffer_info[i];
 	}
 
-	rx_ring->next_to_use = i;
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0))
+			i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch. (Only
+		 * applicable for weak-ordered memory model archs, such
+		 * as IA-64). */
+		wmb();
+		IXGB_WRITE_REG(&adapter->hw, RDT, i);
+	}
 }
 
 /**
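
Finally, the per-group tail writes inside the loop give way to a single
doorbell update per refill call: RDT is written only if the loop produced
anything, and it is pointed at the last initialized descriptor (hence the
wrap-around decrement) after a write barrier has ordered the descriptor
stores. A compilable sketch of that pattern (the ring layout and
write_tail() are stand-ins; __sync_synchronize() takes the place of
wmb()):

struct rx_ring_ex {
	unsigned int count;		/* descriptors in the ring */
	unsigned int next_to_use;	/* next slot software will fill */
};

static void write_tail(unsigned int tail)
{
	(void)tail;			/* driver: IXGB_WRITE_REG(hw, RDT, tail) */
}

static void publish_rx_tail(struct rx_ring_ex *ring, unsigned int i)
{
	if (ring->next_to_use == i)
		return;			/* nothing refilled: skip the MMIO write */
	ring->next_to_use = i;
	if (i-- == 0)
		i = ring->count - 1;	/* wrap back to the last descriptor */

	__sync_synchronize();		/* driver: wmb() before the doorbell */
	write_tail(i);
}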