author | Jesse Brandeburg <jesse.brandeburg@intel.com> | 2008-08-26 07:27:13 -0400 |
---|---|---|
committer | Jeff Garzik <jgarzik@redhat.com> | 2008-09-03 10:03:33 -0400 |
commit | e01c31a5f7eb4f8a147cf6205f0f2ef11146068d (patch) | |
tree | 1501c609be3f7931341f42d8edb99180f96c5be0 /drivers/net/ixgbe/ixgbe_main.c | |
parent | 2b9ade935cd2be6db26f5445656950bc3da7055d (diff) | |
ixgbe: Implement Tx Head Writeback
Enable Tx Head Writeback in the hardware. This helps performance by
removing the adapter's writebacks to individual descriptors on transmit
completion: instead of stamping a done bit into every descriptor, the
hardware reports progress by writing the ring's head index to a single
location in host memory.
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r-- | drivers/net/ixgbe/ixgbe_main.c | 198 |
1 file changed, 113 insertions(+), 85 deletions(-)
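
For readers of the diff below: head writeback changes how the driver learns that transmit descriptors are finished. Instead of the NIC writing a Descriptor Done (DD) status bit into every descriptor (one PCIe write per completion), it periodically DMA-writes the ring's head index into a single u32 placed just past the last descriptor; everything behind that index can be reclaimed. A minimal sketch of the two completion models, using simplified hypothetical types rather than the real ixgbe structures:

```c
/* Illustrative sketch only; simplified types, not the driver's structures. */
#include <stdbool.h>
#include <stdint.h>

struct tx_desc {
	uint64_t buffer_addr;
	uint32_t cmd_type_len;
	uint32_t status;	/* written back per descriptor (old model) */
};
#define TXD_STAT_DD 0x1		/* "descriptor done" bit */

struct tx_ring {
	struct tx_desc *desc;	/* count descriptors + one u32 head slot */
	uint32_t count;
	uint32_t next_to_clean;
};

/* Old model: poll each descriptor's DD bit. */
static bool desc_done(const struct tx_ring *ring, uint32_t i)
{
	return ring->desc[i].status & TXD_STAT_DD;
}

/* New model: read the head index the hardware DMA-wrote just past the
 * ring; every descriptor strictly before head is complete. */
static uint32_t read_hw_head(const struct tx_ring *ring)
{
	/* volatile: the device updates this word behind the CPU's back */
	return *(const volatile uint32_t *)(ring->desc + ring->count);
}

static void clean_completed(struct tx_ring *ring)
{
	uint32_t head = read_hw_head(ring);
	uint32_t i = ring->next_to_clean;

	while (i != head) {
		/* unmap DMA and free the skb for slot i here */
		if (++i == ring->count)
			i = 0;
	}
	ring->next_to_clean = i;
}
```
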
```diff
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index cba7a38bf6bb..95d00416093c 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -148,8 +148,7 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
 					   *tx_buffer_info)
 {
 	if (tx_buffer_info->dma) {
-		pci_unmap_page(adapter->pdev,
-			       tx_buffer_info->dma,
+		pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
 			       tx_buffer_info->length, PCI_DMA_TODEVICE);
 		tx_buffer_info->dma = 0;
 	}
@@ -162,32 +161,35 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
 
 static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 				       struct ixgbe_ring *tx_ring,
-				       unsigned int eop,
-				       union ixgbe_adv_tx_desc *eop_desc)
+				       unsigned int eop)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 head, tail;
+
 	/* Detect a transmit hang in hardware, this serializes the
-	 * check with the clearing of time_stamp and movement of i */
+	 * check with the clearing of time_stamp and movement of eop */
+	head = IXGBE_READ_REG(hw, tx_ring->head);
+	tail = IXGBE_READ_REG(hw, tx_ring->tail);
 	adapter->detect_tx_hung = false;
-	if (tx_ring->tx_buffer_info[eop].dma &&
+	if ((head != tail) &&
+	    tx_ring->tx_buffer_info[eop].time_stamp &&
 	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
 	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
 		/* detected Tx unit hang */
+		union ixgbe_adv_tx_desc *tx_desc;
+		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
 		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
-			"  TDH                  <%x>\n"
-			"  TDT                  <%x>\n"
+			"  Tx Queue             <%d>\n"
+			"  TDH, TDT             <%x>, <%x>\n"
 			"  next_to_use          <%x>\n"
 			"  next_to_clean        <%x>\n"
 			"tx_buffer_info[next_to_clean]\n"
 			"  time_stamp           <%lx>\n"
-			"  next_to_watch        <%x>\n"
-			"  jiffies              <%lx>\n"
-			"  next_to_watch.status <%x>\n",
-			readl(adapter->hw.hw_addr + tx_ring->head),
-			readl(adapter->hw.hw_addr + tx_ring->tail),
-			tx_ring->next_to_use,
-			tx_ring->next_to_clean,
-			tx_ring->tx_buffer_info[eop].time_stamp,
-			eop, jiffies, eop_desc->wb.status);
+			"  jiffies              <%lx>\n",
+			tx_ring->queue_index,
+			head, tail,
+			tx_ring->next_to_use, eop,
+			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
 		return true;
 	}
 
```
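
Because the DD bit is no longer written, the reworked hang check above cannot key off descriptor status. It instead reports a hang when the hardware still has queued work (the head register differs from the tail register), the oldest pending buffer was time-stamped more than a second ago, and the Tx unit is not merely paused by flow control (the TFCS.TXOFF test). A condensed, hedged restatement of the predicate, with plain booleans standing in for the register reads and jiffies arithmetic:

```c
#include <stdbool.h>
#include <stdint.h>

/* tdh/tdt mirror the hardware head/tail registers; "stale" captures
 * time_after(jiffies, time_stamp + HZ); "paused" captures TFCS.TXOFF. */
static bool tx_hung(uint32_t tdh, uint32_t tdt, bool stale, bool paused)
{
	return (tdh != tdt)	/* hardware still has descriptors pending */
	    && stale		/* oldest pending buffer is over a second old */
	    && !paused;		/* not merely halted by a PAUSE frame */
}
```
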
```diff
@@ -203,65 +205,75 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
 	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
 
+#define GET_TX_HEAD_FROM_RING(ring) (\
+	*(volatile u32 *) \
+	((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
+static void ixgbe_tx_timeout(struct net_device *netdev);
+
 /**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
  * @adapter: board private structure
+ * @tx_ring: tx ring to clean
  **/
 static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 			       struct ixgbe_ring *tx_ring)
 {
-	struct net_device *netdev = adapter->netdev;
-	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
+	union ixgbe_adv_tx_desc *tx_desc;
 	struct ixgbe_tx_buffer *tx_buffer_info;
-	unsigned int i, eop;
-	bool cleaned = false;
-	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+	struct net_device *netdev = adapter->netdev;
+	struct sk_buff *skb;
+	unsigned int i;
+	u32 head, oldhead;
+	unsigned int count = 0;
+	unsigned int total_bytes = 0, total_packets = 0;
 
+	rmb();
+	head = GET_TX_HEAD_FROM_RING(tx_ring);
+	head = le32_to_cpu(head);
 	i = tx_ring->next_to_clean;
-	eop = tx_ring->tx_buffer_info[i].next_to_watch;
-	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
-	while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
-		cleaned = false;
-		while (!cleaned) {
+	while (1) {
+		while (i != head) {
 			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
-			cleaned = (i == eop);
+			skb = tx_buffer_info->skb;
 
-			tx_ring->stats.bytes += tx_buffer_info->length;
-			if (cleaned) {
-				struct sk_buff *skb = tx_buffer_info->skb;
+			if (skb) {
 				unsigned int segs, bytecount;
+
+				/* gso_segs is currently only valid for tcp */
 				segs = skb_shinfo(skb)->gso_segs ?: 1;
 				/* multiply data chunks by size of headers */
 				bytecount = ((segs - 1) * skb_headlen(skb)) +
 					    skb->len;
-				total_tx_packets += segs;
-				total_tx_bytes += bytecount;
+				total_packets += segs;
+				total_bytes += bytecount;
 			}
+
 			ixgbe_unmap_and_free_tx_resource(adapter,
 							 tx_buffer_info);
-			tx_desc->wb.status = 0;
 
 			i++;
 			if (i == tx_ring->count)
 				i = 0;
-		}
-
-		tx_ring->stats.packets++;
-
-		eop = tx_ring->tx_buffer_info[i].next_to_watch;
-		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
-
-		/* weight of a sort for tx, avoid endless transmit cleanup */
-		if (total_tx_packets >= tx_ring->work_limit)
-			break;
-	}
 
+			count++;
+			if (count == tx_ring->count)
+				goto done_cleaning;
+		}
+		oldhead = head;
+		rmb();
+		head = GET_TX_HEAD_FROM_RING(tx_ring);
+		head = le32_to_cpu(head);
+		if (head == oldhead)
+			goto done_cleaning;
+	} /* while (1) */
+
+done_cleaning:
 	tx_ring->next_to_clean = i;
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
-	if (total_tx_packets && netif_carrier_ok(netdev) &&
-	    (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+	if (unlikely(count && netif_carrier_ok(netdev) &&
+	             (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
```
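
The rewritten ixgbe_clean_tx_irq() above is a two-level loop: the inner loop cleans until the software index catches up with the last head value read; the outer loop then re-reads the head (rmb() keeps the read ordered against the preceding descriptor accesses) and goes around again if the hardware advanced it in the meantime, with a count guard bounding total work to one full ring. Reusing struct tx_ring and read_hw_head() from the earlier sketch, the shape is roughly:

```c
/* Sketch of the two-level cleanup loop; read_hw_head() stands in for
 * rmb() + GET_TX_HEAD_FROM_RING() + le32_to_cpu(). */
static unsigned int clean_tx_sketch(struct tx_ring *ring)
{
	uint32_t head = read_hw_head(ring);
	uint32_t oldhead;
	uint32_t i = ring->next_to_clean;
	unsigned int count = 0;

	for (;;) {
		while (i != head) {
			/* unmap DMA, free skb, accumulate stats for slot i */
			if (++i == ring->count)
				i = 0;
			if (++count == ring->count)
				goto done;	/* never clean more than one ring */
		}
		oldhead = head;
		head = read_hw_head(ring);	/* hardware may have moved on */
		if (head == oldhead)
			break;			/* caught up; nothing new */
	}
done:
	ring->next_to_clean = i;
	return count;				/* descriptors reclaimed */
}
```
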
```diff
@@ -269,23 +281,32 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
-			adapter->restart_queue++;
+			++adapter->restart_queue;
 		}
 	}
 
-	if (adapter->detect_tx_hung)
-		if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
-			netif_stop_subqueue(netdev, tx_ring->queue_index);
+	if (adapter->detect_tx_hung) {
+		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
+			/* schedule immediate reset if we believe we hung */
+			DPRINTK(PROBE, INFO,
+				"tx hang %d detected, resetting adapter\n",
+				adapter->tx_timeout_count + 1);
+			ixgbe_tx_timeout(adapter->netdev);
+		}
+	}
 
-	if (total_tx_packets >= tx_ring->work_limit)
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
+	/* re-arm the interrupt */
+	if ((total_packets >= tx_ring->work_limit) ||
+	    (count == tx_ring->count))
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);
 
-	tx_ring->total_bytes += total_tx_bytes;
-	tx_ring->total_packets += total_tx_packets;
-	adapter->net_stats.tx_bytes += total_tx_bytes;
-	adapter->net_stats.tx_packets += total_tx_packets;
-	cleaned = total_tx_packets ? true : false;
-	return cleaned;
+	tx_ring->total_bytes += total_bytes;
+	tx_ring->total_packets += total_packets;
+	tx_ring->stats.bytes += total_bytes;
+	tx_ring->stats.packets += total_packets;
+	adapter->net_stats.tx_bytes += total_bytes;
+	adapter->net_stats.tx_packets += total_packets;
+	return (total_packets ? true : false);
 }
 
 #ifdef CONFIG_DCA
```
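
One consequence of bounding the loop shows up at the end of the function above: if cleanup bailed out because it hit the work limit, or because it swept a full ring's worth of descriptors, completions may still be pending, so the driver writes the ring's vector bit into EICS to re-raise its own interrupt instead of waiting for new traffic. The decision itself reduces to a sketch like this (field names mirror the patch; the register write is elided):

```c
#include <stdbool.h>

/* true => write tx_ring->v_idx into IXGBE_EICS to fire the vector again */
static bool should_rearm(unsigned int total_packets, unsigned int count,
			 unsigned int work_limit, unsigned int ring_count)
{
	return (total_packets >= work_limit) || (count == ring_count);
}
```
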
```diff
@@ -1344,19 +1365,24 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 {
-	u64 tdba;
+	u64 tdba, tdwba;
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 i, j, tdlen, txctrl;
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
-		tdba = adapter->tx_ring[i].dma;
-		tdlen = adapter->tx_ring[i].count *
-			sizeof(union ixgbe_adv_tx_desc);
+		struct ixgbe_ring *ring = &adapter->tx_ring[i];
+		j = ring->reg_idx;
+		tdba = ring->dma;
+		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
 				(tdba & DMA_32BIT_MASK));
 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
+		tdwba = ring->dma +
+			(ring->count * sizeof(union ixgbe_adv_tx_desc));
+		tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
+		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
@@ -1365,9 +1391,9 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 		/* Disable Tx Head Writeback RO bit, since this hoses
 		 * bookkeeping if things aren't delivered in order.
 		 */
-		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
 		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
 	}
 }
 
```
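
The TDWBAL/TDWBAH pair programmed above tells the NIC where to DMA the head value: the ring's physical base address plus the size of all descriptors, i.e. the first byte after the ring, which matches the extra u32 the setup path now allocates (see the ixgbe_setup_tx_resources hunk below). The enable flag is OR'd into the low bits; treating it as bit 0 is an assumption about the register layout here. A sketch of the address math:

```c
#include <stdint.h>

#define TDWBAL_HEAD_WB_ENABLE	0x1	/* assumed low-bit enable flag */
#define ADV_TX_DESC_SIZE	16	/* union ixgbe_adv_tx_desc is 16 bytes */

/* Split the 64-bit writeback address into the two 32-bit registers the
 * way ixgbe_configure_tx() does. */
static void tdwba_regs(uint64_t ring_dma, uint32_t count,
		       uint32_t *tdwbal, uint32_t *tdwbah)
{
	uint64_t tdwba = ring_dma + (uint64_t)count * ADV_TX_DESC_SIZE;

	tdwba |= TDWBAL_HEAD_WB_ENABLE;
	*tdwbal = (uint32_t)(tdwba & 0xffffffffull);	/* DMA_32BIT_MASK */
	*tdwbah = (uint32_t)(tdwba >> 32);
}
```
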
```diff
@@ -1775,6 +1801,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		j = adapter->tx_ring[i].reg_idx;
 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
+		txdctl |= (8 << 16);
 		txdctl |= IXGBE_TXDCTL_ENABLE;
 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
 	}
```
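
The magic (8 << 16) sets the WTHRESH (writeback threshold) field of TXDCTL, which starts at bit 16; the exact field width used below is an assumption from the 82598 register layout. WTHRESH=8 lets the hardware accumulate eight completed descriptors before performing a head writeback, batching the PCIe writes:

```c
#include <stdint.h>

#define TXDCTL_WTHRESH_SHIFT	16
#define TXDCTL_WTHRESH_MASK	(0x7fu << TXDCTL_WTHRESH_SHIFT)	/* assumed width */

static uint32_t txdctl_set_wthresh(uint32_t txdctl, uint32_t wthresh)
{
	txdctl &= ~TXDCTL_WTHRESH_MASK;			/* clear any old value */
	txdctl |= (wthresh << TXDCTL_WTHRESH_SHIFT);	/* the patch uses 8 */
	return txdctl;
}
```
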
```diff
@@ -2487,38 +2515,38 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
  * Return 0 on success, negative on failure
  **/
 int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 			     struct ixgbe_ring *tx_ring)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	int size;
 
 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
 	tx_ring->tx_buffer_info = vmalloc(size);
-	if (!tx_ring->tx_buffer_info) {
-		DPRINTK(PROBE, ERR,
-		"Unable to allocate memory for the transmit descriptor ring\n");
-		return -ENOMEM;
-	}
+	if (!tx_ring->tx_buffer_info)
+		goto err;
 	memset(tx_ring->tx_buffer_info, 0, size);
 
 	/* round up to nearest 4K */
-	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
+	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) +
+			sizeof(u32);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
 	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
 					     &tx_ring->dma);
-	if (!tx_ring->desc) {
-		vfree(tx_ring->tx_buffer_info);
-		DPRINTK(PROBE, ERR,
-			"Memory allocation failed for the tx desc ring\n");
-		return -ENOMEM;
-	}
+	if (!tx_ring->desc)
+		goto err;
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
 	tx_ring->work_limit = tx_ring->count;
-
 	return 0;
+
+err:
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+	DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
+		"descriptor ring\n");
+	return -ENOMEM;
 }
 
 /**
```
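
The descriptor-ring allocation above grows by sizeof(u32) because the head index is written immediately after the last descriptor, inside the same coherent DMA buffer; the 4K round-up often absorbs the extra word anyway. A worked example, assuming a hypothetical 512-descriptor ring:

```c
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t count = 512;	/* assumed ring size, not a driver default */
	uint64_t desc = 16;	/* sizeof(union ixgbe_adv_tx_desc) */
	uint64_t size = count * desc + sizeof(uint32_t);	/* + head slot */

	/* without the extra u32: exactly 8192; with it: 8196 -> 12288 */
	printf("raw %llu, aligned %llu\n",
	       (unsigned long long)size,
	       (unsigned long long)ALIGN_UP(size, 4096));
	return 0;
}
```
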
```diff
@@ -2581,7 +2609,7 @@ alloc_failed:
  * Free all transmit software resources
  **/
 static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
-				    struct ixgbe_ring *tx_ring)
+                                    struct ixgbe_ring *tx_ring)
 {
 	struct pci_dev *pdev = adapter->pdev;
 
```