author		Alexander Duyck <alexander.h.duyck@intel.com>	2011-08-26 03:44:43 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-10-07 02:17:50 -0400
commit		8542db05dbc99f603889c349e5cf8f3f81cddbf5
tree		8f2ea64f9db7b72cb5748a247ba50131a7264462
parent		7d13a7d0da74d127457cc6f88e47fd8e85960a13
igb: Make first and tx_buffer_info->next_to_watch into pointers
This change converts two tx_buffer_info index values into pointers. The
advantage is that we avoid unnecessary index computations, and in the case
of next_to_watch the value carries extra information as well: a NULL pointer
indicates it is unset, whereas an index of 0 is a valid ring position and so
cannot mean "unset".
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
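
As an aside, here is a minimal standalone C sketch of the sentinel point made in the
message above. It is not part of this patch; the struct and function names below are
invented for illustration. With a u16 index, slot 0 is a legal ring position and cannot
double as an "unset" marker, while a pointer can use NULL for exactly that.

/*
 * Illustrative sketch only -- not from the patch.  The names tx_desc,
 * tx_buffer and ring are invented; they only mirror the idea behind
 * igb's next_to_watch field.
 */
#include <stddef.h>
#include <stdio.h>

struct tx_desc { unsigned int status; };

struct tx_buffer {
	struct tx_desc *next_to_watch;	/* NULL means "no packet pending" */
};

static int work_pending(const struct tx_buffer *buf)
{
	/* With a u16 index this test cannot be written: index 0 is a valid
	 * ring slot, so 0 cannot double as the "unset" marker. */
	return buf->next_to_watch != NULL;
}

int main(void)
{
	struct tx_desc ring[4] = { { 0 } };
	struct tx_buffer buf = { .next_to_watch = NULL };

	printf("pending before map: %d\n", work_pending(&buf));	/* prints 0 */
	buf.next_to_watch = &ring[0];	/* even slot 0 is representable */
	printf("pending after map:  %d\n", work_pending(&buf));	/* prints 1 */
	return 0;
}

The conversion in igb_tx_map()/igb_clean_tx_irq() below relies on the same property
when it tests next_to_watch before cleaning.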
-rw-r--r--	drivers/net/ethernet/intel/igb/igb.h	2
-rw-r--r--	drivers/net/ethernet/intel/igb/igb_main.c	66
2 files changed, 37 insertions, 31 deletions
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 56c68fc8bca..7185667bf26 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -133,7 +133,7 @@ struct vf_data_storage {
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct igb_tx_buffer {
-	u16 next_to_watch;
+	union e1000_adv_tx_desc *next_to_watch;
 	unsigned long time_stamp;
 	dma_addr_t dma;
 	u32 length;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a0bb81d9ef1..edc2caeb6c1 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -381,7 +381,7 @@ static void igb_dump(struct igb_adapter *adapter)
 		struct igb_tx_buffer *buffer_info;
 		tx_ring = adapter->tx_ring[n];
 		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
-		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+		printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
 		       n, tx_ring->next_to_use, tx_ring->next_to_clean,
 		       (u64)buffer_info->dma,
 		       buffer_info->length,
@@ -421,7 +421,7 @@ static void igb_dump(struct igb_adapter *adapter)
 		buffer_info = &tx_ring->tx_buffer_info[i];
 		u0 = (struct my_u0 *)tx_desc;
 		printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
-			" %04X %3X %016llX %p", i,
+			" %04X %p %016llX %p", i,
 			le64_to_cpu(u0->a),
 			le64_to_cpu(u0->b),
 			(u64)buffer_info->dma,
@@ -3161,7 +3161,7 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
 	}
 	buffer_info->time_stamp = 0;
 	buffer_info->length = 0;
-	buffer_info->next_to_watch = 0;
+	buffer_info->next_to_watch = NULL;
 	buffer_info->mapped_as_page = false;
 }
 
@@ -4107,7 +4107,7 @@ static inline bool igb_tx_csum(struct igb_ring *tx_ring,
 #define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
 
 static inline int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
-			     unsigned int first)
+			     struct igb_tx_buffer *first)
 {
 	struct igb_tx_buffer *buffer_info;
 	struct device *dev = tx_ring->dev;
@@ -4121,7 +4121,6 @@ static inline int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
 	buffer_info = &tx_ring->tx_buffer_info[i];
 	BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
 	buffer_info->length = hlen;
-	buffer_info->next_to_watch = i;
 	buffer_info->dma = dma_map_single(dev, skb->data, hlen,
 					  DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, buffer_info->dma))
@@ -4139,7 +4138,6 @@ static inline int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
 		buffer_info = &tx_ring->tx_buffer_info[i];
 		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
 		buffer_info->length = len;
-		buffer_info->next_to_watch = i;
 		buffer_info->mapped_as_page = true;
 		buffer_info->dma = skb_frag_dma_map(dev, frag, 0, len,
 						    DMA_TO_DEVICE);
@@ -4153,8 +4151,12 @@ static inline int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
 	/* multiply data chunks by size of headers */
 	buffer_info->bytecount = ((gso_segs - 1) * hlen) + skb->len;
 	buffer_info->gso_segs = gso_segs;
-	tx_ring->tx_buffer_info[first].next_to_watch = i;
-	tx_ring->tx_buffer_info[first].time_stamp = jiffies;
+
+	/* set the timestamp */
+	first->time_stamp = jiffies;
+
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = IGB_TX_DESC(tx_ring, i);
 
 	return ++count;
 
@@ -4165,7 +4167,6 @@ dma_error:
 	buffer_info->dma = 0;
 	buffer_info->time_stamp = 0;
 	buffer_info->length = 0;
-	buffer_info->next_to_watch = 0;
 	buffer_info->mapped_as_page = false;
 
 	/* clear timestamp and dma mappings for remaining portion of packet */
@@ -4283,9 +4284,9 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 				struct igb_ring *tx_ring)
 {
+	struct igb_tx_buffer *first;
 	int tso, count;
 	u32 tx_flags = 0;
-	u16 first;
 	u8 hdr_len = 0;
 
 	/* need: 1 descriptor per page,
@@ -4311,7 +4312,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 	if (skb->protocol == htons(ETH_P_IP))
 		tx_flags |= IGB_TX_FLAGS_IPV4;
 
-	first = tx_ring->next_to_use;
+	/* record the location of the first descriptor for this packet */
+	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
 
 	tso = igb_tso(tx_ring, skb, tx_flags, &hdr_len);
 
@@ -4330,8 +4332,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 	count = igb_tx_map(tx_ring, skb, first);
 	if (!count) {
 		dev_kfree_skb_any(skb);
-		tx_ring->tx_buffer_info[first].time_stamp = 0;
-		tx_ring->next_to_use = first;
+		first->time_stamp = 0;
+		tx_ring->next_to_use = first - tx_ring->tx_buffer_info;
 		return NETDEV_TX_OK;
 	}
 
@@ -5568,29 +5570,34 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct igb_ring *tx_ring = q_vector->tx_ring;
 	struct igb_tx_buffer *tx_buffer;
-	union e1000_adv_tx_desc *tx_desc;
+	union e1000_adv_tx_desc *tx_desc, *eop_desc;
 	unsigned int total_bytes = 0, total_packets = 0;
 	unsigned int budget = q_vector->tx_work_limit;
-	u16 i = tx_ring->next_to_clean;
+	unsigned int i = tx_ring->next_to_clean;
 
 	if (test_bit(__IGB_DOWN, &adapter->state))
 		return true;
 
 	tx_buffer = &tx_ring->tx_buffer_info[i];
 	tx_desc = IGB_TX_DESC(tx_ring, i);
+	i -= tx_ring->count;
 
 	for (; budget; budget--) {
-		u16 eop = tx_buffer->next_to_watch;
-		union e1000_adv_tx_desc *eop_desc;
+		eop_desc = tx_buffer->next_to_watch;
 
-		eop_desc = IGB_TX_DESC(tx_ring, eop);
+		/* prevent any other reads prior to eop_desc */
+		rmb();
+
+		/* if next_to_watch is not set then there is no work pending */
+		if (!eop_desc)
+			break;
 
 		/* if DD is not set pending work has not been completed */
 		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
 			break;
 
-		/* prevent any other reads prior to eop_desc being verified */
-		rmb();
+		/* clear next_to_watch to prevent false hangs */
+		tx_buffer->next_to_watch = NULL;
 
 		do {
 			tx_desc->wb.status = 0;
@@ -5607,14 +5614,15 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 			tx_buffer++;
 			tx_desc++;
 			i++;
-			if (unlikely(i == tx_ring->count)) {
-				i = 0;
+			if (unlikely(!i)) {
+				i -= tx_ring->count;
 				tx_buffer = tx_ring->tx_buffer_info;
 				tx_desc = IGB_TX_DESC(tx_ring, 0);
 			}
 		} while (eop_desc);
 	}
 
+	i += tx_ring->count;
 	tx_ring->next_to_clean = i;
 	u64_stats_update_begin(&tx_ring->tx_syncp);
 	tx_ring->tx_stats.bytes += total_bytes;
@@ -5625,16 +5633,14 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 
 	if (tx_ring->detect_tx_hung) {
 		struct e1000_hw *hw = &adapter->hw;
-		u16 eop = tx_ring->tx_buffer_info[i].next_to_watch;
-		union e1000_adv_tx_desc *eop_desc;
 
-		eop_desc = IGB_TX_DESC(tx_ring, eop);
+		eop_desc = tx_buffer->next_to_watch;
 
 		/* Detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i */
 		tx_ring->detect_tx_hung = false;
-		if (tx_ring->tx_buffer_info[i].time_stamp &&
-		    time_after(jiffies, tx_ring->tx_buffer_info[i].time_stamp +
+		if (eop_desc &&
+		    time_after(jiffies, tx_buffer->time_stamp +
 			       (adapter->tx_timeout_factor * HZ)) &&
 		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
 
@@ -5648,7 +5654,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 				" next_to_clean <%x>\n"
 				"buffer_info[next_to_clean]\n"
 				" time_stamp <%lx>\n"
-				" next_to_watch <%x>\n"
+				" next_to_watch <%p>\n"
 				" jiffies <%lx>\n"
 				" desc.status <%x>\n",
 				tx_ring->queue_index,
@@ -5656,8 +5662,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 				readl(tx_ring->tail),
 				tx_ring->next_to_use,
 				tx_ring->next_to_clean,
-				tx_ring->tx_buffer_info[eop].time_stamp,
-				eop,
+				tx_buffer->time_stamp,
+				eop_desc,
 				jiffies,
 				eop_desc->wb.status);
 			netif_stop_subqueue(tx_ring->netdev,
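
A closing note on the index handling introduced in igb_clean_tx_irq() above: i is biased
by -tx_ring->count so the per-descriptor wrap test becomes a compare against zero, and the
real index is recovered with i += tx_ring->count before it is written back to
next_to_clean. Below is a standalone sketch of that trick under invented values
(ring_count, the slot numbers and the fixed-step loop are illustrative only, not igb code):

#include <stdio.h>

int main(void)
{
	const unsigned int ring_count = 8;
	unsigned int next_to_clean = 5;		/* start cleaning at slot 5 */
	unsigned int i = next_to_clean;

	i -= ring_count;	/* bias: i "counts up toward zero" via unsigned wrap-around */

	for (int step = 0; step < 6; step++) {
		i++;
		if (!i)				/* just walked off the end of the ring */
			i -= ring_count;	/* wrap back to the biased start */
		printf("cleaning slot %u\n", i + ring_count);	/* un-bias for display */
	}

	i += ring_count;	/* un-bias before storing the index back */
	printf("next_to_clean = %u\n", i);
	return 0;
}

The point of the bias is that the wrap check in the hot cleanup loop compares against the
constant 0 instead of reloading tx_ring->count on every descriptor.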