about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2009-03-31 17:34:23 -0400
committerDavid S. Miller <davem@davemloft.net>2009-04-02 04:02:31 -0400
commit44df32c592f5a626c9f2ed56642e939788022408 (patch)
tree39204d61919c1444754c3067508f21ad2b70d0cc
parent8be0e4671d6355b2d905cb8fd051393b2cbf9510 (diff)
ixgbe: refactor tx buffer processing to use skb_dma_map/unmap
This patch resolves an issue with pci_map_single being used to map a buffer and then pci_unmap_page being used to unmap it. In addition it handles any error conditions that may be detected using skb_dma_map.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>
Acked-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c79
1 file changed, 41 insertions(+), 38 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 63c0f19c11a3..f36cff52d48f 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -187,15 +187,14 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
187 struct ixgbe_tx_buffer 187 struct ixgbe_tx_buffer
188 *tx_buffer_info) 188 *tx_buffer_info)
189{ 189{
190 if (tx_buffer_info->dma) { 190 tx_buffer_info->dma = 0;
191 pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
192 tx_buffer_info->length, PCI_DMA_TODEVICE);
193 tx_buffer_info->dma = 0;
194 }
195 if (tx_buffer_info->skb) { 191 if (tx_buffer_info->skb) {
192 skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
193 DMA_TO_DEVICE);
196 dev_kfree_skb_any(tx_buffer_info->skb); 194 dev_kfree_skb_any(tx_buffer_info->skb);
197 tx_buffer_info->skb = NULL; 195 tx_buffer_info->skb = NULL;
198 } 196 }
197 tx_buffer_info->time_stamp = 0;
199 /* tx_buffer_info must be completely set up in the transmit path */ 198 /* tx_buffer_info must be completely set up in the transmit path */
200} 199}
201 200
@@ -204,15 +203,11 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
204 unsigned int eop) 203 unsigned int eop)
205{ 204{
206 struct ixgbe_hw *hw = &adapter->hw; 205 struct ixgbe_hw *hw = &adapter->hw;
207 u32 head, tail;
208 206
209 /* Detect a transmit hang in hardware, this serializes the 207 /* Detect a transmit hang in hardware, this serializes the
210 * check with the clearing of time_stamp and movement of eop */ 208 * check with the clearing of time_stamp and movement of eop */
211 head = IXGBE_READ_REG(hw, tx_ring->head);
212 tail = IXGBE_READ_REG(hw, tx_ring->tail);
213 adapter->detect_tx_hung = false; 209 adapter->detect_tx_hung = false;
214 if ((head != tail) && 210 if (tx_ring->tx_buffer_info[eop].time_stamp &&
215 tx_ring->tx_buffer_info[eop].time_stamp &&
216 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && 211 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
217 !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) { 212 !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
218 /* detected Tx unit hang */ 213 /* detected Tx unit hang */
@@ -227,7 +222,8 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
227 " time_stamp <%lx>\n" 222 " time_stamp <%lx>\n"
228 " jiffies <%lx>\n", 223 " jiffies <%lx>\n",
229 tx_ring->queue_index, 224 tx_ring->queue_index,
230 head, tail, 225 IXGBE_READ_REG(hw, tx_ring->head),
226 IXGBE_READ_REG(hw, tx_ring->tail),
231 tx_ring->next_to_use, eop, 227 tx_ring->next_to_use, eop,
232 tx_ring->tx_buffer_info[eop].time_stamp, jiffies); 228 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
233 return true; 229 return true;
@@ -4164,32 +4160,39 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
4164 struct sk_buff *skb, unsigned int first) 4160 struct sk_buff *skb, unsigned int first)
4165{ 4161{
4166 struct ixgbe_tx_buffer *tx_buffer_info; 4162 struct ixgbe_tx_buffer *tx_buffer_info;
4167 unsigned int len = skb->len; 4163 unsigned int len = skb_headlen(skb);
4168 unsigned int offset = 0, size, count = 0, i; 4164 unsigned int offset = 0, size, count = 0, i;
4169 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 4165 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
4170 unsigned int f; 4166 unsigned int f;
4171 4167 dma_addr_t *map;
4172 len -= skb->data_len;
4173 4168
4174 i = tx_ring->next_to_use; 4169 i = tx_ring->next_to_use;
4175 4170
4171 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
4172 dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
4173 return 0;
4174 }
4175
4176 map = skb_shinfo(skb)->dma_maps;
4177
4176 while (len) { 4178 while (len) {
4177 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 4179 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4178 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 4180 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
4179 4181
4180 tx_buffer_info->length = size; 4182 tx_buffer_info->length = size;
4181 tx_buffer_info->dma = pci_map_single(adapter->pdev, 4183 tx_buffer_info->dma = map[0] + offset;
4182 skb->data + offset,
4183 size, PCI_DMA_TODEVICE);
4184 tx_buffer_info->time_stamp = jiffies; 4184 tx_buffer_info->time_stamp = jiffies;
4185 tx_buffer_info->next_to_watch = i; 4185 tx_buffer_info->next_to_watch = i;
4186 4186
4187 len -= size; 4187 len -= size;
4188 offset += size; 4188 offset += size;
4189 count++; 4189 count++;
4190 i++; 4190
4191 if (i == tx_ring->count) 4191 if (len) {
4192 i = 0; 4192 i++;
4193 if (i == tx_ring->count)
4194 i = 0;
4195 }
4193 } 4196 }
4194 4197
4195 for (f = 0; f < nr_frags; f++) { 4198 for (f = 0; f < nr_frags; f++) {
@@ -4197,33 +4200,27 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
4197 4200
4198 frag = &skb_shinfo(skb)->frags[f]; 4201 frag = &skb_shinfo(skb)->frags[f];
4199 len = frag->size; 4202 len = frag->size;
4200 offset = frag->page_offset; 4203 offset = 0;
4201 4204
4202 while (len) { 4205 while (len) {
4206 i++;
4207 if (i == tx_ring->count)
4208 i = 0;
4209
4203 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 4210 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4204 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 4211 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
4205 4212
4206 tx_buffer_info->length = size; 4213 tx_buffer_info->length = size;
4207 tx_buffer_info->dma = pci_map_page(adapter->pdev, 4214 tx_buffer_info->dma = map[f + 1] + offset;
4208 frag->page,
4209 offset,
4210 size,
4211 PCI_DMA_TODEVICE);
4212 tx_buffer_info->time_stamp = jiffies; 4215 tx_buffer_info->time_stamp = jiffies;
4213 tx_buffer_info->next_to_watch = i; 4216 tx_buffer_info->next_to_watch = i;
4214 4217
4215 len -= size; 4218 len -= size;
4216 offset += size; 4219 offset += size;
4217 count++; 4220 count++;
4218 i++;
4219 if (i == tx_ring->count)
4220 i = 0;
4221 } 4221 }
4222 } 4222 }
4223 if (i == 0) 4223
4224 i = tx_ring->count - 1;
4225 else
4226 i = i - 1;
4227 tx_ring->tx_buffer_info[i].skb = skb; 4224 tx_ring->tx_buffer_info[i].skb = skb;
4228 tx_ring->tx_buffer_info[first].next_to_watch = i; 4225 tx_ring->tx_buffer_info[first].next_to_watch = i;
4229 4226
@@ -4389,13 +4386,19 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4389 (skb->ip_summed == CHECKSUM_PARTIAL)) 4386 (skb->ip_summed == CHECKSUM_PARTIAL))
4390 tx_flags |= IXGBE_TX_FLAGS_CSUM; 4387 tx_flags |= IXGBE_TX_FLAGS_CSUM;
4391 4388
4392 ixgbe_tx_queue(adapter, tx_ring, tx_flags, 4389 count = ixgbe_tx_map(adapter, tx_ring, skb, first);
4393 ixgbe_tx_map(adapter, tx_ring, skb, first),
4394 skb->len, hdr_len);
4395 4390
4396 netdev->trans_start = jiffies; 4391 if (count) {
4392 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
4393 hdr_len);
4394 netdev->trans_start = jiffies;
4395 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
4397 4396
4398 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); 4397 } else {
4398 dev_kfree_skb_any(skb);
4399 tx_ring->tx_buffer_info[first].time_stamp = 0;
4400 tx_ring->next_to_use = first;
4401 }
4399 4402
4400 return NETDEV_TX_OK; 4403 return NETDEV_TX_OK;
4401} 4404}