author     Florian Westphal <fw@strlen.de>                2014-09-03 09:34:21 -0400
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>     2014-09-12 04:26:42 -0400
commit     2b294b18689c6b68f631535acbcdb6c8e6fa11cf (patch)
tree       fa2e992770128bf82bc76417c4315e14f28dd0e8
parent     2037110c96d5f1dd71453fcd0d54e79be12a352b (diff)
e1000: perform copybreak ahead of DMA unmap
Currently we unmap the DMA range, then copy to new skb. Change this so
we can keep the mapping in case the data is copied.

Signed-off-by: Florian Westphal <fw@strlen.de>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c  73
1 file changed, 43 insertions, 30 deletions
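The change reorders the receive path: the copybreak decision is now made while the buffer is still DMA-mapped, so a small frame only needs dma_sync_single_for_cpu() plus a memcpy(), and the original buffer (with its mapping intact) can be handed straight back to the hardware; only frames passed up as-is still pay for dma_unmap_single(). A rough, self-contained sketch of that idea follows; the helper name rx_copybreak_or_unmap() and its parameters are invented for illustration and are not the driver's actual code.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Illustrative only: pick between the copybreak path and the normal
 * unmap path for one received buffer.  Small frame: keep the DMA
 * mapping, sync the data for CPU access and copy it into a fresh skb,
 * so the original buffer can be reposted to the hardware unchanged.
 * Large frame (or failed small allocation): unmap as before and hand
 * the original skb up the stack.
 */
static struct sk_buff *rx_copybreak_or_unmap(struct net_device *netdev,
					     struct device *dev,
					     struct sk_buff *rx_skb,
					     dma_addr_t dma,
					     unsigned int mapped_len,
					     unsigned int frame_len,
					     unsigned int copybreak)
{
	struct sk_buff *skb;

	if (frame_len <= copybreak) {
		skb = netdev_alloc_skb_ip_align(netdev, frame_len);
		if (skb) {
			/* mapping stays live; only make the data CPU-visible */
			dma_sync_single_for_cpu(dev, dma, frame_len,
						DMA_FROM_DEVICE);
			memcpy(skb_put(skb, frame_len), rx_skb->data,
			       frame_len);
			return skb;	/* rx_skb and its mapping are reused */
		}
	}

	/* no copy: tear down the mapping and pass rx_skb up the stack */
	dma_unmap_single(dev, dma, mapped_len, DMA_FROM_DEVICE);
	return rx_skb;
}

In the patch itself, the same split is what lets e1000_alloc_rx_buffers() jump past dma_map_single() (the new "skip" label) when it recycles a buffer whose mapping was never torn down.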
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index e26a32d2f64f..79626ba7e87a 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4077,6 +4077,16 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
 	return false;
 }
 
+static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
+					   unsigned int bufsz)
+{
+	struct sk_buff *skb = netdev_alloc_skb_ip_align(adapter->netdev, bufsz);
+
+	if (unlikely(!skb))
+		adapter->alloc_rx_buff_failed++;
+	return skb;
+}
+
 /**
  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
  * @adapter: board private structure
@@ -4262,25 +4272,25 @@ next_desc:
 /* this should improve performance for small packets with large amounts
  * of reassembly being done in the stack
  */
-static void e1000_check_copybreak(struct net_device *netdev,
-				  struct e1000_buffer *buffer_info,
-				  u32 length, struct sk_buff **skb)
+static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
+				       struct e1000_buffer *buffer_info,
+				       u32 length, const void *data)
 {
-	struct sk_buff *new_skb;
+	struct sk_buff *skb;
 
 	if (length > copybreak)
-		return;
+		return NULL;
 
-	new_skb = netdev_alloc_skb_ip_align(netdev, length);
-	if (!new_skb)
-		return;
+	skb = e1000_alloc_rx_skb(adapter, length);
+	if (!skb)
+		return NULL;
+
+	dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
+				length, DMA_FROM_DEVICE);
+
+	memcpy(skb_put(skb, length), data, length);
 
-	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
-				       (*skb)->data - NET_IP_ALIGN,
-				       length + NET_IP_ALIGN);
-	/* save the skb in buffer_info as good */
-	buffer_info->skb = *skb;
-	*skb = new_skb;
+	return skb;
 }
 
 /**
@@ -4318,10 +4328,18 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		rmb(); /* read descriptor and rx_buffer_info after status DD */
 
 		status = rx_desc->status;
-		skb = buffer_info->skb;
-		buffer_info->skb = NULL;
+		length = le16_to_cpu(rx_desc->length);
 
-		prefetch(skb->data - NET_IP_ALIGN);
+		prefetch(buffer_info->skb->data - NET_IP_ALIGN);
+		skb = e1000_copybreak(adapter, buffer_info, length,
+				      buffer_info->skb->data);
+		if (!skb) {
+			skb = buffer_info->skb;
+			buffer_info->skb = NULL;
+			dma_unmap_single(&pdev->dev, buffer_info->dma,
+					 buffer_info->length, DMA_FROM_DEVICE);
+			buffer_info->dma = 0;
+		}
 
 		if (++i == rx_ring->count) i = 0;
 		next_rxd = E1000_RX_DESC(*rx_ring, i);
@@ -4331,11 +4349,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 		cleaned = true;
 		cleaned_count++;
-		dma_unmap_single(&pdev->dev, buffer_info->dma,
-				 buffer_info->length, DMA_FROM_DEVICE);
-		buffer_info->dma = 0;
 
-		length = le16_to_cpu(rx_desc->length);
 		/* !EOP means multiple descriptors were used to store a single
 		 * packet, if thats the case we need to toss it. In fact, we
 		 * to toss every packet with the EOP bit clear and the next
@@ -4348,8 +4362,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		if (adapter->discarding) {
 			/* All receives must fit into a single buffer */
 			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
-			/* recycle */
-			buffer_info->skb = skb;
+			dev_kfree_skb(skb);
 			if (status & E1000_RXD_STAT_EOP)
 				adapter->discarding = false;
 			goto next_desc;
@@ -4363,8 +4376,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 			} else if (netdev->features & NETIF_F_RXALL) {
 				goto process_skb;
 			} else {
-				/* recycle */
-				buffer_info->skb = skb;
+				dev_kfree_skb(skb);
 				goto next_desc;
 			}
 		}
@@ -4379,9 +4391,10 @@ process_skb:
 			 */
 			length -= 4;
 
-		e1000_check_copybreak(netdev, buffer_info, length, &skb);
-
-		skb_put(skb, length);
+		if (buffer_info->skb == NULL)
+			skb_put(skb, length);
+		else /* copybreak skb */
+			skb_trim(skb, length);
 
 		/* Receive Checksum Offload */
 		e1000_rx_checksum(adapter,
@@ -4527,7 +4540,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 		skb = buffer_info->skb;
 		if (skb) {
 			skb_trim(skb, 0);
-			goto map_skb;
+			goto skip;
 		}
 
 		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
@@ -4564,7 +4577,6 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 		}
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
-map_skb:
 		buffer_info->dma = dma_map_single(&pdev->dev,
 						  skb->data,
 						  buffer_info->length,
@@ -4602,6 +4614,7 @@ map_skb:
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 
+skip:
 		if (unlikely(++i == rx_ring->count))
 			i = 0;
 		buffer_info = &rx_ring->buffer_info[i];