aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ixgbe/ixgbe_main.c
diff options
context:
space:
mode:
authorMallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>2010-05-13 13:33:21 -0400
committerDavid S. Miller <davem@davemloft.net>2010-05-14 00:07:33 -0400
commite8171aaad7ec335b8cbd71f56eb08b545f0c404f (patch)
treeefd76846ee2cf0a266386b97c23130dc55d9c11d /drivers/net/ixgbe/ixgbe_main.c
parente433ea1fb03c10debf101019668b83abed041c24 (diff)
ixgbe: Use bool flag to see if the packet unmapping is delayed in HWRSC
We can't use the zero magic "bad" value to check if IXGBE_RSC_CB(skb)->dma is valid. It is only valid on x86/arm/m68k/alpha architectures, and on SPARC, PowerPC and other architectures it should be ~0. As per Benjamin Herrenschmidt's feedback, use a bool flag to decide if the packet unmapping is delayed in hardware RSC till EOP is reached. Signed-off-by: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c17
1 file changed, 11 insertions, 6 deletions
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 15032c79e003..3fb9f23c7502 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1160,6 +1160,7 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
1160 1160
1161struct ixgbe_rsc_cb { 1161struct ixgbe_rsc_cb {
1162 dma_addr_t dma; 1162 dma_addr_t dma;
1163 bool delay_unmap;
1163}; 1164};
1164 1165
1165#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) 1166#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
@@ -1215,7 +1216,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1215 if (rx_buffer_info->dma) { 1216 if (rx_buffer_info->dma) {
1216 if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && 1217 if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
1217 (!(staterr & IXGBE_RXD_STAT_EOP)) && 1218 (!(staterr & IXGBE_RXD_STAT_EOP)) &&
1218 (!(skb->prev))) 1219 (!(skb->prev))) {
1219 /* 1220 /*
1220 * When HWRSC is enabled, delay unmapping 1221 * When HWRSC is enabled, delay unmapping
1221 * of the first packet. It carries the 1222 * of the first packet. It carries the
@@ -1223,12 +1224,14 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1223 * access the header after the writeback. 1224 * access the header after the writeback.
1224 * Only unmap it when EOP is reached 1225 * Only unmap it when EOP is reached
1225 */ 1226 */
1227 IXGBE_RSC_CB(skb)->delay_unmap = true;
1226 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; 1228 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
1227 else 1229 } else {
1228 dma_unmap_single(&pdev->dev, 1230 dma_unmap_single(&pdev->dev,
1229 rx_buffer_info->dma, 1231 rx_buffer_info->dma,
1230 rx_ring->rx_buf_len, 1232 rx_ring->rx_buf_len,
1231 DMA_FROM_DEVICE); 1233 DMA_FROM_DEVICE);
1234 }
1232 rx_buffer_info->dma = 0; 1235 rx_buffer_info->dma = 0;
1233 skb_put(skb, len); 1236 skb_put(skb, len);
1234 } 1237 }
@@ -1276,12 +1279,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1276 if (skb->prev) 1279 if (skb->prev)
1277 skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count)); 1280 skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
1278 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 1281 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
1279 if (IXGBE_RSC_CB(skb)->dma) { 1282 if (IXGBE_RSC_CB(skb)->delay_unmap) {
1280 dma_unmap_single(&pdev->dev, 1283 dma_unmap_single(&pdev->dev,
1281 IXGBE_RSC_CB(skb)->dma, 1284 IXGBE_RSC_CB(skb)->dma,
1282 rx_ring->rx_buf_len, 1285 rx_ring->rx_buf_len,
1283 DMA_FROM_DEVICE); 1286 DMA_FROM_DEVICE);
1284 IXGBE_RSC_CB(skb)->dma = 0; 1287 IXGBE_RSC_CB(skb)->dma = 0;
1288 IXGBE_RSC_CB(skb)->delay_unmap = false;
1285 } 1289 }
1286 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) 1290 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
1287 rx_ring->rsc_count += skb_shinfo(skb)->nr_frags; 1291 rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
@@ -3505,12 +3509,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3505 rx_buffer_info->skb = NULL; 3509 rx_buffer_info->skb = NULL;
3506 do { 3510 do {
3507 struct sk_buff *this = skb; 3511 struct sk_buff *this = skb;
3508 if (IXGBE_RSC_CB(this)->dma) { 3512 if (IXGBE_RSC_CB(this)->delay_unmap) {
3509 dma_unmap_single(&pdev->dev, 3513 dma_unmap_single(&pdev->dev,
3510 IXGBE_RSC_CB(this)->dma, 3514 IXGBE_RSC_CB(this)->dma,
3511 rx_ring->rx_buf_len, 3515 rx_ring->rx_buf_len,
3512 DMA_FROM_DEVICE); 3516 DMA_FROM_DEVICE);
3513 IXGBE_RSC_CB(this)->dma = 0; 3517 IXGBE_RSC_CB(this)->dma = 0;
3518 IXGBE_RSC_CB(skb)->delay_unmap = false;
3514 } 3519 }
3515 skb = skb->prev; 3520 skb = skb->prev;
3516 dev_kfree_skb(this); 3521 dev_kfree_skb(this);