Diffstat (limited to 'drivers/net/cxgb3/sge.c')
 drivers/net/cxgb3/sge.c | 29 ++++++++++++-----------------
 1 files changed, 12 insertions(+), 17 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 73d569e758ec..29c79eb43beb 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -355,7 +355,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
 		(*d->pg_chunk.p_cnt)--;
 		if (!*d->pg_chunk.p_cnt)
 			pci_unmap_page(pdev,
-				       pci_unmap_addr(&d->pg_chunk, mapping),
+				       d->pg_chunk.mapping,
 				       q->alloc_size, PCI_DMA_FROMDEVICE);
 
 		put_page(d->pg_chunk.page);
@@ -454,7 +454,7 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
 		q->pg_chunk.offset = 0;
 		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
 				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
-		pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
+		q->pg_chunk.mapping = mapping;
 	}
 	sd->pg_chunk = q->pg_chunk;
 
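The clear_rx_desc and alloc_pg_chunk hunks above move the page-chunk DMA address from pci_unmap_addr()/pci_unmap_addr_set() state into a plain dma_addr_t field. Unmap-state storage can compile away to nothing on configurations that do not track unmap state, while the refill_fl hunk below computes the Rx descriptor address from this field, so it must hold a real value on every configuration. A minimal sketch of the resulting pattern, with illustrative struct and function names rather than the driver's exact ones:

/*
 * Sketch only: the page chunk keeps its bus address in an unconditional
 * dma_addr_t field instead of pci_unmap_addr() unmap state.
 */
#include <linux/mm.h>
#include <linux/pci.h>

struct pg_chunk_sketch {
	struct page *page;
	unsigned int offset;		/* chunk offset within the page */
	unsigned long *p_cnt;		/* per-page user count shared by chunks */
	dma_addr_t mapping;		/* always present, not unmap-state */
};

/* Map the whole page once; every chunk derives its bus address from it. */
static int map_chunk_page(struct pci_dev *pdev, struct pg_chunk_sketch *pc,
			  unsigned int alloc_size)
{
	pc->mapping = pci_map_page(pdev, pc->page, 0, alloc_size,
				   PCI_DMA_FROMDEVICE);
	return pci_dma_mapping_error(pdev, pc->mapping);
}

/* Bus address to program into an Rx descriptor for this chunk. */
static dma_addr_t chunk_dma_addr(const struct pg_chunk_sketch *pc)
{
	return pc->mapping + pc->offset;
}

/* Drop one chunk reference; unmap the page when the last one goes. */
static void put_chunk(struct pci_dev *pdev, struct pg_chunk_sketch *pc,
		      unsigned int alloc_size)
{
	(*pc->p_cnt)--;
	if (!*pc->p_cnt)
		pci_unmap_page(pdev, pc->mapping, alloc_size,
			       PCI_DMA_FROMDEVICE);
	put_page(pc->page);
}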
@@ -511,8 +511,7 @@ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 nomem:				q->alloc_failed++;
 				break;
 			}
-			mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
-						 sd->pg_chunk.offset;
+			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
 			pci_unmap_addr_set(sd, dma_addr, mapping);
 
 			add_one_rx_chunk(mapping, d, q->gen);
@@ -882,7 +881,7 @@ recycle:
 		(*sd->pg_chunk.p_cnt)--;
 		if (!*sd->pg_chunk.p_cnt)
 			pci_unmap_page(adap->pdev,
-				       pci_unmap_addr(&sd->pg_chunk, mapping),
+				       sd->pg_chunk.mapping,
 				       fl->alloc_size,
 				       PCI_DMA_FROMDEVICE);
 		if (!skb) {
@@ -1241,7 +1240,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	q = &qs->txq[TXQ_ETH];
 	txq = netdev_get_tx_queue(dev, qidx);
 
-	spin_lock(&q->lock);
 	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 
 	credits = q->size - q->in_use;
@@ -1252,7 +1250,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		dev_err(&adap->pdev->dev,
 			"%s: Tx ring %u full while queue awake!\n",
 			dev->name, q->cntxt_id & 7);
-		spin_unlock(&q->lock);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -1286,9 +1283,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
 		qs->port_stats[SGE_PSTAT_VLANINS]++;
 
-	dev->trans_start = jiffies;
-	spin_unlock(&q->lock);
-
 	/*
 	 * We do not use Tx completion interrupts to free DMAd Tx packets.
 	 * This is good for performamce but means that we rely on new Tx
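The three t3_eth_xmit hunks above drop the driver-private Tx queue spinlock and the manual dev->trans_start update: the stack's dev_queue_xmit() takes the per-queue netif_tx lock around ndo_start_xmit() and maintains the queue's trans_start itself, so both are redundant in the handler. A hedged sketch of an xmit handler written against that contract; the adapter, ring, and helper names are made up for illustration:

/*
 * Sketch only: an xmit handler that relies on the netif_tx queue lock
 * held by the core instead of a driver-private spinlock.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct sketch_txq {
	unsigned int size;		/* descriptors in the ring */
	unsigned int in_use;		/* descriptors currently posted */
};

struct sketch_adapter {
	struct sketch_txq txq[8];	/* one ring per Tx queue */
};

/* Write the skb's descriptors and ring the doorbell (stubbed). */
static void sketch_post_to_hw(struct sketch_txq *q, struct sk_buff *skb)
{
	q->in_use += skb_shinfo(skb)->nr_frags + 1;
}

static int sketch_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sketch_adapter *adap = netdev_priv(dev);
	unsigned int qidx = skb_get_queue_mapping(skb);
	struct sketch_txq *q = &adap->txq[qidx];

	/*
	 * No private lock is taken: dev_queue_xmit() already holds this
	 * queue's netif_tx lock around ndo_start_xmit() and updates the
	 * queue's trans_start, so neither a private queue lock nor
	 * dev->trans_start bookkeeping is needed here.
	 */
	if (q->size - q->in_use < skb_shinfo(skb)->nr_frags + 1) {
		netif_tx_stop_queue(netdev_get_tx_queue(dev, qidx));
		return NETDEV_TX_BUSY;
	}

	sketch_post_to_hw(q, skb);
	return NETDEV_TX_OK;
}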
@@ -2096,7 +2090,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 		(*sd->pg_chunk.p_cnt)--;
 		if (!*sd->pg_chunk.p_cnt)
 			pci_unmap_page(adap->pdev,
-				       pci_unmap_addr(&sd->pg_chunk, mapping),
+				       sd->pg_chunk.mapping,
 				       fl->alloc_size,
 				       PCI_DMA_FROMDEVICE);
 
@@ -2858,11 +2852,12 @@ static void sge_timer_tx(unsigned long data)
 	unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
 	unsigned long next_period;
 
-	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
+	if (__netif_tx_trylock(qs->tx_q)) {
 		tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
 						    TX_RECLAIM_TIMER_CHUNK);
-		spin_unlock(&qs->txq[TXQ_ETH].lock);
+		__netif_tx_unlock(qs->tx_q);
 	}
+
 	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
 		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
 						     TX_RECLAIM_TIMER_CHUNK);
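With the private lock gone, the reclaim timer above serializes against the xmit path by trylocking the same netdev Tx queue the stack locks, skipping the round if xmit is active. A minimal sketch of that trylock pattern (the timer setup and the reclaim work itself are elided):

/* Sketch only: opportunistic Tx reclaim, mutually exclusive with xmit. */
#include <linux/netdevice.h>

static void tx_reclaim_sketch(struct netdev_queue *txq)
{
	/* If the xmit path holds the queue lock, try again next period. */
	if (__netif_tx_trylock(txq)) {
		/* ... free descriptors completed by the hardware ... */
		__netif_tx_unlock(txq);
	}
}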
@@ -2870,8 +2865,8 @@ static void sge_timer_tx(unsigned long data)
 	}
 
-	next_period = TX_RECLAIM_PERIOD >>
-		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
-		      TX_RECLAIM_TIMER_CHUNK);
-	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
+	next_period = TX_RECLAIM_PERIOD >>
+		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
+		      TX_RECLAIM_TIMER_CHUNK);
+	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
 }
 