author		Divy Le Ray <divy@chelsio.com>		2007-02-24 19:44:06 -0500
committer	Jeff Garzik <jeff@garzik.org>		2007-02-27 04:27:12 -0500
commit		99d7cf30b99a8b7863090d8a510d6a4d9ad082cf (patch)
tree		4e217b93f9661c66ab708e8a71c6d133f7b3c88d /drivers
parent		75d8626fdd172745ebb638c2bf5138071e67a818 (diff)
cxgb3 - Unmap offload packets when they are freed
Offload packets may be DMAed long after their SGE Tx descriptors are
done, so they must remain mapped until they are freed rather than until
their descriptors are freed.  Unmap such packets through an skb
destructor.

Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
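
The mechanism at a glance: the DMA addresses already encoded in the Tx
SGL are copied into a struct deferred_unmap_info stashed at skb->head,
and skb->destructor is pointed at a routine that tears the mappings
down only when the skb is finally freed, possibly long after
free_tx_desc() has reclaimed the descriptors.  Since the patch notes
that the structure "must be allocated by callers", an offload skb needs
room for it at skb->head up front.  A minimal, hypothetical caller-side
sketch (not part of this patch; alloc_ofld_skb is an invented name):

	/* Hypothetical helper, sketch only: reserve space at skb->head
	 * for the deferred_unmap_info that setup_deferred_unmapping()
	 * writes there.  Real offload callers may size and allocate
	 * their skbs differently.
	 */
	static struct sk_buff *alloc_ofld_skb(unsigned int len, gfp_t gfp)
	{
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct deferred_unmap_info) + len, gfp);
		if (!skb)
			return NULL;

		skb_reserve(skb, sizeof(struct deferred_unmap_info));
		return skb;
	}
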
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/cxgb3/sge.c	|  63 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 61 insertions(+), 2 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 3f2cf8a07c61..822a598c0db1 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -105,6 +105,15 @@ struct unmap_info {	/* packet unmapping info, overlays skb->cb */
 };
 
 /*
+ * Holds unmapping information for Tx packets that need deferred unmapping.
+ * This structure lives at skb->head and must be allocated by callers.
+ */
+struct deferred_unmap_info {
+	struct pci_dev *pdev;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+/*
  * Maps a number of flits to the number of Tx descriptors that can hold them.
  * The formula is
  *
@@ -252,10 +261,13 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned int cidx = q->cidx;
 
+	const int need_unmap = need_skb_unmap() &&
+			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;
+
 	d = &q->sdesc[cidx];
 	while (n--) {
 		if (d->skb) {	/* an SGL is present */
-			if (need_skb_unmap())
+			if (need_unmap)
 				unmap_skb(d->skb, q, cidx, pdev);
 			if (d->skb->priority == cidx)
 				kfree_skb(d->skb);
@@ -1227,6 +1239,50 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
 }
 
 /**
+ *	deferred_unmap_destructor - unmap a packet when it is freed
+ *	@skb: the packet
+ *
+ *	This is the packet destructor used for Tx packets that need to remain
+ *	mapped until they are freed rather than until their Tx descriptors are
+ *	freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+	int i;
+	const dma_addr_t *p;
+	const struct skb_shared_info *si;
+	const struct deferred_unmap_info *dui;
+	const struct unmap_info *ui = (struct unmap_info *)skb->cb;
+
+	dui = (struct deferred_unmap_info *)skb->head;
+	p = dui->addr;
+
+	if (ui->len)
+		pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);
+
+	si = skb_shinfo(skb);
+	for (i = 0; i < si->nr_frags; i++)
+		pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
+			       PCI_DMA_TODEVICE);
+}
+
+static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
+				     const struct sg_ent *sgl, int sgl_flits)
+{
+	dma_addr_t *p;
+	struct deferred_unmap_info *dui;
+
+	dui = (struct deferred_unmap_info *)skb->head;
+	dui->pdev = pdev;
+	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
+		*p++ = be64_to_cpu(sgl->addr[0]);
+		*p++ = be64_to_cpu(sgl->addr[1]);
+	}
+	if (sgl_flits)
+		*p = be64_to_cpu(sgl->addr[0]);
+}
+
+/**
  *	write_ofld_wr - write an offload work request
  *	@adap: the adapter
  *	@skb: the packet to send
@@ -1262,8 +1318,11 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
 	sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
 			     adap->pdev);
-	if (need_skb_unmap())
+	if (need_skb_unmap()) {
+		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
+		skb->destructor = deferred_unmap_destructor;
 		((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
+	}
 
 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
 			 gen, from->wr_hi, from->wr_lo);
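
A note on the 3-flit stride in setup_deferred_unmapping(): each SGE
scatter/gather entry packs a pair of 32-bit buffer lengths and two
64-bit DMA addresses into three 64-bit flits, so every full entry
yields two addresses, and a trailing partial entry (one length flit
plus one address flit) yields one more.  For reference, the entry
layout the loop assumes, matching the driver's struct sg_ent (a sketch
quoted for illustration, not new code in this patch):

	struct sg_ent {			/* SGE scatter/gather list entry */
		__be32 len[2];		/* one flit: two buffer lengths */
		__be64 addr[2];		/* two flits: two DMA addresses */
	};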