diff options
author | Eric Dumazet <eric.dumazet@gmail.com> | 2011-10-18 17:00:24 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2011-10-19 03:10:46 -0400 |
commit | 9e903e085262ffbf1fc44a17ac06058aca03524a (patch) | |
tree | 4acefc97ba38c1733474d25c0b2053b56af97db1 /drivers/net/ethernet/chelsio | |
parent | dd767856a36e00b631d65ebc4bb81b19915532d6 (diff) |
net: add skb frag size accessors
To ease skb->truesize sanitization, it's better to be able to localize
all references to skb frags size.
Define accessors: skb_frag_size() to fetch frag size, and
skb_frag_size_{set|add|sub}() to manipulate it.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/chelsio')
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb/sge.c | 10 | ||||
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb3/sge.c | 12 | ||||
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/sge.c | 26 | ||||
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 26 |
4 files changed, 37 insertions, 37 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 0a511c4a0472..f9b602300040 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c | |||
@@ -1135,8 +1135,8 @@ static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb) | |||
1135 | len -= SGE_TX_DESC_MAX_PLEN; | 1135 | len -= SGE_TX_DESC_MAX_PLEN; |
1136 | } | 1136 | } |
1137 | for (i = 0; nfrags--; i++) { | 1137 | for (i = 0; nfrags--; i++) { |
1138 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1138 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1139 | len = frag->size; | 1139 | len = skb_frag_size(frag); |
1140 | while (len > SGE_TX_DESC_MAX_PLEN) { | 1140 | while (len > SGE_TX_DESC_MAX_PLEN) { |
1141 | count++; | 1141 | count++; |
1142 | len -= SGE_TX_DESC_MAX_PLEN; | 1142 | len -= SGE_TX_DESC_MAX_PLEN; |
@@ -1278,9 +1278,9 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, | |||
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0, | 1280 | mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0, |
1281 | frag->size, DMA_TO_DEVICE); | 1281 | skb_frag_size(frag), DMA_TO_DEVICE); |
1282 | desc_mapping = mapping; | 1282 | desc_mapping = mapping; |
1283 | desc_len = frag->size; | 1283 | desc_len = skb_frag_size(frag); |
1284 | 1284 | ||
1285 | pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen, | 1285 | pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen, |
1286 | &desc_mapping, &desc_len, | 1286 | &desc_mapping, &desc_len, |
@@ -1290,7 +1290,7 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, | |||
1290 | nfrags == 0); | 1290 | nfrags == 0); |
1291 | ce->skb = NULL; | 1291 | ce->skb = NULL; |
1292 | dma_unmap_addr_set(ce, dma_addr, mapping); | 1292 | dma_unmap_addr_set(ce, dma_addr, mapping); |
1293 | dma_unmap_len_set(ce, dma_len, frag->size); | 1293 | dma_unmap_len_set(ce, dma_len, skb_frag_size(frag)); |
1294 | } | 1294 | } |
1295 | ce->skb = skb; | 1295 | ce->skb = skb; |
1296 | wmb(); | 1296 | wmb(); |
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 2f46b37e5d16..cfb60e1f51da 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c | |||
@@ -254,7 +254,7 @@ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, | |||
254 | 254 | ||
255 | while (frag_idx < nfrags && curflit < WR_FLITS) { | 255 | while (frag_idx < nfrags && curflit < WR_FLITS) { |
256 | pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]), | 256 | pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]), |
257 | skb_shinfo(skb)->frags[frag_idx].size, | 257 | skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]), |
258 | PCI_DMA_TODEVICE); | 258 | PCI_DMA_TODEVICE); |
259 | j ^= 1; | 259 | j ^= 1; |
260 | if (j == 0) { | 260 | if (j == 0) { |
@@ -977,11 +977,11 @@ static inline unsigned int make_sgl(const struct sk_buff *skb, | |||
977 | 977 | ||
978 | nfrags = skb_shinfo(skb)->nr_frags; | 978 | nfrags = skb_shinfo(skb)->nr_frags; |
979 | for (i = 0; i < nfrags; i++) { | 979 | for (i = 0; i < nfrags; i++) { |
980 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 980 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
981 | 981 | ||
982 | mapping = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size, | 982 | mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), |
983 | DMA_TO_DEVICE); | 983 | DMA_TO_DEVICE); |
984 | sgp->len[j] = cpu_to_be32(frag->size); | 984 | sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); |
985 | sgp->addr[j] = cpu_to_be64(mapping); | 985 | sgp->addr[j] = cpu_to_be64(mapping); |
986 | j ^= 1; | 986 | j ^= 1; |
987 | if (j == 0) | 987 | if (j == 0) |
@@ -1544,7 +1544,7 @@ static void deferred_unmap_destructor(struct sk_buff *skb) | |||
1544 | 1544 | ||
1545 | si = skb_shinfo(skb); | 1545 | si = skb_shinfo(skb); |
1546 | for (i = 0; i < si->nr_frags; i++) | 1546 | for (i = 0; i < si->nr_frags; i++) |
1547 | pci_unmap_page(dui->pdev, *p++, si->frags[i].size, | 1547 | pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]), |
1548 | PCI_DMA_TODEVICE); | 1548 | PCI_DMA_TODEVICE); |
1549 | } | 1549 | } |
1550 | 1550 | ||
@@ -2118,7 +2118,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, | |||
2118 | rx_frag += nr_frags; | 2118 | rx_frag += nr_frags; |
2119 | __skb_frag_set_page(rx_frag, sd->pg_chunk.page); | 2119 | __skb_frag_set_page(rx_frag, sd->pg_chunk.page); |
2120 | rx_frag->page_offset = sd->pg_chunk.offset + offset; | 2120 | rx_frag->page_offset = sd->pg_chunk.offset + offset; |
2121 | rx_frag->size = len; | 2121 | skb_frag_size_set(rx_frag, len); |
2122 | 2122 | ||
2123 | skb->len += len; | 2123 | skb->len += len; |
2124 | skb->data_len += len; | 2124 | skb->data_len += len; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 56adf448b9fe..14f31d3a18d7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
@@ -215,8 +215,8 @@ static int map_skb(struct device *dev, const struct sk_buff *skb, | |||
215 | end = &si->frags[si->nr_frags]; | 215 | end = &si->frags[si->nr_frags]; |
216 | 216 | ||
217 | for (fp = si->frags; fp < end; fp++) { | 217 | for (fp = si->frags; fp < end; fp++) { |
218 | *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size, | 218 | *++addr = dma_map_page(dev, fp->page, fp->page_offset, |
219 | DMA_TO_DEVICE); | 219 | skb_frag_size(fp), DMA_TO_DEVICE); |
220 | if (dma_mapping_error(dev, *addr)) | 220 | if (dma_mapping_error(dev, *addr)) |
221 | goto unwind; | 221 | goto unwind; |
222 | } | 222 | } |
@@ -224,7 +224,7 @@ static int map_skb(struct device *dev, const struct sk_buff *skb, | |||
224 | 224 | ||
225 | unwind: | 225 | unwind: |
226 | while (fp-- > si->frags) | 226 | while (fp-- > si->frags) |
227 | dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE); | 227 | dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); |
228 | 228 | ||
229 | dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); | 229 | dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); |
230 | out_err: | 230 | out_err: |
@@ -243,7 +243,7 @@ static void unmap_skb(struct device *dev, const struct sk_buff *skb, | |||
243 | si = skb_shinfo(skb); | 243 | si = skb_shinfo(skb); |
244 | end = &si->frags[si->nr_frags]; | 244 | end = &si->frags[si->nr_frags]; |
245 | for (fp = si->frags; fp < end; fp++) | 245 | for (fp = si->frags; fp < end; fp++) |
246 | dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE); | 246 | dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE); |
247 | } | 247 | } |
248 | 248 | ||
249 | /** | 249 | /** |
@@ -717,7 +717,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, | |||
717 | sgl->addr0 = cpu_to_be64(addr[0] + start); | 717 | sgl->addr0 = cpu_to_be64(addr[0] + start); |
718 | nfrags++; | 718 | nfrags++; |
719 | } else { | 719 | } else { |
720 | sgl->len0 = htonl(si->frags[0].size); | 720 | sgl->len0 = htonl(skb_frag_size(&si->frags[0])); |
721 | sgl->addr0 = cpu_to_be64(addr[1]); | 721 | sgl->addr0 = cpu_to_be64(addr[1]); |
722 | } | 722 | } |
723 | 723 | ||
@@ -732,13 +732,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, | |||
732 | to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; | 732 | to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; |
733 | 733 | ||
734 | for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { | 734 | for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { |
735 | to->len[0] = cpu_to_be32(si->frags[i].size); | 735 | to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); |
736 | to->len[1] = cpu_to_be32(si->frags[++i].size); | 736 | to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); |
737 | to->addr[0] = cpu_to_be64(addr[i]); | 737 | to->addr[0] = cpu_to_be64(addr[i]); |
738 | to->addr[1] = cpu_to_be64(addr[++i]); | 738 | to->addr[1] = cpu_to_be64(addr[++i]); |
739 | } | 739 | } |
740 | if (nfrags) { | 740 | if (nfrags) { |
741 | to->len[0] = cpu_to_be32(si->frags[i].size); | 741 | to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); |
742 | to->len[1] = cpu_to_be32(0); | 742 | to->len[1] = cpu_to_be32(0); |
743 | to->addr[0] = cpu_to_be64(addr[i + 1]); | 743 | to->addr[0] = cpu_to_be64(addr[i + 1]); |
744 | } | 744 | } |
@@ -1417,7 +1417,7 @@ static inline void copy_frags(struct skb_shared_info *ssi, | |||
1417 | /* usually there's just one frag */ | 1417 | /* usually there's just one frag */ |
1418 | ssi->frags[0].page = gl->frags[0].page; | 1418 | ssi->frags[0].page = gl->frags[0].page; |
1419 | ssi->frags[0].page_offset = gl->frags[0].page_offset + offset; | 1419 | ssi->frags[0].page_offset = gl->frags[0].page_offset + offset; |
1420 | ssi->frags[0].size = gl->frags[0].size - offset; | 1420 | skb_frag_size_set(&ssi->frags[0], skb_frag_size(&gl->frags[0]) - offset); |
1421 | ssi->nr_frags = gl->nfrags; | 1421 | ssi->nr_frags = gl->nfrags; |
1422 | n = gl->nfrags - 1; | 1422 | n = gl->nfrags - 1; |
1423 | if (n) | 1423 | if (n) |
@@ -1718,8 +1718,8 @@ static int process_responses(struct sge_rspq *q, int budget) | |||
1718 | bufsz = get_buf_size(rsd); | 1718 | bufsz = get_buf_size(rsd); |
1719 | fp->page = rsd->page; | 1719 | fp->page = rsd->page; |
1720 | fp->page_offset = q->offset; | 1720 | fp->page_offset = q->offset; |
1721 | fp->size = min(bufsz, len); | 1721 | skb_frag_size_set(fp, min(bufsz, len)); |
1722 | len -= fp->size; | 1722 | len -= skb_frag_size(fp); |
1723 | if (!len) | 1723 | if (!len) |
1724 | break; | 1724 | break; |
1725 | unmap_rx_buf(q->adap, &rxq->fl); | 1725 | unmap_rx_buf(q->adap, &rxq->fl); |
@@ -1731,7 +1731,7 @@ static int process_responses(struct sge_rspq *q, int budget) | |||
1731 | */ | 1731 | */ |
1732 | dma_sync_single_for_cpu(q->adap->pdev_dev, | 1732 | dma_sync_single_for_cpu(q->adap->pdev_dev, |
1733 | get_buf_addr(rsd), | 1733 | get_buf_addr(rsd), |
1734 | fp->size, DMA_FROM_DEVICE); | 1734 | skb_frag_size(fp), DMA_FROM_DEVICE); |
1735 | 1735 | ||
1736 | si.va = page_address(si.frags[0].page) + | 1736 | si.va = page_address(si.frags[0].page) + |
1737 | si.frags[0].page_offset; | 1737 | si.frags[0].page_offset; |
@@ -1740,7 +1740,7 @@ static int process_responses(struct sge_rspq *q, int budget) | |||
1740 | si.nfrags = frags + 1; | 1740 | si.nfrags = frags + 1; |
1741 | ret = q->handler(q, q->cur_desc, &si); | 1741 | ret = q->handler(q, q->cur_desc, &si); |
1742 | if (likely(ret == 0)) | 1742 | if (likely(ret == 0)) |
1743 | q->offset += ALIGN(fp->size, FL_ALIGN); | 1743 | q->offset += ALIGN(skb_frag_size(fp), FL_ALIGN); |
1744 | else | 1744 | else |
1745 | restore_rx_bufs(&si, &rxq->fl, frags); | 1745 | restore_rx_bufs(&si, &rxq->fl, frags); |
1746 | } else if (likely(rsp_type == RSP_TYPE_CPL)) { | 1746 | } else if (likely(rsp_type == RSP_TYPE_CPL)) { |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index cffb328c46c3..c2d456d90c00 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c | |||
@@ -296,8 +296,8 @@ static int map_skb(struct device *dev, const struct sk_buff *skb, | |||
296 | si = skb_shinfo(skb); | 296 | si = skb_shinfo(skb); |
297 | end = &si->frags[si->nr_frags]; | 297 | end = &si->frags[si->nr_frags]; |
298 | for (fp = si->frags; fp < end; fp++) { | 298 | for (fp = si->frags; fp < end; fp++) { |
299 | *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size, | 299 | *++addr = dma_map_page(dev, fp->page, fp->page_offset, |
300 | DMA_TO_DEVICE); | 300 | skb_frag_size(fp), DMA_TO_DEVICE); |
301 | if (dma_mapping_error(dev, *addr)) | 301 | if (dma_mapping_error(dev, *addr)) |
302 | goto unwind; | 302 | goto unwind; |
303 | } | 303 | } |
@@ -305,7 +305,7 @@ static int map_skb(struct device *dev, const struct sk_buff *skb, | |||
305 | 305 | ||
306 | unwind: | 306 | unwind: |
307 | while (fp-- > si->frags) | 307 | while (fp-- > si->frags) |
308 | dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE); | 308 | dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); |
309 | dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); | 309 | dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); |
310 | 310 | ||
311 | out_err: | 311 | out_err: |
@@ -899,7 +899,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq, | |||
899 | sgl->addr0 = cpu_to_be64(addr[0] + start); | 899 | sgl->addr0 = cpu_to_be64(addr[0] + start); |
900 | nfrags++; | 900 | nfrags++; |
901 | } else { | 901 | } else { |
902 | sgl->len0 = htonl(si->frags[0].size); | 902 | sgl->len0 = htonl(skb_frag_size(&si->frags[0])); |
903 | sgl->addr0 = cpu_to_be64(addr[1]); | 903 | sgl->addr0 = cpu_to_be64(addr[1]); |
904 | } | 904 | } |
905 | 905 | ||
@@ -915,13 +915,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq, | |||
915 | to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge; | 915 | to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge; |
916 | 916 | ||
917 | for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { | 917 | for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { |
918 | to->len[0] = cpu_to_be32(si->frags[i].size); | 918 | to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); |
919 | to->len[1] = cpu_to_be32(si->frags[++i].size); | 919 | to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); |
920 | to->addr[0] = cpu_to_be64(addr[i]); | 920 | to->addr[0] = cpu_to_be64(addr[i]); |
921 | to->addr[1] = cpu_to_be64(addr[++i]); | 921 | to->addr[1] = cpu_to_be64(addr[++i]); |
922 | } | 922 | } |
923 | if (nfrags) { | 923 | if (nfrags) { |
924 | to->len[0] = cpu_to_be32(si->frags[i].size); | 924 | to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); |
925 | to->len[1] = cpu_to_be32(0); | 925 | to->len[1] = cpu_to_be32(0); |
926 | to->addr[0] = cpu_to_be64(addr[i + 1]); | 926 | to->addr[0] = cpu_to_be64(addr[i + 1]); |
927 | } | 927 | } |
@@ -1399,7 +1399,7 @@ struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl, | |||
1399 | ssi = skb_shinfo(skb); | 1399 | ssi = skb_shinfo(skb); |
1400 | ssi->frags[0].page = gl->frags[0].page; | 1400 | ssi->frags[0].page = gl->frags[0].page; |
1401 | ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len; | 1401 | ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len; |
1402 | ssi->frags[0].size = gl->frags[0].size - pull_len; | 1402 | skb_frag_size_set(&ssi->frags[0], skb_frag_size(&gl->frags[0]) - pull_len); |
1403 | if (gl->nfrags > 1) | 1403 | if (gl->nfrags > 1) |
1404 | memcpy(&ssi->frags[1], &gl->frags[1], | 1404 | memcpy(&ssi->frags[1], &gl->frags[1], |
1405 | (gl->nfrags-1) * sizeof(skb_frag_t)); | 1405 | (gl->nfrags-1) * sizeof(skb_frag_t)); |
@@ -1451,7 +1451,7 @@ static inline void copy_frags(struct skb_shared_info *si, | |||
1451 | /* usually there's just one frag */ | 1451 | /* usually there's just one frag */ |
1452 | si->frags[0].page = gl->frags[0].page; | 1452 | si->frags[0].page = gl->frags[0].page; |
1453 | si->frags[0].page_offset = gl->frags[0].page_offset + offset; | 1453 | si->frags[0].page_offset = gl->frags[0].page_offset + offset; |
1454 | si->frags[0].size = gl->frags[0].size - offset; | 1454 | skb_frag_size_set(&si->frags[0], skb_frag_size(&gl->frags[0]) - offset); |
1455 | si->nr_frags = gl->nfrags; | 1455 | si->nr_frags = gl->nfrags; |
1456 | 1456 | ||
1457 | n = gl->nfrags - 1; | 1457 | n = gl->nfrags - 1; |
@@ -1702,8 +1702,8 @@ int process_responses(struct sge_rspq *rspq, int budget) | |||
1702 | bufsz = get_buf_size(sdesc); | 1702 | bufsz = get_buf_size(sdesc); |
1703 | fp->page = sdesc->page; | 1703 | fp->page = sdesc->page; |
1704 | fp->page_offset = rspq->offset; | 1704 | fp->page_offset = rspq->offset; |
1705 | fp->size = min(bufsz, len); | 1705 | skb_frag_size_set(fp, min(bufsz, len)); |
1706 | len -= fp->size; | 1706 | len -= skb_frag_size(fp); |
1707 | if (!len) | 1707 | if (!len) |
1708 | break; | 1708 | break; |
1709 | unmap_rx_buf(rspq->adapter, &rxq->fl); | 1709 | unmap_rx_buf(rspq->adapter, &rxq->fl); |
@@ -1717,7 +1717,7 @@ int process_responses(struct sge_rspq *rspq, int budget) | |||
1717 | */ | 1717 | */ |
1718 | dma_sync_single_for_cpu(rspq->adapter->pdev_dev, | 1718 | dma_sync_single_for_cpu(rspq->adapter->pdev_dev, |
1719 | get_buf_addr(sdesc), | 1719 | get_buf_addr(sdesc), |
1720 | fp->size, DMA_FROM_DEVICE); | 1720 | skb_frag_size(fp), DMA_FROM_DEVICE); |
1721 | gl.va = (page_address(gl.frags[0].page) + | 1721 | gl.va = (page_address(gl.frags[0].page) + |
1722 | gl.frags[0].page_offset); | 1722 | gl.frags[0].page_offset); |
1723 | prefetch(gl.va); | 1723 | prefetch(gl.va); |
@@ -1728,7 +1728,7 @@ int process_responses(struct sge_rspq *rspq, int budget) | |||
1728 | */ | 1728 | */ |
1729 | ret = rspq->handler(rspq, rspq->cur_desc, &gl); | 1729 | ret = rspq->handler(rspq, rspq->cur_desc, &gl); |
1730 | if (likely(ret == 0)) | 1730 | if (likely(ret == 0)) |
1731 | rspq->offset += ALIGN(fp->size, FL_ALIGN); | 1731 | rspq->offset += ALIGN(skb_frag_size(fp), FL_ALIGN); |
1732 | else | 1732 | else |
1733 | restore_rx_bufs(&gl, &rxq->fl, frag); | 1733 | restore_rx_bufs(&gl, &rxq->fl, frag); |
1734 | } else if (likely(rsp_type == RSP_TYPE_CPL)) { | 1734 | } else if (likely(rsp_type == RSP_TYPE_CPL)) { |