aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/emulex
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2011-10-18 17:00:24 -0400
committerDavid S. Miller <davem@davemloft.net>2011-10-19 03:10:46 -0400
commit9e903e085262ffbf1fc44a17ac06058aca03524a (patch)
tree4acefc97ba38c1733474d25c0b2053b56af97db1 /drivers/net/ethernet/emulex
parentdd767856a36e00b631d65ebc4bb81b19915532d6 (diff)
net: add skb frag size accessors
To ease skb->truesize sanitization, it's better to be able to localize all references to skb frags size. Define accessors : skb_frag_size() to fetch frag size, and skb_frag_size_{set|add|sub}() to manipulate it. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/emulex')
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c18
1 file changed, 9 insertions, 9 deletions
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 679b8041e43a..706fc5989939 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -636,17 +636,17 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
636 } 636 }
637 637
638 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 638 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
639 struct skb_frag_struct *frag = 639 const struct skb_frag_struct *frag =
640 &skb_shinfo(skb)->frags[i]; 640 &skb_shinfo(skb)->frags[i];
641 busaddr = skb_frag_dma_map(dev, frag, 0, 641 busaddr = skb_frag_dma_map(dev, frag, 0,
642 frag->size, DMA_TO_DEVICE); 642 skb_frag_size(frag), DMA_TO_DEVICE);
643 if (dma_mapping_error(dev, busaddr)) 643 if (dma_mapping_error(dev, busaddr))
644 goto dma_err; 644 goto dma_err;
645 wrb = queue_head_node(txq); 645 wrb = queue_head_node(txq);
646 wrb_fill(wrb, busaddr, frag->size); 646 wrb_fill(wrb, busaddr, skb_frag_size(frag));
647 be_dws_cpu_to_le(wrb, sizeof(*wrb)); 647 be_dws_cpu_to_le(wrb, sizeof(*wrb));
648 queue_head_inc(txq); 648 queue_head_inc(txq);
649 copied += frag->size; 649 copied += skb_frag_size(frag);
650 } 650 }
651 651
652 if (dummy_wrb) { 652 if (dummy_wrb) {
@@ -1069,7 +1069,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1069 skb_frag_set_page(skb, 0, page_info->page); 1069 skb_frag_set_page(skb, 0, page_info->page);
1070 skb_shinfo(skb)->frags[0].page_offset = 1070 skb_shinfo(skb)->frags[0].page_offset =
1071 page_info->page_offset + hdr_len; 1071 page_info->page_offset + hdr_len;
1072 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len; 1072 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1073 skb->data_len = curr_frag_len - hdr_len; 1073 skb->data_len = curr_frag_len - hdr_len;
1074 skb->truesize += rx_frag_size; 1074 skb->truesize += rx_frag_size;
1075 skb->tail += hdr_len; 1075 skb->tail += hdr_len;
@@ -1095,13 +1095,13 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1095 skb_frag_set_page(skb, j, page_info->page); 1095 skb_frag_set_page(skb, j, page_info->page);
1096 skb_shinfo(skb)->frags[j].page_offset = 1096 skb_shinfo(skb)->frags[j].page_offset =
1097 page_info->page_offset; 1097 page_info->page_offset;
1098 skb_shinfo(skb)->frags[j].size = 0; 1098 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1099 skb_shinfo(skb)->nr_frags++; 1099 skb_shinfo(skb)->nr_frags++;
1100 } else { 1100 } else {
1101 put_page(page_info->page); 1101 put_page(page_info->page);
1102 } 1102 }
1103 1103
1104 skb_shinfo(skb)->frags[j].size += curr_frag_len; 1104 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1105 skb->len += curr_frag_len; 1105 skb->len += curr_frag_len;
1106 skb->data_len += curr_frag_len; 1106 skb->data_len += curr_frag_len;
1107 skb->truesize += rx_frag_size; 1107 skb->truesize += rx_frag_size;
@@ -1176,11 +1176,11 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1176 skb_frag_set_page(skb, j, page_info->page); 1176 skb_frag_set_page(skb, j, page_info->page);
1177 skb_shinfo(skb)->frags[j].page_offset = 1177 skb_shinfo(skb)->frags[j].page_offset =
1178 page_info->page_offset; 1178 page_info->page_offset;
1179 skb_shinfo(skb)->frags[j].size = 0; 1179 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1180 } else { 1180 } else {
1181 put_page(page_info->page); 1181 put_page(page_info->page);
1182 } 1182 }
1183 skb_shinfo(skb)->frags[j].size += curr_frag_len; 1183 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1184 skb->truesize += rx_frag_size; 1184 skb->truesize += rx_frag_size;
1185 remaining -= curr_frag_len; 1185 remaining -= curr_frag_len;
1186 index_inc(&rxcp->rxq_idx, rxq->len); 1186 index_inc(&rxcp->rxq_idx, rxq->len);