about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: Ross Lagerwall <ross.lagerwall@citrix.com> 2016-10-04 05:29:18 -0400
committer: David S. Miller <davem@davemloft.net> 2016-10-06 20:37:36 -0400
commit2167ca029c2449018314fdf8637c1eb3f123036e (patch)
tree1abce754bca4f063d9f54040b8cd3c4188b81f12
parenta37f12298c251a48bc74d4012e07bf0d78175f46 (diff)
xen/netback: add fraglist support for to-guest rx
This allows full 64K skbuffs (with 1500 mtu ethernet, composed of 45 fragments) to be handled by netback for to-guest rx. Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com> [re-based] Signed-off-by: Paul Durrant <paul.durrant@citrix.com> Reviewed-by: David Vrabel <david.vrabel@citrix.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- drivers/net/xen-netback/interface.c | 2
-rw-r--r-- drivers/net/xen-netback/rx.c | 38
2 files changed, 30 insertions, 10 deletions
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 211d542a830b..4af532a67d95 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -467,7 +467,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
467 dev->netdev_ops = &xenvif_netdev_ops; 467 dev->netdev_ops = &xenvif_netdev_ops;
468 dev->hw_features = NETIF_F_SG | 468 dev->hw_features = NETIF_F_SG |
469 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 469 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
470 NETIF_F_TSO | NETIF_F_TSO6; 470 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
471 dev->features = dev->hw_features | NETIF_F_RXCSUM; 471 dev->features = dev->hw_features | NETIF_F_RXCSUM;
472 dev->ethtool_ops = &xenvif_ethtool_ops; 472 dev->ethtool_ops = &xenvif_ethtool_ops;
473 473
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index 8c8c5b5883eb..8e9ade6ccf18 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -215,7 +215,8 @@ static unsigned int xenvif_gso_type(struct sk_buff *skb)
215struct xenvif_pkt_state { 215struct xenvif_pkt_state {
216 struct sk_buff *skb; 216 struct sk_buff *skb;
217 size_t remaining_len; 217 size_t remaining_len;
218 int frag; /* frag == -1 => skb->head */ 218 struct sk_buff *frag_iter;
219 int frag; /* frag == -1 => frag_iter->head */
219 unsigned int frag_offset; 220 unsigned int frag_offset;
220 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; 221 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
221 unsigned int extra_count; 222 unsigned int extra_count;
@@ -237,6 +238,7 @@ static void xenvif_rx_next_skb(struct xenvif_queue *queue,
237 memset(pkt, 0, sizeof(struct xenvif_pkt_state)); 238 memset(pkt, 0, sizeof(struct xenvif_pkt_state));
238 239
239 pkt->skb = skb; 240 pkt->skb = skb;
241 pkt->frag_iter = skb;
240 pkt->remaining_len = skb->len; 242 pkt->remaining_len = skb->len;
241 pkt->frag = -1; 243 pkt->frag = -1;
242 244
@@ -293,20 +295,40 @@ static void xenvif_rx_complete(struct xenvif_queue *queue,
293 __skb_queue_tail(queue->rx_copy.completed, pkt->skb); 295 __skb_queue_tail(queue->rx_copy.completed, pkt->skb);
294} 296}
295 297
298static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
299{
300 struct sk_buff *frag_iter = pkt->frag_iter;
301 unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;
302
303 pkt->frag++;
304 pkt->frag_offset = 0;
305
306 if (pkt->frag >= nr_frags) {
307 if (frag_iter == pkt->skb)
308 pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
309 else
310 pkt->frag_iter = frag_iter->next;
311
312 pkt->frag = -1;
313 }
314}
315
296static void xenvif_rx_next_chunk(struct xenvif_queue *queue, 316static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
297 struct xenvif_pkt_state *pkt, 317 struct xenvif_pkt_state *pkt,
298 unsigned int offset, void **data, 318 unsigned int offset, void **data,
299 size_t *len) 319 size_t *len)
300{ 320{
301 struct sk_buff *skb = pkt->skb; 321 struct sk_buff *frag_iter = pkt->frag_iter;
302 void *frag_data; 322 void *frag_data;
303 size_t frag_len, chunk_len; 323 size_t frag_len, chunk_len;
304 324
325 BUG_ON(!frag_iter);
326
305 if (pkt->frag == -1) { 327 if (pkt->frag == -1) {
306 frag_data = skb->data; 328 frag_data = frag_iter->data;
307 frag_len = skb_headlen(skb); 329 frag_len = skb_headlen(frag_iter);
308 } else { 330 } else {
309 skb_frag_t *frag = &skb_shinfo(skb)->frags[pkt->frag]; 331 skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];
310 332
311 frag_data = skb_frag_address(frag); 333 frag_data = skb_frag_address(frag);
312 frag_len = skb_frag_size(frag); 334 frag_len = skb_frag_size(frag);
@@ -322,10 +344,8 @@ static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
322 pkt->frag_offset += chunk_len; 344 pkt->frag_offset += chunk_len;
323 345
324 /* Advance to next frag? */ 346 /* Advance to next frag? */
325 if (frag_len == chunk_len) { 347 if (frag_len == chunk_len)
326 pkt->frag++; 348 xenvif_rx_next_frag(pkt);
327 pkt->frag_offset = 0;
328 }
329 349
330 *data = frag_data; 350 *data = frag_data;
331 *len = chunk_len; 351 *len = chunk_len;