 drivers/net/xen-netback/netback.c | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 45755f9aa3f9..4a509f715fe8 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -82,6 +82,16 @@ MODULE_PARM_DESC(max_queues,
 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
 module_param(fatal_skb_slots, uint, 0444);
 
+/* The amount to copy out of the first guest Tx slot into the skb's
+ * linear area. If the first slot has more data, it will be mapped
+ * and put into the first frag.
+ *
+ * This is sized to avoid pulling headers from the frags for most
+ * TCP/IP packets.
+ */
+#define XEN_NETBACK_TX_COPY_LEN 128
+
+
 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
                                u8 status);
 
@@ -125,13 +135,6 @@ static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
                             pending_tx_info[0]);
 }
 
-/* This is a miniumum size for the linear area to avoid lots of
- * calls to __pskb_pull_tail() as we set up checksum offsets. The
- * value 128 was chosen as it covers all IPv4 and most likely
- * IPv6 headers.
- */
-#define PKT_PROT_LEN 128
-
 static u16 frag_get_pending_idx(skb_frag_t *frag)
 {
         return (u16)frag->page_offset;
@@ -1446,9 +1449,9 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                 index = pending_index(queue->pending_cons);
                 pending_idx = queue->pending_ring[index];
 
-                data_len = (txreq.size > PKT_PROT_LEN &&
+                data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
                             ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
-                        PKT_PROT_LEN : txreq.size;
+                        XEN_NETBACK_TX_COPY_LEN : txreq.size;
 
                 skb = xenvif_alloc_skb(data_len);
                 if (unlikely(skb == NULL)) {
@@ -1653,11 +1656,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
                         }
                 }
 
-                if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
-                        int target = min_t(int, skb->len, PKT_PROT_LEN);
-                        __pskb_pull_tail(skb, target - skb_headlen(skb));
-                }
-
                 skb->dev = queue->vif->dev;
                 skb->protocol = eth_type_trans(skb, skb->dev);
                 skb_reset_network_header(skb);
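
The net effect of the patch is that the backend copies at most XEN_NETBACK_TX_COPY_LEN (128) bytes of the first guest Tx slot into the skb's linear area and leaves the remainder for the first frag, rather than relying on a later unconditional __pskb_pull_tail() against PKT_PROT_LEN. Below is a minimal, stand-alone sketch of the copy-length decision from xenvif_tx_build_gops(); it is illustrative only, and the value used for XEN_NETBK_LEGACY_SLOTS_MAX (18) is an assumption taken from the Xen netif headers, not something defined in this patch.

/* Illustrative sketch (not kernel code): how data_len is chosen for the
 * skb linear area after this patch.
 */
#include <stdio.h>

#define XEN_NETBACK_TX_COPY_LEN 128
#define XEN_NETBK_LEGACY_SLOTS_MAX 18  /* assumed value (XEN_NETIF_NR_SLOTS_MIN) */

static unsigned int tx_copy_len(unsigned int txreq_size, int nr_slots)
{
        /* Copy at most XEN_NETBACK_TX_COPY_LEN bytes into the linear area;
         * anything beyond that in the first slot ends up in the first frag.
         */
        if (txreq_size > XEN_NETBACK_TX_COPY_LEN &&
            nr_slots < XEN_NETBK_LEGACY_SLOTS_MAX)
                return XEN_NETBACK_TX_COPY_LEN;
        return txreq_size;
}

int main(void)
{
        printf("%u\n", tx_copy_len(66, 1));    /* small request: copied whole (66) */
        printf("%u\n", tx_copy_len(1514, 2));  /* large request: capped at 128 */
        return 0;
}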