about summary refs log tree commit diff stats
path: root/drivers/net/xen-netback/netback.c
diff options
context:
space:
mode:
author	Igor Druzhinin <igor.druzhinin@citrix.com>	2019-02-28 07:48:03 -0500
committer	David S. Miller <davem@davemloft.net>	2019-02-28 13:36:38 -0500
commit	99e87f56b48f490fb16b6e0f74691c1e664dea95 (patch)
tree	6fb6efe46efded920b4627389f47f9dfc7b72ad9 /drivers/net/xen-netback/netback.c
parent	ac5105052dc8be5cef34d79e1f4186d39b2f3ca3 (diff)
xen-netback: fix occasional leak of grant ref mappings under memory pressure
Zero-copy callback flag is not yet set on frag list skb at the moment xenvif_handle_frag_list() returns -ENOMEM. This eventually results in leaking grant ref mappings since xenvif_zerocopy_callback() is never called for these fragments. Those eventually build up and cause Xen to kill Dom0 as the slots get reused for new mappings: "d0v0 Attempt to implicitly unmap a granted PTE c010000329fce005" That behavior is observed under certain workloads where sudden spikes of page cache writes coexist with active atomic skb allocations from network traffic. Additionally, rework the logic to deal with frag_list deallocation in a single place. Signed-off-by: Paul Durrant <paul.durrant@citrix.com> Signed-off-by: Igor Druzhinin <igor.druzhinin@citrix.com> Acked-by: Wei Liu <wei.liu2@citrix.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--	drivers/net/xen-netback/netback.c	| 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 80aae3a32c2a..f09948b009dd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1072,11 +1072,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 		skb_frag_size_set(&frags[i], len);
 	}
 
-	/* Copied all the bits from the frag list -- free it. */
-	skb_frag_list_init(skb);
-	xenvif_skb_zerocopy_prepare(queue, nskb);
-	kfree_skb(nskb);
-
 	/* Release all the original (foreign) frags. */
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 		skb_frag_unref(skb, f);
@@ -1145,6 +1140,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 		xenvif_fill_frags(queue, skb);
 
 		if (unlikely(skb_has_frag_list(skb))) {
+			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+			xenvif_skb_zerocopy_prepare(queue, nskb);
 			if (xenvif_handle_frag_list(queue, skb)) {
 				if (net_ratelimit())
 					netdev_err(queue->vif->dev,
@@ -1153,6 +1150,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 				kfree_skb(skb);
 				continue;
 			}
+			/* Copied all the bits from the frag list -- free it. */
+			skb_frag_list_init(skb);
+			kfree_skb(nskb);
 		}
 
 		skb->dev      = queue->vif->dev;