aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author    Matthew Daley <mattjd@gmail.com>    2013-02-06 18:41:36 -0500
committer David S. Miller <davem@davemloft.net>    2013-02-07 23:29:28 -0500
commit    7d5145d8eb2b9791533ffe4dc003b129b9696c48 (patch)
tree      eb538cbb2a3d76ae2085c22bf6bc5804ab284e5c
parent    48856286b64e4b66ec62b94e504d0b29c1ade664 (diff)
xen/netback: don't leak pages on failure in xen_netbk_tx_check_gop.
Signed-off-by: Matthew Daley <mattjd@gmail.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Jan Beulich <JBeulich@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--    drivers/net/xen-netback/netback.c | 38
1 file changed, 13 insertions, 25 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index c2e3336b4f98..bf692df966a9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
 	atomic_dec(&netbk->netfront_count);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+				  u8 status);
 static void make_tx_response(struct xenvif *vif,
 			     struct xen_netif_tx_request *txp,
 			     s8 st);
@@ -1007,30 +1008,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 {
 	struct gnttab_copy *gop = *gopp;
 	u16 pending_idx = *((u16 *)skb->data);
-	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
-	struct xenvif *vif = pending_tx_info[pending_idx].vif;
-	struct xen_netif_tx_request *txp;
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	int nr_frags = shinfo->nr_frags;
 	int i, err, start;
 
 	/* Check status of header. */
 	err = gop->status;
-	if (unlikely(err)) {
-		pending_ring_idx_t index;
-		index = pending_index(netbk->pending_prod++);
-		txp = &pending_tx_info[pending_idx].req;
-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		netbk->pending_ring[index] = pending_idx;
-		xenvif_put(vif);
-	}
+	if (unlikely(err))
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
 	for (i = start; i < nr_frags; i++) {
 		int j, newerr;
-		pending_ring_idx_t index;
 
 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 
@@ -1039,16 +1030,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 		if (likely(!newerr)) {
 			/* Had a previous error? Invalidate this fragment. */
 			if (unlikely(err))
-				xen_netbk_idx_release(netbk, pending_idx);
+				xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 			continue;
 		}
 
 		/* Error on this fragment: respond to client with an error. */
-		txp = &netbk->pending_tx_info[pending_idx].req;
-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		index = pending_index(netbk->pending_prod++);
-		netbk->pending_ring[index] = pending_idx;
-		xenvif_put(vif);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
 		/* Not the first error? Preceding frags already invalidated. */
 		if (err)
@@ -1056,10 +1043,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 
 		/* First error: invalidate header and preceding fragments. */
 		pending_idx = *((u16 *)skb->data);
-		xen_netbk_idx_release(netbk, pending_idx);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 		for (j = start; j < i; j++) {
 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-			xen_netbk_idx_release(netbk, pending_idx);
+			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 		}
 
 		/* Remember the error: invalidate all subsequent fragments. */
@@ -1093,7 +1080,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
 
 		/* Take an extra reference to offset xen_netbk_idx_release */
 		get_page(netbk->mmap_pages[pending_idx]);
-		xen_netbk_idx_release(netbk, pending_idx);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 	}
 }
 
@@ -1476,7 +1463,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
 			txp->size -= data_len;
 		} else {
 			/* Schedule a response immediately. */
-			xen_netbk_idx_release(netbk, pending_idx);
+			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 		}
 
 		if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1528,7 +1515,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
 	xen_netbk_tx_submit(netbk);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+				  u8 status)
 {
 	struct xenvif *vif;
 	struct pending_tx_info *pending_tx_info;
@@ -1542,7 +1530,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
 
 	vif = pending_tx_info->vif;
 
-	make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+	make_tx_response(vif, &pending_tx_info->req, status);
 
 	index = pending_index(netbk->pending_prod++);
 	netbk->pending_ring[index] = pending_idx;