author		Annie Li <annie.li@oracle.com>	2014-01-27 22:35:42 -0500
committer	David S. Miller <davem@davemloft.net>	2014-01-27 22:48:45 -0500
commit		cefe0078eea52af17411eb1248946a94afb84ca5 (patch)
tree		d2eb8ccdfee40d0dbb043cacee7cb67e40ebce4b /drivers/net/xen-netfront.c
parent		ce60e0c4df5f95086d5c2662c5cfa0beb8181c6d (diff)
xen-netfront: fix resource leak in netfront
This patch removes the grant transfer releasing code from netfront, and
uses gnttab_end_foreign_access to end grant access, since
gnttab_end_foreign_access_ref may fail when the grant entry is
currently used for reading or writing.

* Clean up grant transfer code kept from the old netfront (2.6.18),
  which granted pages for access/map and transfer. Grant transfer is
  deprecated in the current netfront, so remove the corresponding
  release code for transfer.

* Fix the resource leak: release grant access (through
  gnttab_end_foreign_access) and the skb for the tx/rx path, and use
  get_page to ensure the page is not released until grant access has
  been ended successfully.

Xen-blkfront/xen-tpmfront/xen-pcifront have a similar issue, but
patches for them will be created separately.

V6: Correct subject line and commit message.
V5: Remove unnecessary change in xennet_end_access.
V4: Revert put_page in gnttab_end_foreign_access, and keep the netfront
    change in a single patch.
V3: Changes as suggested by David Vrabel; ensure pages are not freed
    until grant access is ended.
V2: Improve patch comments.

Signed-off-by: Annie Li <annie.li@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
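For context, the fix hinges on gnttab_end_foreign_access(), which, unlike
gnttab_end_foreign_access_ref(), can defer revoking a grant the backend
still holds and frees the passed-in page only once access has actually
ended. A minimal sketch of the pattern follows, assuming the v3.13-era
grant-table API used in the diff below; release_granted_page() is a
hypothetical helper, not part of this patch:

	#include <linux/mm.h>		/* get_page(), page_address() */
	#include <xen/grant_table.h>	/* gnttab_end_foreign_access() */

	/* Hypothetical helper sketching the release pattern of this
	 * patch. get_page() takes an extra reference that
	 * gnttab_end_foreign_access() consumes: the page is freed
	 * immediately if the grant can be ended now, or lazily once the
	 * backend stops using it.
	 */
	static void release_granted_page(grant_ref_t ref,
					 struct page *page, int readonly)
	{
		get_page(page);
		gnttab_end_foreign_access(ref, readonly,
					  (unsigned long)page_address(page));
	}

Taking the extra reference up front means the page outlives the skb that
originally owned it, so nothing is handed back to the allocator while the
backend may still be reading or writing it.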
Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--	drivers/net/xen-netfront.c	88
1 file changed, 26 insertions, 62 deletions
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e955c5692986..ff04d4f95baa 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -117,6 +117,7 @@ struct netfront_info {
 	} tx_skbs[NET_TX_RING_SIZE];
 	grant_ref_t gref_tx_head;
 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+	struct page *grant_tx_page[NET_TX_RING_SIZE];
 	unsigned tx_skb_freelist;
 
 	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
@@ -396,6 +397,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
 			gnttab_release_grant_reference(
 				&np->gref_tx_head, np->grant_tx_ref[id]);
 			np->grant_tx_ref[id] = GRANT_INVALID_REF;
+			np->grant_tx_page[id] = NULL;
 			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
 			dev_kfree_skb_irq(skb);
 		}
@@ -452,6 +454,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
 						mfn, GNTMAP_readonly);
 
+		np->grant_tx_page[id] = virt_to_page(data);
 		tx->gref = np->grant_tx_ref[id] = ref;
 		tx->offset = offset;
 		tx->size = len;
@@ -497,6 +500,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 							np->xbdev->otherend_id,
 							mfn, GNTMAP_readonly);
 
+			np->grant_tx_page[id] = page;
 			tx->gref = np->grant_tx_ref[id] = ref;
 			tx->offset = offset;
 			tx->size = bytes;
@@ -596,6 +600,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	mfn = virt_to_mfn(data);
 	gnttab_grant_foreign_access_ref(
 		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+	np->grant_tx_page[id] = virt_to_page(data);
 	tx->gref = np->grant_tx_ref[id] = ref;
 	tx->offset = offset;
 	tx->size = len;
@@ -1085,10 +1090,11 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
 			continue;
 
 		skb = np->tx_skbs[i].skb;
-		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
-					      GNTMAP_readonly);
-		gnttab_release_grant_reference(&np->gref_tx_head,
-					       np->grant_tx_ref[i]);
+		get_page(np->grant_tx_page[i]);
+		gnttab_end_foreign_access(np->grant_tx_ref[i],
+					  GNTMAP_readonly,
+					  (unsigned long)page_address(np->grant_tx_page[i]));
+		np->grant_tx_page[i] = NULL;
 		np->grant_tx_ref[i] = GRANT_INVALID_REF;
 		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
 		dev_kfree_skb_irq(skb);
@@ -1097,78 +1103,35 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
 
 static void xennet_release_rx_bufs(struct netfront_info *np)
 {
-	struct mmu_update      *mmu = np->rx_mmu;
-	struct multicall_entry *mcl = np->rx_mcl;
-	struct sk_buff_head free_list;
-	struct sk_buff *skb;
-	unsigned long mfn;
-	int xfer = 0, noxfer = 0, unused = 0;
 	int id, ref;
 
-	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
-		 __func__);
-	return;
-
-	skb_queue_head_init(&free_list);
-
 	spin_lock_bh(&np->rx_lock);
 
 	for (id = 0; id < NET_RX_RING_SIZE; id++) {
-		ref = np->grant_rx_ref[id];
-		if (ref == GRANT_INVALID_REF) {
-			unused++;
-			continue;
-		}
+		struct sk_buff *skb;
+		struct page *page;
 
 		skb = np->rx_skbs[id];
-		mfn = gnttab_end_foreign_transfer_ref(ref);
-		gnttab_release_grant_reference(&np->gref_rx_head, ref);
-		np->grant_rx_ref[id] = GRANT_INVALID_REF;
-
-		if (0 == mfn) {
-			skb_shinfo(skb)->nr_frags = 0;
-			dev_kfree_skb(skb);
-			noxfer++;
+		if (!skb)
 			continue;
-		}
 
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* Remap the page. */
-			const struct page *page =
-				skb_frag_page(&skb_shinfo(skb)->frags[0]);
-			unsigned long pfn = page_to_pfn(page);
-			void *vaddr = page_address(page);
+		ref = np->grant_rx_ref[id];
+		if (ref == GRANT_INVALID_REF)
+			continue;
 
-			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
-						mfn_pte(mfn, PAGE_KERNEL),
-						0);
-			mcl++;
-			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
-				| MMU_MACHPHYS_UPDATE;
-			mmu->val = pfn;
-			mmu++;
+		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 
-			set_phys_to_machine(pfn, mfn);
-		}
-		__skb_queue_tail(&free_list, skb);
-		xfer++;
-	}
-
-	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
-		 __func__, xfer, noxfer, unused);
+		/* gnttab_end_foreign_access() needs a page ref until
+		 * foreign access is ended (which may be deferred).
+		 */
+		get_page(page);
+		gnttab_end_foreign_access(ref, 0,
+					  (unsigned long)page_address(page));
+		np->grant_rx_ref[id] = GRANT_INVALID_REF;
 
-	if (xfer) {
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* Do all the remapping work and M2P updates. */
-			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
-					 NULL, DOMID_SELF);
-			mcl++;
-			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
-		}
+		kfree_skb(skb);
 	}
 
-	__skb_queue_purge(&free_list);
-
 	spin_unlock_bh(&np->rx_lock);
 }
 
@@ -1339,6 +1302,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
 		np->rx_skbs[i] = NULL;
 		np->grant_rx_ref[i] = GRANT_INVALID_REF;
+		np->grant_tx_page[i] = NULL;
 	}
 
 	/* A grant for every tx ring slot */