author    Linus Torvalds <torvalds@linux-foundation.org>  2013-11-13 03:40:34 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-11-13 03:40:34 -0500
commit    42a2d923cc349583ebf6fdd52a7d35e1c2f7e6bd (patch)
tree      2b2b0c03b5389c1301800119333967efafd994ca /drivers/net/virtio_net.c
parent    5cbb3d216e2041700231bcfc383ee5f8b7fc8b74 (diff)
parent    75ecab1df14d90e86cebef9ec5c76befde46e65f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) The addition of nftables.  No longer will we need protocol aware
    firewall filtering modules, it can all live in userspace.

    At the core of nftables is a, for lack of a better term, virtual
    machine that executes byte codes to inspect packets or metadata
    (arriving interface index, etc.) and make verdict decisions.

    Besides support for loading packet contents and comparing them, the
    interpreter supports lookups in various data structures as
    fundamental operations.  For example, sets are supported, so one
    could create a set of whitelisted IP address entries with ACCEPT
    verdicts attached to them and use the appropriate byte codes to do
    such lookups.

    Since the interpreted code is composed in userspace, userspace can
    do things like optimize the rules before giving them to the kernel.

    Another major improvement is the capability of atomically updating
    portions of the ruleset.  In the existing netfilter implementation,
    one has to update the entire rule set in order to make a change,
    which is very expensive.

    Userspace tools exist to create nftables rules from existing
    netfilter rule sets, but both kernel implementations will need to
    co-exist for quite some time as we transition from the old to the
    new stuff.

    Kudos to Patrick McHardy, Pablo Neira Ayuso, and others who have
    worked so hard on this.

 2) Daniel Borkmann and Hannes Frederic Sowa made several improvements
    to our pseudo-random number generator, used for things like UDP
    port randomization and netfilter, amongst other things.  In
    particular the taus88 generator is updated to taus113, and test
    cases are added.

 3) Support 64-bit rates in the HTB and TBF schedulers, from Eric
    Dumazet and Yang Yingliang.

 4) Add support for new 577xx tigon3 chips to the tg3 driver, from
    Nithin Sujir.

 5) Fix two fatal flaws in TCP dynamic right sizing, from Eric Dumazet,
    Neal Cardwell, and Yuchung Cheng.

 6) Allow IP_TOS and IP_TTL to be specified in sendmsg() ancillary
    control message data, much like other socket option attributes.
    From Francesco Fusco.

 7) Allow applications to specify a cap on the rate computed
    automatically by the kernel for pacing flows, via a new
    SO_MAX_PACING_RATE socket option.  From Eric Dumazet.  (See the
    userspace sketch of items 6 and 7 after the shortlog below.)

 8) Make the initial autotuned send buffer sizing in TCP more closely
    reflect actual needs, from Eric Dumazet.

 9) Currently early socket demux only happens for TCP sockets, but we
    can do it for connected UDP sockets too.  Implementation from Shawn
    Bohrer.

10) Refactor inet socket demux with the goal of improving hash demux
    performance for listening sockets, the main goals being the ability
    to use RCU lookups even on request sockets and the elimination of
    the listening lock contention.  From Eric Dumazet.

11) The bonding layer has many demuxes in its fast path, and an RCU
    conversion was started back in 3.11; several changes here extend
    the RCU usage to even more locations.  From Ding Tianhong and Wang
    Yufen, based upon suggestions by Nikolay Aleksandrov and Veaceslav
    Falico.

12) Allow stackability of segmentation offloads to, in particular,
    allow segmentation offloading over tunnels.  From Eric Dumazet.

13) Significantly improve the handling of the secret keys we input into
    the various hash functions in the inet hashtables, TCP fast open,
    as well as syncookies.  From Hannes Frederic Sowa.  The key
    fundamental operation is "net_get_random_once()" which uses static
    keys.  Hannes even extended this to ipv4/ipv6 fragmentation
    handling and our generic flow dissector.

14) The generic driver layer now takes care of setting the driver data
    to NULL on device removal, so drivers no longer need to clear it
    explicitly.  Many drivers have been cleaned up in this way, from
    Jingoo Han.

15) Add a BPF based packet scheduler classifier, from Daniel Borkmann.

16) Improve CRC32 interfaces and generic SKB checksum iterators so that
    SCTP's checksumming can be handled more cleanly.  Also from Daniel
    Borkmann.

17) Add a new PMTU discovery mode, IP_PMTUDISC_INTERFACE, which forces
    using the interface MTU value.  This helps avoid PMTU attacks,
    particularly on DNS servers.  From Hannes Frederic Sowa.

18) Use generic XPS for transmit queue steering rather than an internal
    (re-)implementation in virtio-net.  From Jason Wang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1622 commits)
  random32: add test cases for taus113 implementation
  random32: upgrade taus88 generator to taus113 from errata paper
  random32: move rnd_state to linux/random.h
  random32: add prandom_reseed_late() and call when nonblocking pool becomes initialized
  random32: add periodic reseeding
  random32: fix off-by-one in seeding requirement
  PHY: Add RTL8201CP phy_driver to realtek
  xtsonic: add missing platform_set_drvdata() in xtsonic_probe()
  macmace: add missing platform_set_drvdata() in mace_probe()
  ethernet/arc/arc_emac: add missing platform_set_drvdata() in arc_emac_probe()
  ipv6: protect for_each_sk_fl_rcu in mem_check with rcu_read_lock_bh
  vlan: Implement vlan_dev_get_egress_qos_mask as an inline.
  ixgbe: add warning when max_vfs is out of range.
  igb: Update link modes display in ethtool
  netfilter: push reasm skb through instead of original frag skbs
  ip6_output: fragment outgoing reassembled skb properly
  MAINTAINERS: mv643xx_eth: take over maintainership from Lennart
  net_sched: tbf: support of 64bit rates
  ixgbe: deleting dfwd stations out of order can cause null ptr deref
  ixgbe: fix build err, num_rx_queues is only available with CONFIG_RPS
  ...
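As a userspace illustration of items 6 and 7 above (per-call IP_TOS/IP_TTL via sendmsg() ancillary data, and the new SO_MAX_PACING_RATE socket option), here is a minimal sketch of a UDP sender as it might look on a kernel carrying these patches. The destination address/port, pacing rate, TOS and TTL values are arbitrary examples, and the fallback #define assumes the asm-generic SO_MAX_PACING_RATE value; this is not code from the merge itself.

/* Sketch only: exercise SO_MAX_PACING_RATE and IP_TOS/IP_TTL cmsgs. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47	/* asm-generic value, new in this cycle */
#endif

int main(void)
{
	unsigned int rate = 1000000;	/* pacing cap in bytes per second, example value */
	int tos = 0x10;			/* IPTOS_LOWDELAY, example value */
	int ttl = 32;			/* example value */
	char payload[] = "hello";
	char cbuf[CMSG_SPACE(sizeof(int)) * 2];
	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(9) };
	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
	struct msghdr msg = {
		.msg_name = &dst, .msg_namelen = sizeof(dst),
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
	memset(cbuf, 0, sizeof(cbuf));

	/* Item 7: cap the kernel-computed pacing rate for this socket
	 * (takes effect with TCP pacing or the fq qdisc).
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE, &rate, sizeof(rate)) < 0)
		perror("SO_MAX_PACING_RATE");

	/* Item 6: set TOS and TTL for this one datagram via ancillary data. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_IP;
	cmsg->cmsg_type = IP_TOS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(tos));
	memcpy(CMSG_DATA(cmsg), &tos, sizeof(tos));

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = IPPROTO_IP;
	cmsg->cmsg_type = IP_TTL;
	cmsg->cmsg_len = CMSG_LEN(sizeof(ttl));
	memcpy(CMSG_DATA(cmsg), &ttl, sizeof(ttl));

	if (sendmsg(fd, &msg, 0) < 0)
		perror("sendmsg");
	close(fd);
	return 0;
}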
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c  219
1 file changed, 115 insertions(+), 104 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bbc9cb84ec1f..01f4eb5c8b78 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -124,12 +124,14 @@ struct virtnet_info {
 	/* Lock for config space updates */
 	struct mutex config_lock;
 
+	/* Page_frag for GFP_KERNEL packet buffer allocation when we run
+	 * low on memory.
+	 */
+	struct page_frag alloc_frag;
+
 	/* Does the affinity hint is set for virtqueues? */
 	bool affinity_hint_set;
 
-	/* Per-cpu variable to show the mapping from CPU to virtqueue */
-	int __percpu *vq_index;
-
 	/* CPU hot plug notifier */
 	struct notifier_block nb;
 };
@@ -217,33 +219,18 @@ static void skb_xmit_done(struct virtqueue *vq)
 		netif_wake_subqueue(vi->dev, vq2txq(vq));
 }
 
-static void set_skb_frag(struct sk_buff *skb, struct page *page,
-			 unsigned int offset, unsigned int *len)
-{
-	int size = min((unsigned)PAGE_SIZE - offset, *len);
-	int i = skb_shinfo(skb)->nr_frags;
-
-	__skb_fill_page_desc(skb, i, page, offset, size);
-
-	skb->data_len += size;
-	skb->len += size;
-	skb->truesize += PAGE_SIZE;
-	skb_shinfo(skb)->nr_frags++;
-	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
-	*len -= size;
-}
-
 /* Called from bottom half context */
 static struct sk_buff *page_to_skb(struct receive_queue *rq,
-				   struct page *page, unsigned int len)
+				   struct page *page, unsigned int offset,
+				   unsigned int len, unsigned int truesize)
 {
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct sk_buff *skb;
 	struct skb_vnet_hdr *hdr;
-	unsigned int copy, hdr_len, offset;
+	unsigned int copy, hdr_len, hdr_padded_len;
 	char *p;
 
-	p = page_address(page);
+	p = page_address(page) + offset;
 
 	/* copy small packet so we can reuse these pages for small data */
 	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
@@ -254,16 +241,17 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 
 	if (vi->mergeable_rx_bufs) {
 		hdr_len = sizeof hdr->mhdr;
-		offset = hdr_len;
+		hdr_padded_len = sizeof hdr->mhdr;
 	} else {
 		hdr_len = sizeof hdr->hdr;
-		offset = sizeof(struct padded_vnet_hdr);
+		hdr_padded_len = sizeof(struct padded_vnet_hdr);
 	}
 
 	memcpy(hdr, p, hdr_len);
 
 	len -= hdr_len;
-	p += offset;
+	offset += hdr_padded_len;
+	p += hdr_padded_len;
 
 	copy = len;
 	if (copy > skb_tailroom(skb))
@@ -273,6 +261,14 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 	len -= copy;
 	offset += copy;
 
+	if (vi->mergeable_rx_bufs) {
+		if (len)
+			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
+		else
+			put_page(page);
+		return skb;
+	}
+
 	/*
 	 * Verify that we can indeed put this data into a skb.
 	 * This is here to handle cases when the device erroneously
@@ -284,9 +280,12 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 		dev_kfree_skb(skb);
 		return NULL;
 	}
-
+	BUG_ON(offset >= PAGE_SIZE);
 	while (len) {
-		set_skb_frag(skb, page, offset, &len);
+		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
+				frag_size, truesize);
+		len -= frag_size;
 		page = (struct page *)page->private;
 		offset = 0;
 	}
@@ -297,33 +296,59 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 	return skb;
 }
 
-static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
+static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
 {
-	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+	struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
+	struct sk_buff *curr_skb = head_skb;
+	char *buf;
 	struct page *page;
-	int num_buf, i, len;
+	int num_buf, len, offset;
 
 	num_buf = hdr->mhdr.num_buffers;
 	while (--num_buf) {
-		i = skb_shinfo(skb)->nr_frags;
-		if (i >= MAX_SKB_FRAGS) {
-			pr_debug("%s: packet too long\n", skb->dev->name);
-			skb->dev->stats.rx_length_errors++;
-			return -EINVAL;
-		}
-		page = virtqueue_get_buf(rq->vq, &len);
-		if (!page) {
+		int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+		buf = virtqueue_get_buf(rq->vq, &len);
+		if (unlikely(!buf)) {
 			pr_debug("%s: rx error: %d buffers missing\n",
-				 skb->dev->name, hdr->mhdr.num_buffers);
-			skb->dev->stats.rx_length_errors++;
+				 head_skb->dev->name, hdr->mhdr.num_buffers);
+			head_skb->dev->stats.rx_length_errors++;
 			return -EINVAL;
 		}
-
-		if (len > PAGE_SIZE)
-			len = PAGE_SIZE;
-
-		set_skb_frag(skb, page, 0, &len);
-
+		if (unlikely(len > MAX_PACKET_LEN)) {
+			pr_debug("%s: rx error: merge buffer too long\n",
+				 head_skb->dev->name);
+			len = MAX_PACKET_LEN;
+		}
+		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
+			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
+			if (unlikely(!nskb)) {
+				head_skb->dev->stats.rx_dropped++;
+				return -ENOMEM;
+			}
+			if (curr_skb == head_skb)
+				skb_shinfo(curr_skb)->frag_list = nskb;
+			else
+				curr_skb->next = nskb;
+			curr_skb = nskb;
+			head_skb->truesize += nskb->truesize;
+			num_skb_frags = 0;
+		}
+		if (curr_skb != head_skb) {
+			head_skb->data_len += len;
+			head_skb->len += len;
+			head_skb->truesize += MAX_PACKET_LEN;
+		}
+		page = virt_to_head_page(buf);
+		offset = buf - (char *)page_address(page);
+		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
+			put_page(page);
+			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
+					     len, MAX_PACKET_LEN);
+		} else {
+			skb_add_rx_frag(curr_skb, num_skb_frags, page,
+					offset, len,
+					MAX_PACKET_LEN);
+		}
 		--rq->num;
 	}
 	return 0;
@@ -341,8 +366,10 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
 		pr_debug("%s: short packet %i\n", dev->name, len);
 		dev->stats.rx_length_errors++;
-		if (vi->mergeable_rx_bufs || vi->big_packets)
+		if (vi->big_packets)
 			give_pages(rq, buf);
+		else if (vi->mergeable_rx_bufs)
+			put_page(virt_to_head_page(buf));
 		else
 			dev_kfree_skb(buf);
 		return;
@@ -352,19 +379,28 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 		skb = buf;
 		len -= sizeof(struct virtio_net_hdr);
 		skb_trim(skb, len);
+	} else if (vi->mergeable_rx_bufs) {
+		struct page *page = virt_to_head_page(buf);
+		skb = page_to_skb(rq, page,
+				  (char *)buf - (char *)page_address(page),
+				  len, MAX_PACKET_LEN);
+		if (unlikely(!skb)) {
+			dev->stats.rx_dropped++;
+			put_page(page);
+			return;
+		}
+		if (receive_mergeable(rq, skb)) {
+			dev_kfree_skb(skb);
+			return;
+		}
 	} else {
 		page = buf;
-		skb = page_to_skb(rq, page, len);
+		skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
 		if (unlikely(!skb)) {
 			dev->stats.rx_dropped++;
 			give_pages(rq, page);
 			return;
 		}
-		if (vi->mergeable_rx_bufs)
-			if (receive_mergeable(rq, skb)) {
-				dev_kfree_skb(skb);
-				return;
-			}
 	}
 
 	hdr = skb_vnet_hdr(skb);
@@ -501,18 +537,28 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
 
 static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
 {
-	struct page *page;
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	char *buf = NULL;
 	int err;
 
-	page = get_a_page(rq, gfp);
-	if (!page)
+	if (gfp & __GFP_WAIT) {
+		if (skb_page_frag_refill(MAX_PACKET_LEN, &vi->alloc_frag,
+					 gfp)) {
+			buf = (char *)page_address(vi->alloc_frag.page) +
+			      vi->alloc_frag.offset;
+			get_page(vi->alloc_frag.page);
+			vi->alloc_frag.offset += MAX_PACKET_LEN;
+		}
+	} else {
+		buf = netdev_alloc_frag(MAX_PACKET_LEN);
+	}
+	if (!buf)
 		return -ENOMEM;
 
-	sg_init_one(rq->sg, page_address(page), PAGE_SIZE);
-
-	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp);
+	sg_init_one(rq->sg, buf, MAX_PACKET_LEN);
+	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
 	if (err < 0)
-		give_pages(rq, page);
+		put_page(virt_to_head_page(buf));
 
 	return err;
 }
@@ -1065,7 +1111,6 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
 static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
 {
 	int i;
-	int cpu;
 
 	if (vi->affinity_hint_set) {
 		for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -1075,16 +1120,6 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
 
 		vi->affinity_hint_set = false;
 	}
-
-	i = 0;
-	for_each_online_cpu(cpu) {
-		if (cpu == hcpu) {
-			*per_cpu_ptr(vi->vq_index, cpu) = -1;
-		} else {
-			*per_cpu_ptr(vi->vq_index, cpu) =
-				++i % vi->curr_queue_pairs;
-		}
-	}
 }
 
 static void virtnet_set_affinity(struct virtnet_info *vi)
@@ -1106,7 +1141,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
 	for_each_online_cpu(cpu) {
 		virtqueue_set_affinity(vi->rq[i].vq, cpu);
 		virtqueue_set_affinity(vi->sq[i].vq, cpu);
-		*per_cpu_ptr(vi->vq_index, cpu) = i;
+		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
 		i++;
 	}
 
@@ -1220,28 +1255,6 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
-/* To avoid contending a lock hold by a vcpu who would exit to host, select the
- * txq based on the processor id.
- */
-static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-	int txq;
-	struct virtnet_info *vi = netdev_priv(dev);
-
-	if (skb_rx_queue_recorded(skb)) {
-		txq = skb_get_rx_queue(skb);
-	} else {
-		txq = *__this_cpu_ptr(vi->vq_index);
-		if (txq == -1)
-			txq = 0;
-	}
-
-	while (unlikely(txq >= dev->real_num_tx_queues))
-		txq -= dev->real_num_tx_queues;
-
-	return txq;
-}
-
 static const struct net_device_ops virtnet_netdev = {
 	.ndo_open            = virtnet_open,
 	.ndo_stop            = virtnet_close,
@@ -1253,7 +1266,6 @@ static const struct net_device_ops virtnet_netdev = {
 	.ndo_get_stats64     = virtnet_stats,
 	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
-	.ndo_select_queue     = virtnet_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = virtnet_netpoll,
 #endif
@@ -1336,8 +1348,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
 		struct virtqueue *vq = vi->rq[i].vq;
 
 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-			if (vi->mergeable_rx_bufs || vi->big_packets)
+			if (vi->big_packets)
 				give_pages(&vi->rq[i], buf);
+			else if (vi->mergeable_rx_bufs)
+				put_page(virt_to_head_page(buf));
 			else
 				dev_kfree_skb(buf);
 			--vi->rq[i].num;
@@ -1562,10 +1576,6 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (vi->stats == NULL)
 		goto free;
 
-	vi->vq_index = alloc_percpu(int);
-	if (vi->vq_index == NULL)
-		goto free_stats;
-
 	mutex_init(&vi->config_lock);
 	vi->config_enable = true;
 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
@@ -1592,7 +1602,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
 	err = init_vqs(vi);
 	if (err)
-		goto free_index;
+		goto free_stats;
 
 	netif_set_real_num_tx_queues(dev, 1);
 	netif_set_real_num_rx_queues(dev, 1);
@@ -1643,8 +1653,8 @@ free_recv_bufs:
 free_vqs:
 	cancel_delayed_work_sync(&vi->refill);
 	virtnet_del_vqs(vi);
-free_index:
-	free_percpu(vi->vq_index);
+	if (vi->alloc_frag.page)
+		put_page(vi->alloc_frag.page);
 free_stats:
 	free_percpu(vi->stats);
 free:
@@ -1678,10 +1688,11 @@ static void virtnet_remove(struct virtio_device *vdev)
 	unregister_netdev(vi->dev);
 
 	remove_vq_common(vi);
+	if (vi->alloc_frag.page)
+		put_page(vi->alloc_frag.page);
 
 	flush_work(&vi->config_work);
 
-	free_percpu(vi->vq_index);
 	free_percpu(vi->stats);
 	free_netdev(vi->dev);
 }