path: root/drivers/net/virtio_net.c
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c  474
1 file changed, 269 insertions(+), 205 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9ead30bd00c..25dc77ccbf5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -56,10 +56,6 @@ struct virtnet_info
 	/* Host will merge rx buffers for big packets (shake it! shake it!) */
 	bool mergeable_rx_bufs;
 
-	/* Receive & send queues. */
-	struct sk_buff_head recv;
-	struct sk_buff_head send;
-
 	/* Work struct for refilling if we run low on memory. */
 	struct delayed_work refill;
 
@@ -75,34 +71,44 @@ struct skb_vnet_hdr {
 	unsigned int num_sg;
 };
 
+struct padded_vnet_hdr {
+	struct virtio_net_hdr hdr;
+	/*
+	 * virtio_net_hdr should be in a separated sg buffer because of a
+	 * QEMU bug, and data sg buffer shares same page with this header sg.
+	 * This padding makes next sg 16 byte aligned after virtio_net_hdr.
+	 */
+	char padding[6];
+};
+
 static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
 {
 	return (struct skb_vnet_hdr *)skb->cb;
 }
 
-static void give_a_page(struct virtnet_info *vi, struct page *page)
-{
-	page->private = (unsigned long)vi->pages;
-	vi->pages = page;
-}
-
-static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
+/*
+ * private is used to chain pages for big packets, put the whole
+ * most recent used list in the beginning for reuse
+ */
+static void give_pages(struct virtnet_info *vi, struct page *page)
 {
-	unsigned int i;
+	struct page *end;
 
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-		give_a_page(vi, skb_shinfo(skb)->frags[i].page);
-	skb_shinfo(skb)->nr_frags = 0;
-	skb->data_len = 0;
+	/* Find end of list, sew whole thing into vi->pages. */
+	for (end = page; end->private; end = (struct page *)end->private);
+	end->private = (unsigned long)vi->pages;
+	vi->pages = page;
 }
 
 static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
 {
 	struct page *p = vi->pages;
 
-	if (p)
+	if (p) {
 		vi->pages = (struct page *)p->private;
-	else
+		/* clear private here, it is used to chain pages */
+		p->private = 0;
+	} else
 		p = alloc_page(gfp_mask);
 	return p;
 }
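
A note on the page chaining introduced above: give_pages() and get_a_page() reuse page->private as a next pointer, so the driver's spare pages form a singly linked list headed by vi->pages, and a whole chain taken from a big-packet buffer can be returned with one splice. The sketch below is an illustration only, not part of the patch; it swaps a mock struct for the kernel's struct page but models the same push-a-chain / pop-one-page behaviour.

	#include <assert.h>
	#include <stdlib.h>

	/* Mock of struct page: only the 'private' field matters here. */
	struct mock_page {
		unsigned long private;	/* doubles as the 'next' pointer */
	};

	static struct mock_page *pool;	/* models vi->pages */

	/* Model of give_pages(): splice a whole chain onto the pool head. */
	static void give_pages(struct mock_page *page)
	{
		struct mock_page *end;

		for (end = page; end->private; end = (struct mock_page *)end->private)
			;
		end->private = (unsigned long)pool;
		pool = page;
	}

	/* Model of get_a_page(): pop one page, clearing its chain link. */
	static struct mock_page *get_a_page(void)
	{
		struct mock_page *p = pool;

		if (p) {
			pool = (struct mock_page *)p->private;
			p->private = 0;
		} else {
			p = calloc(1, sizeof(*p));
		}
		return p;
	}

	int main(void)
	{
		struct mock_page a = { 0 }, b = { 0 };

		a.private = (unsigned long)&b;	/* chain a -> b */
		give_pages(&a);			/* pool is now a -> b */
		assert(get_a_page() == &a);
		assert(get_a_page() == &b);
		return 0;
	}
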
@@ -118,99 +124,142 @@ static void skb_xmit_done(struct virtqueue *svq)
 	netif_wake_queue(vi->dev);
 }
 
-static void receive_skb(struct net_device *dev, struct sk_buff *skb,
-			unsigned len)
+static void set_skb_frag(struct sk_buff *skb, struct page *page,
+			 unsigned int offset, unsigned int *len)
 {
-	struct virtnet_info *vi = netdev_priv(dev);
-	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
-	int err;
-	int i;
+	int i = skb_shinfo(skb)->nr_frags;
+	skb_frag_t *f;
+
+	f = &skb_shinfo(skb)->frags[i];
+	f->size = min((unsigned)PAGE_SIZE - offset, *len);
+	f->page_offset = offset;
+	f->page = page;
+
+	skb->data_len += f->size;
+	skb->len += f->size;
+	skb_shinfo(skb)->nr_frags++;
+	*len -= f->size;
+}
 
-	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
-		pr_debug("%s: short packet %i\n", dev->name, len);
-		dev->stats.rx_length_errors++;
-		goto drop;
-	}
+static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+				   struct page *page, unsigned int len)
+{
+	struct sk_buff *skb;
+	struct skb_vnet_hdr *hdr;
+	unsigned int copy, hdr_len, offset;
+	char *p;
 
-	if (vi->mergeable_rx_bufs) {
-		unsigned int copy;
-		char *p = page_address(skb_shinfo(skb)->frags[0].page);
+	p = page_address(page);
 
-		if (len > PAGE_SIZE)
-			len = PAGE_SIZE;
-		len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
-
-		memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
-		p += sizeof(hdr->mhdr);
+	/* copy small packet so we can reuse these pages for small data */
+	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
+	if (unlikely(!skb))
+		return NULL;
 
-		copy = len;
-		if (copy > skb_tailroom(skb))
-			copy = skb_tailroom(skb);
+	hdr = skb_vnet_hdr(skb);
 
-		memcpy(skb_put(skb, copy), p, copy);
+	if (vi->mergeable_rx_bufs) {
+		hdr_len = sizeof hdr->mhdr;
+		offset = hdr_len;
+	} else {
+		hdr_len = sizeof hdr->hdr;
+		offset = sizeof(struct padded_vnet_hdr);
+	}
 
-		len -= copy;
+	memcpy(hdr, p, hdr_len);
 
-		if (!len) {
-			give_a_page(vi, skb_shinfo(skb)->frags[0].page);
-			skb_shinfo(skb)->nr_frags--;
-		} else {
-			skb_shinfo(skb)->frags[0].page_offset +=
-				sizeof(hdr->mhdr) + copy;
-			skb_shinfo(skb)->frags[0].size = len;
-			skb->data_len += len;
-			skb->len += len;
-		}
+	len -= hdr_len;
+	p += offset;
 
-		while (--hdr->mhdr.num_buffers) {
-			struct sk_buff *nskb;
+	copy = len;
+	if (copy > skb_tailroom(skb))
+		copy = skb_tailroom(skb);
+	memcpy(skb_put(skb, copy), p, copy);
 
-			i = skb_shinfo(skb)->nr_frags;
-			if (i >= MAX_SKB_FRAGS) {
-				pr_debug("%s: packet too long %d\n", dev->name,
-					 len);
-				dev->stats.rx_length_errors++;
-				goto drop;
-			}
+	len -= copy;
+	offset += copy;
 
-			nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
-			if (!nskb) {
-				pr_debug("%s: rx error: %d buffers missing\n",
-					 dev->name, hdr->mhdr.num_buffers);
-				dev->stats.rx_length_errors++;
-				goto drop;
-			}
+	while (len) {
+		set_skb_frag(skb, page, offset, &len);
+		page = (struct page *)page->private;
+		offset = 0;
+	}
 
-			__skb_unlink(nskb, &vi->recv);
-			vi->num--;
+	if (page)
+		give_pages(vi, page);
 
-			skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
-			skb_shinfo(nskb)->nr_frags = 0;
-			kfree_skb(nskb);
+	return skb;
+}
 
-			if (len > PAGE_SIZE)
-				len = PAGE_SIZE;
+static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
+{
+	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+	struct page *page;
+	int num_buf, i, len;
+
+	num_buf = hdr->mhdr.num_buffers;
+	while (--num_buf) {
+		i = skb_shinfo(skb)->nr_frags;
+		if (i >= MAX_SKB_FRAGS) {
+			pr_debug("%s: packet too long\n", skb->dev->name);
+			skb->dev->stats.rx_length_errors++;
+			return -EINVAL;
+		}
 
-			skb_shinfo(skb)->frags[i].size = len;
-			skb_shinfo(skb)->nr_frags++;
-			skb->data_len += len;
-			skb->len += len;
+		page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
+		if (!page) {
+			pr_debug("%s: rx error: %d buffers missing\n",
+				 skb->dev->name, hdr->mhdr.num_buffers);
+			skb->dev->stats.rx_length_errors++;
+			return -EINVAL;
 		}
-	} else {
-		len -= sizeof(hdr->hdr);
+		if (len > PAGE_SIZE)
+			len = PAGE_SIZE;
+
+		set_skb_frag(skb, page, 0, &len);
+
+		--vi->num;
+	}
+	return 0;
+}
+
+static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	struct sk_buff *skb;
+	struct page *page;
+	struct skb_vnet_hdr *hdr;
 
-	if (len <= MAX_PACKET_LEN)
-		trim_pages(vi, skb);
+	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
+		pr_debug("%s: short packet %i\n", dev->name, len);
+		dev->stats.rx_length_errors++;
+		if (vi->mergeable_rx_bufs || vi->big_packets)
+			give_pages(vi, buf);
+		else
+			dev_kfree_skb(buf);
+		return;
+	}
 
-	err = pskb_trim(skb, len);
-	if (err) {
-		pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
-			 len, err);
-		dev->stats.rx_dropped++;
-		goto drop;
-	}
+	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
+		skb = buf;
+		len -= sizeof(struct virtio_net_hdr);
+		skb_trim(skb, len);
+	} else {
+		page = buf;
+		skb = page_to_skb(vi, page, len);
+		if (unlikely(!skb)) {
+			dev->stats.rx_dropped++;
+			give_pages(vi, page);
+			return;
+		}
+		if (vi->mergeable_rx_bufs)
+			if (receive_mergeable(vi, skb)) {
+				dev_kfree_skb(skb);
+				return;
+			}
 	}
 
+	hdr = skb_vnet_hdr(skb);
 	skb->truesize += skb->data_len;
 	dev->stats.rx_bytes += skb->len;
 	dev->stats.rx_packets++;
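
For the curious, the loop in page_to_skb() above hands what is left of the payload to set_skb_frag() one page at a time: each fragment takes min(PAGE_SIZE - offset, len) bytes, and only the first fragment starts at a non-zero offset (just past the copied header and linear part). Below is a minimal userspace model of that arithmetic, assuming a 4096-byte page and illustrative numbers only; it is not part of the patch.

	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096u

	/*
	 * Model of the set_skb_frag()/page_to_skb() loop: given 'len' bytes of
	 * payload left in the pages and the starting 'offset' into the first
	 * page, print how many bytes each fragment takes and return the number
	 * of fragments used.
	 */
	static int frag_layout(unsigned int len, unsigned int offset)
	{
		int frags = 0;

		while (len) {
			unsigned int size = PAGE_SIZE - offset;

			if (size > len)
				size = len;	/* min(PAGE_SIZE - offset, len) */
			printf("frag %d: offset %u, size %u\n", frags, offset, size);
			len -= size;
			offset = 0;		/* later pages are used from the start */
			frags++;
		}
		return frags;
	}

	int main(void)
	{
		/* e.g. 9000 payload bytes left, starting 16 bytes into page 0 */
		assert(frag_layout(9000, 16) == 3);	/* 4080 + 4096 + 824 */
		return 0;
	}
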
@@ -267,110 +316,119 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 
 frame_err:
 	dev->stats.rx_frame_errors++;
-drop:
 	dev_kfree_skb(skb);
 }
 
-static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
 {
 	struct sk_buff *skb;
-	struct scatterlist sg[2+MAX_SKB_FRAGS];
-	int num, err, i;
-	bool oom = false;
-
-	sg_init_table(sg, 2+MAX_SKB_FRAGS);
-	do {
-		struct skb_vnet_hdr *hdr;
+	struct skb_vnet_hdr *hdr;
+	struct scatterlist sg[2];
+	int err;
 
-		skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
-		if (unlikely(!skb)) {
-			oom = true;
-			break;
-		}
+	skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
+	if (unlikely(!skb))
+		return -ENOMEM;
 
-		skb_put(skb, MAX_PACKET_LEN);
+	skb_put(skb, MAX_PACKET_LEN);
 
-		hdr = skb_vnet_hdr(skb);
-		sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
+	hdr = skb_vnet_hdr(skb);
+	sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
 
-		if (vi->big_packets) {
-			for (i = 0; i < MAX_SKB_FRAGS; i++) {
-				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-				f->page = get_a_page(vi, gfp);
-				if (!f->page)
-					break;
+	skb_to_sgvec(skb, sg + 1, 0, skb->len);
 
-				f->page_offset = 0;
-				f->size = PAGE_SIZE;
+	err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
+	if (err < 0)
+		dev_kfree_skb(skb);
 
-				skb->data_len += PAGE_SIZE;
-				skb->len += PAGE_SIZE;
+	return err;
+}
 
-				skb_shinfo(skb)->nr_frags++;
-			}
+static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
+{
+	struct scatterlist sg[MAX_SKB_FRAGS + 2];
+	struct page *first, *list = NULL;
+	char *p;
+	int i, err, offset;
+
+	/* page in sg[MAX_SKB_FRAGS + 1] is list tail */
+	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
+		first = get_a_page(vi, gfp);
+		if (!first) {
+			if (list)
+				give_pages(vi, list);
+			return -ENOMEM;
 		}
+		sg_set_buf(&sg[i], page_address(first), PAGE_SIZE);
 
-		num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
-		skb_queue_head(&vi->recv, skb);
+		/* chain new page in list head to match sg */
+		first->private = (unsigned long)list;
+		list = first;
+	}
 
-		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
-		if (err < 0) {
-			skb_unlink(skb, &vi->recv);
-			trim_pages(vi, skb);
-			kfree_skb(skb);
-			break;
-		}
-		vi->num++;
-	} while (err >= num);
-	if (unlikely(vi->num > vi->max))
-		vi->max = vi->num;
-	vi->rvq->vq_ops->kick(vi->rvq);
-	return !oom;
+	first = get_a_page(vi, gfp);
+	if (!first) {
+		give_pages(vi, list);
+		return -ENOMEM;
+	}
+	p = page_address(first);
+
+	/* sg[0], sg[1] share the same page */
+	/* a separated sg[0] for virtio_net_hdr only during to QEMU bug*/
+	sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr));
+
+	/* sg[1] for data packet, from offset */
+	offset = sizeof(struct padded_vnet_hdr);
+	sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset);
+
+	/* chain first in list head */
+	first->private = (unsigned long)list;
+	err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
+				       first);
+	if (err < 0)
+		give_pages(vi, first);
+
+	return err;
 }
 
-/* Returns false if we couldn't fill entirely (OOM). */
-static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
 {
-	struct sk_buff *skb;
-	struct scatterlist sg[1];
+	struct page *page;
+	struct scatterlist sg;
 	int err;
-	bool oom = false;
 
-	if (!vi->mergeable_rx_bufs)
-		return try_fill_recv_maxbufs(vi, gfp);
+	page = get_a_page(vi, gfp);
+	if (!page)
+		return -ENOMEM;
 
-	do {
-		skb_frag_t *f;
+	sg_init_one(&sg, page_address(page), PAGE_SIZE);
 
-		skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
-		if (unlikely(!skb)) {
-			oom = true;
-			break;
-		}
+	err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
+	if (err < 0)
+		give_pages(vi, page);
 
-		f = &skb_shinfo(skb)->frags[0];
-		f->page = get_a_page(vi, gfp);
-		if (!f->page) {
-			oom = true;
-			kfree_skb(skb);
-			break;
-		}
-
-		f->page_offset = 0;
-		f->size = PAGE_SIZE;
+	return err;
+}
 
-		skb_shinfo(skb)->nr_frags++;
+/* Returns false if we couldn't fill entirely (OOM). */
+static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
+{
+	int err;
+	bool oom = false;
 
-		sg_init_one(sg, page_address(f->page), PAGE_SIZE);
-		skb_queue_head(&vi->recv, skb);
+	do {
+		if (vi->mergeable_rx_bufs)
+			err = add_recvbuf_mergeable(vi, gfp);
+		else if (vi->big_packets)
+			err = add_recvbuf_big(vi, gfp);
+		else
+			err = add_recvbuf_small(vi, gfp);
 
-		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
 		if (err < 0) {
-			skb_unlink(skb, &vi->recv);
-			kfree_skb(skb);
+			oom = true;
 			break;
 		}
-		vi->num++;
+		++vi->num;
 	} while (err > 0);
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
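
One way to see why add_recvbuf_big() above splits the first page into two scatterlist entries: sg[0] covers only the bare virtio_net_hdr (kept in its own entry because of the QEMU bug noted in the padded_vnet_hdr comment), and sg[1] starts at sizeof(struct padded_vnet_hdr), so the data entry begins 16 bytes into the page. The standalone check below uses mock structs that mirror the layout of the non-mergeable header; it is an illustration, not kernel code.

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the non-mergeable virtio_net_hdr layout: 10 bytes. */
	struct mock_virtio_net_hdr {
		uint8_t  flags;
		uint8_t  gso_type;
		uint16_t hdr_len;
		uint16_t gso_size;
		uint16_t csum_start;
		uint16_t csum_offset;
	};

	/* Mirrors the driver's padded_vnet_hdr: header plus 6 bytes of padding. */
	struct mock_padded_vnet_hdr {
		struct mock_virtio_net_hdr hdr;
		char padding[6];
	};

	int main(void)
	{
		unsigned int page_size = 4096;
		unsigned int hdr_len = sizeof(struct mock_virtio_net_hdr);
		unsigned int offset = sizeof(struct mock_padded_vnet_hdr);

		assert(hdr_len == 10);
		assert(offset == 16);	/* sg[1] starts 16-byte aligned */
		printf("sg[0]: bytes [0, %u) of the first page\n", hdr_len);
		printf("sg[1]: bytes [%u, %u) of the first page\n", offset, page_size);
		/* sg[2..MAX_SKB_FRAGS+1] each cover one whole chained page */
		return 0;
	}
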
@@ -407,15 +465,14 @@ static void refill_work(struct work_struct *work)
 static int virtnet_poll(struct napi_struct *napi, int budget)
 {
 	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
-	struct sk_buff *skb = NULL;
+	void *buf;
 	unsigned int len, received = 0;
 
 again:
 	while (received < budget &&
-	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
-		__skb_unlink(skb, &vi->recv);
-		receive_skb(vi->dev, skb, len);
-		vi->num--;
+	       (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
+		receive_buf(vi->dev, buf, len);
+		--vi->num;
 		received++;
 	}
 
@@ -445,7 +502,6 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
 
 	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
 		pr_debug("Sent skb %p\n", skb);
-		__skb_unlink(skb, &vi->send);
 		vi->dev->stats.tx_bytes += skb->len;
 		vi->dev->stats.tx_packets++;
 		tot_sgs += skb_vnet_hdr(skb)->num_sg;
@@ -495,9 +551,9 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 
 	/* Encode metadata header at front. */
 	if (vi->mergeable_rx_bufs)
-		sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
+		sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr);
 	else
-		sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
+		sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
 
 	hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
 	return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
@@ -528,15 +584,6 @@ again:
 	}
 	vi->svq->vq_ops->kick(vi->svq);
 
-	/*
-	 * Put new one in send queue.  You'd expect we'd need this before
-	 * xmit_skb calls add_buf(), since the callback can be triggered
-	 * immediately after that.  But since the callback just triggers
-	 * another call back here, normal network xmit locking prevents the
-	 * race.
-	 */
-	__skb_queue_head(&vi->send, skb);
-
 	/* Don't wait up for transmitted skbs to be freed. */
 	skb_orphan(skb);
 	nf_reset(skb);
@@ -674,6 +721,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	struct virtio_net_ctrl_mac *mac_data;
 	struct dev_addr_list *addr;
 	struct netdev_hw_addr *ha;
+	int uc_count;
+	int mc_count;
 	void *buf;
 	int i;
 
@@ -700,9 +749,12 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
 			 allmulti ? "en" : "dis");
 
+	uc_count = netdev_uc_count(dev);
+	mc_count = netdev_mc_count(dev);
 	/* MAC filter - use one buffer for both lists */
-	mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
-				 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
+	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
+		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
+	mac_data = buf;
 	if (!buf) {
 		dev_warn(&dev->dev, "No memory for MAC address buffer\n");
 		return;
@@ -711,24 +763,24 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	sg_init_table(sg, 2);
 
 	/* Store the unicast list and count in the front of the buffer */
-	mac_data->entries = dev->uc.count;
+	mac_data->entries = uc_count;
 	i = 0;
-	list_for_each_entry(ha, &dev->uc.list, list)
+	netdev_for_each_uc_addr(ha, dev)
 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
 
 	sg_set_buf(&sg[0], mac_data,
-		   sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));
+		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
 
 	/* multicast list and count fill the end */
-	mac_data = (void *)&mac_data->macs[dev->uc.count][0];
+	mac_data = (void *)&mac_data->macs[uc_count][0];
 
-	mac_data->entries = dev->mc_count;
-	addr = dev->mc_list;
-	for (i = 0; i < dev->mc_count; i++, addr = addr->next)
-		memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);
+	mac_data->entries = mc_count;
+	i = 0;
+	netdev_for_each_mc_addr(addr, dev)
+		memcpy(&mac_data->macs[i++][0], addr->da_addr, ETH_ALEN);
 
 	sg_set_buf(&sg[1], mac_data,
-		   sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));
+		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
 				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
@@ -915,10 +967,6 @@ static int virtnet_probe(struct virtio_device *vdev)
 		dev->features |= NETIF_F_HW_VLAN_FILTER;
 	}
 
-	/* Initialize our empty receive and send queues. */
-	skb_queue_head_init(&vi->recv);
-	skb_queue_head_init(&vi->send);
-
 	err = register_netdev(dev);
 	if (err) {
 		pr_debug("virtio_net: registering device failed\n");
@@ -951,26 +999,42 @@ free:
 	return err;
 }
 
+static void free_unused_bufs(struct virtnet_info *vi)
+{
+	void *buf;
+	while (1) {
+		buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
+		if (!buf)
+			break;
+		dev_kfree_skb(buf);
+	}
+	while (1) {
+		buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
+		if (!buf)
+			break;
+		if (vi->mergeable_rx_bufs || vi->big_packets)
+			give_pages(vi, buf);
+		else
+			dev_kfree_skb(buf);
+		--vi->num;
+	}
+	BUG_ON(vi->num != 0);
+}
+
 static void __devexit virtnet_remove(struct virtio_device *vdev)
 {
 	struct virtnet_info *vi = vdev->priv;
-	struct sk_buff *skb;
 
 	/* Stop all the virtqueues. */
 	vdev->config->reset(vdev);
 
-	/* Free our skbs in send and recv queues, if any. */
-	while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
-		kfree_skb(skb);
-		vi->num--;
-	}
-	__skb_queue_purge(&vi->send);
-
-	BUG_ON(vi->num != 0);
 
 	unregister_netdev(vi->dev);
 	cancel_delayed_work_sync(&vi->refill);
 
+	/* Free unused buffers in both send and recv, if any. */
+	free_unused_bufs(vi);
+
 	vdev->config->del_vqs(vi->vdev);
 
 	while (vi->pages)