path: root/drivers/net
author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-02 17:14:04 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-02 17:14:04 -0400
commit     736a2dd2571ac56b11ed95a7814d838d5311be04 (patch)
tree       de10d107025970c6e51d5b6faeba799ed4b9caae /drivers/net
parent     0b2e3b6bb4a415379f16e38fc92db42379be47a1 (diff)
parent     01d779a14ef800b74684d9692add4944df052461 (diff)
Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull virtio & lguest updates from Rusty Russell:
 "Lots of virtio work which wasn't quite ready for last merge window.
  Plus I dived into lguest again, reworking the pagetable code so we
  can move the switcher page: our fixmaps sometimes take more than 2MB
  now..."

Ugh.  Annoying conflicts with the tcm_vhost -> vhost_scsi rename.
Hopefully correctly resolved.

* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (57 commits)
  caif_virtio: Remove bouncing email addresses
  lguest: improve code readability in lg_cpu_start.
  virtio-net: fill only rx queues which are being used
  lguest: map Switcher below fixmap.
  lguest: cache last cpu we ran on.
  lguest: map Switcher text whenever we allocate a new pagetable.
  lguest: don't share Switcher PTE pages between guests.
  lguest: expost switcher_pages array (as lg_switcher_pages).
  lguest: extract shadow PTE walking / allocating.
  lguest: make check_gpte et. al return bool.
  lguest: assume Switcher text is a single page.
  lguest: rename switcher_page to switcher_pages.
  lguest: remove RESERVE_MEM constant.
  lguest: check vaddr not pgd for Switcher protection.
  lguest: prepare to make SWITCHER_ADDR a variable.
  virtio: console: replace EMFILE with EBUSY for already-open port
  virtio-scsi: reset virtqueue affinity when doing cpu hotplug
  virtio-scsi: introduce multiqueue support
  virtio-scsi: push vq lock/unlock into virtscsi_vq_done
  virtio-scsi: pass struct virtio_scsi to virtqueue completion function
  ...
Diffstat (limited to 'drivers/net')
 -rw-r--r--  drivers/net/caif/Kconfig        |  14
 -rw-r--r--  drivers/net/caif/Makefile       |   3
 -rw-r--r--  drivers/net/caif/caif_virtio.c  | 790
 -rw-r--r--  drivers/net/virtio_net.c        |  77
 4 files changed, 848 insertions, 36 deletions
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index a966128c2a7a..7ffc756131a2 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -40,3 +40,17 @@ config CAIF_HSI
 	  The caif low level driver for CAIF over HSI.
 	  Be aware that if you enable this then you also need to
 	  enable a low-level HSI driver.
+
+config CAIF_VIRTIO
+	tristate "CAIF virtio transport driver"
+	depends on CAIF
+	select VHOST_RING
+	select VIRTIO
+	select GENERIC_ALLOCATOR
+	default n
+	---help---
+	  The caif driver for CAIF over Virtio.
+
+if CAIF_VIRTIO
+source "drivers/vhost/Kconfig"
+endif
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 15a9d2fc753d..9bbd45391f6c 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -9,3 +9,6 @@ obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
 
 # HSI interface
 obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
+
+# Virtio interface
+obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
new file mode 100644
index 000000000000..b9ed1288ce2d
--- /dev/null
+++ b/drivers/net/caif/caif_virtio.c
@@ -0,0 +1,790 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2013
3 * Authors: Vicram Arv
4 * Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
5 * Sjur Brendeland
6 * License terms: GNU General Public License (GPL) version 2
7 */
8#include <linux/module.h>
9#include <linux/if_arp.h>
10#include <linux/virtio.h>
11#include <linux/vringh.h>
12#include <linux/debugfs.h>
13#include <linux/spinlock.h>
14#include <linux/genalloc.h>
15#include <linux/interrupt.h>
16#include <linux/netdevice.h>
17#include <linux/rtnetlink.h>
18#include <linux/virtio_ids.h>
19#include <linux/virtio_caif.h>
20#include <linux/virtio_ring.h>
21#include <linux/dma-mapping.h>
22#include <net/caif/caif_dev.h>
23#include <linux/virtio_config.h>
24
25MODULE_LICENSE("GPL v2");
26MODULE_AUTHOR("Vicram Arv");
27MODULE_AUTHOR("Sjur Brendeland");
28MODULE_DESCRIPTION("Virtio CAIF Driver");
29
30/* NAPI schedule quota */
31#define CFV_DEFAULT_QUOTA 32
32
33/* Defaults used if virtio config space is unavailable */
34#define CFV_DEF_MTU_SIZE 4096
35#define CFV_DEF_HEADROOM 32
36#define CFV_DEF_TAILROOM 32
37
38/* Required IP header alignment */
39#define IP_HDR_ALIGN 4
40
41/* struct cfv_napi_context - NAPI context info
42 * @riov: IOV holding data read from the ring. Note that riov may
43 * still hold data when cfv_rx_poll() returns.
44 * @head: Last descriptor ID we received from vringh_getdesc_kern.
45 * We use this to put the descriptor back on the used ring. USHRT_MAX is
46 * used to indicate an invalid head-id.
47 */
48struct cfv_napi_context {
49 struct vringh_kiov riov;
50 unsigned short head;
51};
52
53/* struct cfv_stats - statistics for debugfs
54 * @rx_napi_complete: Number of NAPI completions (RX)
55 * @rx_napi_resched: Number of calls where the full quota was used (RX)
56 * @rx_nomem: Number of SKB alloc failures (RX)
57 * @rx_kicks: Number of RX kicks
58 * @tx_full_ring: Number of times the TX ring was full
59 * @tx_no_mem: Number of times TX went out of memory
60 * @tx_flow_on: Number of flow on (TX)
61 * @tx_kicks: Number of TX kicks
62 */
63struct cfv_stats {
64 u32 rx_napi_complete;
65 u32 rx_napi_resched;
66 u32 rx_nomem;
67 u32 rx_kicks;
68 u32 tx_full_ring;
69 u32 tx_no_mem;
70 u32 tx_flow_on;
71 u32 tx_kicks;
72};
73
74/* struct cfv_info - Caif Virtio control structure
75 * @cfdev: caif common header
76 * @vdev: Associated virtio device
77 * @vr_rx: rx/downlink host vring
78 * @vq_tx: tx/uplink virtqueue
79 * @ndev: CAIF link layer device
80 * @watermark_tx: indicates number of free descriptors we need
81 * to reopen the tx-queues after overload.
82 * @tx_lock: protects vq_tx from concurrent use
83 * @tx_release_tasklet: Tasklet for freeing consumed TX buffers
84 * @napi: Napi context used in cfv_rx_poll()
85 * @ctx: Context data used in cfv_rx_poll()
86 * @tx_hr: transmit headroom
87 * @rx_hr: receive headroom
88 * @tx_tr: transmit tail room
89 * @rx_tr: receive tail room
90 * @mtu: transmit max size
91 * @mru: receive max size
92 * @allocsz: size of dma memory reserved for TX buffers
93 * @alloc_addr: virtual address to dma memory for TX buffers
94 * @alloc_dma: dma address to dma memory for TX buffers
95 * @genpool: Gen Pool used for allocating TX buffers
96 * @reserved_mem: Pointer to memory reserve allocated from genpool
97 * @reserved_size: Size of memory reserve allocated from genpool
98 * @stats: Statistics exposed in debugfs
99 * @debugfs: Debugfs dentry for statistic counters
100 */
101struct cfv_info {
102 struct caif_dev_common cfdev;
103 struct virtio_device *vdev;
104 struct vringh *vr_rx;
105 struct virtqueue *vq_tx;
106 struct net_device *ndev;
107 unsigned int watermark_tx;
108 /* Protect access to vq_tx */
109 spinlock_t tx_lock;
110 struct tasklet_struct tx_release_tasklet;
111 struct napi_struct napi;
112 struct cfv_napi_context ctx;
113 u16 tx_hr;
114 u16 rx_hr;
115 u16 tx_tr;
116 u16 rx_tr;
117 u32 mtu;
118 u32 mru;
119 size_t allocsz;
120 void *alloc_addr;
121 dma_addr_t alloc_dma;
122 struct gen_pool *genpool;
123 unsigned long reserved_mem;
124 size_t reserved_size;
125 struct cfv_stats stats;
126 struct dentry *debugfs;
127};
128
129/* struct buf_info - maintains transmit buffer data handle
130 * @size: size of transmit buffer
132 * @vaddr: virtual address mapping to allocated memory area
133 */
134struct buf_info {
135 size_t size;
136 u8 *vaddr;
137};
138
139/* Called from virtio device, in IRQ context */
140static void cfv_release_cb(struct virtqueue *vq_tx)
141{
142 struct cfv_info *cfv = vq_tx->vdev->priv;
143
144 ++cfv->stats.tx_kicks;
145 tasklet_schedule(&cfv->tx_release_tasklet);
146}
147
148static void free_buf_info(struct cfv_info *cfv, struct buf_info *buf_info)
149{
150 if (!buf_info)
151 return;
152 gen_pool_free(cfv->genpool, (unsigned long) buf_info->vaddr,
153 buf_info->size);
154 kfree(buf_info);
155}
156
157/* This is invoked whenever the remote processor has completed processing
158 * a TX msg we just sent, and the buffer is put back on the used ring.
159 */
160static void cfv_release_used_buf(struct virtqueue *vq_tx)
161{
162 struct cfv_info *cfv = vq_tx->vdev->priv;
163 unsigned long flags;
164
165 BUG_ON(vq_tx != cfv->vq_tx);
166
167 for (;;) {
168 unsigned int len;
169 struct buf_info *buf_info;
170
171 /* Get used buffer from used ring to recycle used descriptors */
172 spin_lock_irqsave(&cfv->tx_lock, flags);
173 buf_info = virtqueue_get_buf(vq_tx, &len);
174 spin_unlock_irqrestore(&cfv->tx_lock, flags);
175
176 /* Stop looping if there are no more buffers to free */
177 if (!buf_info)
178 break;
179
180 free_buf_info(cfv, buf_info);
181
182 /* watermark_tx indicates if we previously stopped the tx
183 * queues. If we have enough free slots in the virtio ring,
184 * re-establish the memory reserve and open up the tx queues.
185 */
186 if (cfv->vq_tx->num_free <= cfv->watermark_tx)
187 continue;
188
189 /* Re-establish memory reserve */
190 if (cfv->reserved_mem == 0 && cfv->genpool)
191 cfv->reserved_mem =
192 gen_pool_alloc(cfv->genpool,
193 cfv->reserved_size);
194
195 /* Open up the tx queues */
196 if (cfv->reserved_mem) {
197 cfv->watermark_tx =
198 virtqueue_get_vring_size(cfv->vq_tx);
199 netif_tx_wake_all_queues(cfv->ndev);
200 /* Buffers are recycled in cfv_netdev_tx, so
201 * disable notifications when queues are opened.
202 */
203 virtqueue_disable_cb(cfv->vq_tx);
204 ++cfv->stats.tx_flow_on;
205 } else {
206 /* if no memory reserve, wait for more free slots */
207 WARN_ON(cfv->watermark_tx >
208 virtqueue_get_vring_size(cfv->vq_tx));
209 cfv->watermark_tx +=
210 virtqueue_get_vring_size(cfv->vq_tx) / 4;
211 }
212 }
213}
214
215/* Allocate a SKB and copy packet data to it */
216static struct sk_buff *cfv_alloc_and_copy_skb(int *err,
217 struct cfv_info *cfv,
218 u8 *frm, u32 frm_len)
219{
220 struct sk_buff *skb;
221 u32 cfpkt_len, pad_len;
222
223 *err = 0;
224 /* Verify the frame length against the MRU and the down-link head/tail room */
225 if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) {
226 netdev_err(cfv->ndev,
227 "Invalid frmlen:%u mtu:%u hr:%d tr:%d\n",
228 frm_len, cfv->mru, cfv->rx_hr,
229 cfv->rx_tr);
230 *err = -EPROTO;
231 return NULL;
232 }
233
234 cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr);
235 pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1);
236
237 skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len);
238 if (!skb) {
239 *err = -ENOMEM;
240 return NULL;
241 }
242
243 skb_reserve(skb, cfv->rx_hr + pad_len);
244
245 memcpy(skb_put(skb, cfpkt_len), frm + cfv->rx_hr, cfpkt_len);
246 return skb;
247}
248
249/* Get packets from the host vring */
250static int cfv_rx_poll(struct napi_struct *napi, int quota)
251{
252 struct cfv_info *cfv = container_of(napi, struct cfv_info, napi);
253 int rxcnt = 0;
254 int err = 0;
255 void *buf;
256 struct sk_buff *skb;
257 struct vringh_kiov *riov = &cfv->ctx.riov;
258 unsigned int skb_len;
259
260again:
261 do {
262 skb = NULL;
263
264 /* Put the previous iovec back on the used ring and
265 * fetch a new iovec if we have processed all elements.
266 */
267 if (riov->i == riov->used) {
268 if (cfv->ctx.head != USHRT_MAX) {
269 vringh_complete_kern(cfv->vr_rx,
270 cfv->ctx.head,
271 0);
272 cfv->ctx.head = USHRT_MAX;
273 }
274
275 err = vringh_getdesc_kern(
276 cfv->vr_rx,
277 riov,
278 NULL,
279 &cfv->ctx.head,
280 GFP_ATOMIC);
281
282 if (err <= 0)
283 goto exit;
284 }
285
286 buf = phys_to_virt((unsigned long) riov->iov[riov->i].iov_base);
287 /* TODO: Add check on valid buffer address */
288
289 skb = cfv_alloc_and_copy_skb(&err, cfv, buf,
290 riov->iov[riov->i].iov_len);
291 if (unlikely(err))
292 goto exit;
293
294 /* Push received packet up the stack. */
295 skb_len = skb->len;
296 skb->protocol = htons(ETH_P_CAIF);
297 skb_reset_mac_header(skb);
298 skb->dev = cfv->ndev;
299 err = netif_receive_skb(skb);
300 if (unlikely(err)) {
301 ++cfv->ndev->stats.rx_dropped;
302 } else {
303 ++cfv->ndev->stats.rx_packets;
304 cfv->ndev->stats.rx_bytes += skb_len;
305 }
306
307 ++riov->i;
308 ++rxcnt;
309 } while (rxcnt < quota);
310
311 ++cfv->stats.rx_napi_resched;
312 goto out;
313
314exit:
315 switch (err) {
316 case 0:
317 ++cfv->stats.rx_napi_complete;
318
319 /* Really out of packets? (stolen from virtio_net) */
320 napi_complete(napi);
321 if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
322 napi_schedule_prep(napi)) {
323 vringh_notify_disable_kern(cfv->vr_rx);
324 __napi_schedule(napi);
325 goto again;
326 }
327 break;
328
329 case -ENOMEM:
330 ++cfv->stats.rx_nomem;
331 dev_kfree_skb(skb);
332 /* Stop NAPI poll on OOM, we hope to be polled later */
333 napi_complete(napi);
334 vringh_notify_enable_kern(cfv->vr_rx);
335 break;
336
337 default:
338 /* We're doomed, any modem fault is fatal */
339 netdev_warn(cfv->ndev, "Bad ring, disable device\n");
340 cfv->ndev->stats.rx_dropped = riov->used - riov->i;
341 napi_complete(napi);
342 vringh_notify_disable_kern(cfv->vr_rx);
343 netif_carrier_off(cfv->ndev);
344 break;
345 }
346out:
347 if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
348 vringh_notify(cfv->vr_rx);
349 return rxcnt;
350}
351
352static void cfv_recv(struct virtio_device *vdev, struct vringh *vr_rx)
353{
354 struct cfv_info *cfv = vdev->priv;
355
356 ++cfv->stats.rx_kicks;
357 vringh_notify_disable_kern(cfv->vr_rx);
358 napi_schedule(&cfv->napi);
359}
360
361static void cfv_destroy_genpool(struct cfv_info *cfv)
362{
363 if (cfv->alloc_addr)
364 dma_free_coherent(cfv->vdev->dev.parent->parent,
365 cfv->allocsz, cfv->alloc_addr,
366 cfv->alloc_dma);
367
368 if (!cfv->genpool)
369 return;
370 gen_pool_free(cfv->genpool, cfv->reserved_mem,
371 cfv->reserved_size);
372 gen_pool_destroy(cfv->genpool);
373 cfv->genpool = NULL;
374}
375
376static int cfv_create_genpool(struct cfv_info *cfv)
377{
378 int err;
379
380 /* dma_alloc can only allocate whole pages, and we need a more
381 * fine-grained allocation, so we use genpool. We ask for the space needed
382 * by IP and a full ring. If the dma allocation fails, we retry with a
383 * smaller allocation size.
384 */
385 err = -ENOMEM;
386 cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) *
387 (ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10;
388 if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu)
389 return -EINVAL;
390
391 for (;;) {
392 if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) {
393 netdev_info(cfv->ndev, "Not enough device memory\n");
394 return -ENOMEM;
395 }
396
397 cfv->alloc_addr = dma_alloc_coherent(
398 cfv->vdev->dev.parent->parent,
399 cfv->allocsz, &cfv->alloc_dma,
400 GFP_ATOMIC);
401 if (cfv->alloc_addr)
402 break;
403
404 cfv->allocsz = (cfv->allocsz * 3) >> 2;
405 }
406
407 netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n",
408 cfv->allocsz);
409
410 /* Allocate on 128-byte boundaries (1 << 7) */
411 cfv->genpool = gen_pool_create(7, -1);
412 if (!cfv->genpool)
413 goto err;
414
415 err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr,
416 (phys_addr_t)virt_to_phys(cfv->alloc_addr),
417 cfv->allocsz, -1);
418 if (err)
419 goto err;
420
421 /* Reserve some memory for low memory situations. If we hit the roof
422 * in the memory pool, we stop TX flow and release the reserve.
423 */
424 cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu;
425 cfv->reserved_mem = gen_pool_alloc(cfv->genpool,
426 cfv->reserved_size);
427 if (!cfv->reserved_mem) {
428 err = -ENOMEM;
429 goto err;
430 }
431
432 cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx);
433 return 0;
434err:
435 cfv_destroy_genpool(cfv);
436 return err;
437}
438
439/* Enable the CAIF interface and allocate the memory-pool */
440static int cfv_netdev_open(struct net_device *netdev)
441{
442 struct cfv_info *cfv = netdev_priv(netdev);
443
444 if (cfv_create_genpool(cfv))
445 return -ENOMEM;
446
447 netif_carrier_on(netdev);
448 napi_enable(&cfv->napi);
449
450 /* Schedule NAPI to read any pending packets */
451 napi_schedule(&cfv->napi);
452 return 0;
453}
454
455/* Disable the CAIF interface and free the memory-pool */
456static int cfv_netdev_close(struct net_device *netdev)
457{
458 struct cfv_info *cfv = netdev_priv(netdev);
459 unsigned long flags;
460 struct buf_info *buf_info;
461
462 /* Disable interrupts, queues and NAPI polling */
463 netif_carrier_off(netdev);
464 virtqueue_disable_cb(cfv->vq_tx);
465 vringh_notify_disable_kern(cfv->vr_rx);
466 napi_disable(&cfv->napi);
467
468 /* Release any TX buffers on both the used and available rings */
469 cfv_release_used_buf(cfv->vq_tx);
470 spin_lock_irqsave(&cfv->tx_lock, flags);
471 while ((buf_info = virtqueue_detach_unused_buf(cfv->vq_tx)))
472 free_buf_info(cfv, buf_info);
473 spin_unlock_irqrestore(&cfv->tx_lock, flags);
474
475 /* Release all dma allocated memory and destroy the pool */
476 cfv_destroy_genpool(cfv);
477 return 0;
478}
479
480/* Allocate a buffer in dma-memory and copy skb to it */
481static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv,
482 struct sk_buff *skb,
483 struct scatterlist *sg)
484{
485 struct caif_payload_info *info = (void *)&skb->cb;
486 struct buf_info *buf_info = NULL;
487 u8 pad_len, hdr_ofs;
488
489 if (!cfv->genpool)
490 goto err;
491
492 if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) {
493 netdev_warn(cfv->ndev, "Invalid packet len (%d > %d)\n",
494 cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu);
495 goto err;
496 }
497
498 buf_info = kmalloc(sizeof(struct buf_info), GFP_ATOMIC);
499 if (unlikely(!buf_info))
500 goto err;
501
502 /* Make the IP header aligned in the buffer */
503 hdr_ofs = cfv->tx_hr + info->hdr_len;
504 pad_len = hdr_ofs & (IP_HDR_ALIGN - 1);
505 buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len;
506
507 /* allocate dma memory buffer */
508 buf_info->vaddr = (void *)gen_pool_alloc(cfv->genpool, buf_info->size);
509 if (unlikely(!buf_info->vaddr))
510 goto err;
511
512 /* copy skbuf contents to send buffer */
513 skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len);
514 sg_init_one(sg, buf_info->vaddr + pad_len,
515 skb->len + cfv->tx_hr + cfv->rx_hr);
516
517 return buf_info;
518err:
519 kfree(buf_info);
520 return NULL;
521}
522
523/* Put the CAIF packet on the virtio ring and kick the receiver */
524static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
525{
526 struct cfv_info *cfv = netdev_priv(netdev);
527 struct buf_info *buf_info;
528 struct scatterlist sg;
529 unsigned long flags;
530 bool flow_off = false;
531 int ret;
532
533 /* garbage collect released buffers */
534 cfv_release_used_buf(cfv->vq_tx);
535 spin_lock_irqsave(&cfv->tx_lock, flags);
536
537 /* The flow-off check takes the number of cpus into account to make sure
538 * the virtqueue will not be overfilled under any possible smp conditions.
539 *
540 * Flow-on is triggered when sufficient buffers are freed
541 */
542 if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) {
543 flow_off = true;
544 cfv->stats.tx_full_ring++;
545 }
546
547 /* If we run out of memory, we release the memory reserve and retry
548 * allocation.
549 */
550 buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
551 if (unlikely(!buf_info)) {
552 cfv->stats.tx_no_mem++;
553 flow_off = true;
554
555 if (cfv->reserved_mem && cfv->genpool) {
556 gen_pool_free(cfv->genpool, cfv->reserved_mem,
557 cfv->reserved_size);
558 cfv->reserved_mem = 0;
559 buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
560 }
561 }
562
563 if (unlikely(flow_off)) {
564 /* Turn flow on when a 1/4 of the descriptors are released */
565 cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4;
566 /* Enable notifications of recycled TX buffers */
567 virtqueue_enable_cb(cfv->vq_tx);
568 netif_tx_stop_all_queues(netdev);
569 }
570
571 if (unlikely(!buf_info)) {
572 /* If the memory reserve does its job, this shouldn't happen */
573 netdev_warn(cfv->ndev, "Out of gen_pool memory\n");
574 goto err;
575 }
576
577 ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC);
578 if (unlikely((ret < 0))) {
579 /* If flow control works, this shouldn't happen */
580 netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n",
581 ret);
582 goto err;
583 }
584
585 /* update netdev statistics */
586 cfv->ndev->stats.tx_packets++;
587 cfv->ndev->stats.tx_bytes += skb->len;
588 spin_unlock_irqrestore(&cfv->tx_lock, flags);
589
590 /* tell the remote processor it has a pending message to read */
591 virtqueue_kick(cfv->vq_tx);
592
593 dev_kfree_skb(skb);
594 return NETDEV_TX_OK;
595err:
596 spin_unlock_irqrestore(&cfv->tx_lock, flags);
597 cfv->ndev->stats.tx_dropped++;
598 free_buf_info(cfv, buf_info);
599 dev_kfree_skb(skb);
600 return NETDEV_TX_OK;
601}
602
603static void cfv_tx_release_tasklet(unsigned long drv)
604{
605 struct cfv_info *cfv = (struct cfv_info *)drv;
606 cfv_release_used_buf(cfv->vq_tx);
607}
608
609static const struct net_device_ops cfv_netdev_ops = {
610 .ndo_open = cfv_netdev_open,
611 .ndo_stop = cfv_netdev_close,
612 .ndo_start_xmit = cfv_netdev_tx,
613};
614
615static void cfv_netdev_setup(struct net_device *netdev)
616{
617 netdev->netdev_ops = &cfv_netdev_ops;
618 netdev->type = ARPHRD_CAIF;
619 netdev->tx_queue_len = 100;
620 netdev->flags = IFF_POINTOPOINT | IFF_NOARP;
621 netdev->mtu = CFV_DEF_MTU_SIZE;
622 netdev->destructor = free_netdev;
623}
624
625/* Create debugfs counters for the device */
626static inline void debugfs_init(struct cfv_info *cfv)
627{
628 cfv->debugfs =
629 debugfs_create_dir(netdev_name(cfv->ndev), NULL);
630
631 if (IS_ERR(cfv->debugfs))
632 return;
633
634 debugfs_create_u32("rx-napi-complete", S_IRUSR, cfv->debugfs,
635 &cfv->stats.rx_napi_complete);
636 debugfs_create_u32("rx-napi-resched", S_IRUSR, cfv->debugfs,
637 &cfv->stats.rx_napi_resched);
638 debugfs_create_u32("rx-nomem", S_IRUSR, cfv->debugfs,
639 &cfv->stats.rx_nomem);
640 debugfs_create_u32("rx-kicks", S_IRUSR, cfv->debugfs,
641 &cfv->stats.rx_kicks);
642 debugfs_create_u32("tx-full-ring", S_IRUSR, cfv->debugfs,
643 &cfv->stats.tx_full_ring);
644 debugfs_create_u32("tx-no-mem", S_IRUSR, cfv->debugfs,
645 &cfv->stats.tx_no_mem);
646 debugfs_create_u32("tx-kicks", S_IRUSR, cfv->debugfs,
647 &cfv->stats.tx_kicks);
648 debugfs_create_u32("tx-flow-on", S_IRUSR, cfv->debugfs,
649 &cfv->stats.tx_flow_on);
650}
651
652/* Set up CAIF for a virtio device */
653static int cfv_probe(struct virtio_device *vdev)
654{
655 vq_callback_t *vq_cbs = cfv_release_cb;
656 vrh_callback_t *vrh_cbs = cfv_recv;
657 const char *names = "output";
658 const char *cfv_netdev_name = "cfvrt";
659 struct net_device *netdev;
660 struct cfv_info *cfv;
661 int err = -EINVAL;
662
663 netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name,
664 cfv_netdev_setup);
665 if (!netdev)
666 return -ENOMEM;
667
668 cfv = netdev_priv(netdev);
669 cfv->vdev = vdev;
670 cfv->ndev = netdev;
671
672 spin_lock_init(&cfv->tx_lock);
673
674 /* Get the RX virtio ring. This is a "host side vring". */
675 err = -ENODEV;
676 if (!vdev->vringh_config || !vdev->vringh_config->find_vrhs)
677 goto err;
678
679 err = vdev->vringh_config->find_vrhs(vdev, 1, &cfv->vr_rx, &vrh_cbs);
680 if (err)
681 goto err;
682
683 /* Get the TX virtio ring. This is a "guest side vring". */
684 err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names);
685 if (err)
686 goto err;
687
688 /* Get the CAIF configuration from virtio config space, if available */
689#define GET_VIRTIO_CONFIG_OPS(_v, _var, _f) \
690 ((_v)->config->get(_v, offsetof(struct virtio_caif_transf_config, _f), \
691 &_var, \
692 FIELD_SIZEOF(struct virtio_caif_transf_config, _f)))
693
694 if (vdev->config->get) {
695 GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_hr, headroom);
696 GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_hr, headroom);
697 GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_tr, tailroom);
698 GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_tr, tailroom);
699 GET_VIRTIO_CONFIG_OPS(vdev, cfv->mtu, mtu);
700 GET_VIRTIO_CONFIG_OPS(vdev, cfv->mru, mtu);
701 } else {
702 cfv->tx_hr = CFV_DEF_HEADROOM;
703 cfv->rx_hr = CFV_DEF_HEADROOM;
704 cfv->tx_tr = CFV_DEF_TAILROOM;
705 cfv->rx_tr = CFV_DEF_TAILROOM;
706 cfv->mtu = CFV_DEF_MTU_SIZE;
707 cfv->mru = CFV_DEF_MTU_SIZE;
708 }
709
710 netdev->needed_headroom = cfv->tx_hr;
711 netdev->needed_tailroom = cfv->tx_tr;
712
713 /* Disable buffer release interrupts unless we have stopped TX queues */
714 virtqueue_disable_cb(cfv->vq_tx);
715
716 netdev->mtu = cfv->mtu - cfv->tx_tr;
717 vdev->priv = cfv;
718
719 /* Initialize NAPI poll context data */
720 vringh_kiov_init(&cfv->ctx.riov, NULL, 0);
721 cfv->ctx.head = USHRT_MAX;
722 netif_napi_add(netdev, &cfv->napi, cfv_rx_poll, CFV_DEFAULT_QUOTA);
723
724 tasklet_init(&cfv->tx_release_tasklet,
725 cfv_tx_release_tasklet,
726 (unsigned long)cfv);
727
728 /* Carrier is off until netdevice is opened */
729 netif_carrier_off(netdev);
730
731 /* register Netdev */
732 err = register_netdev(netdev);
733 if (err) {
734 dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
735 goto err;
736 }
737
738 debugfs_init(cfv);
739
740 return 0;
741err:
742 netdev_warn(cfv->ndev, "CAIF Virtio probe failed:%d\n", err);
743
744 if (cfv->vr_rx)
745 vdev->vringh_config->del_vrhs(cfv->vdev);
746 if (cfv->vdev)
747 vdev->config->del_vqs(cfv->vdev);
748 free_netdev(netdev);
749 return err;
750}
751
752static void cfv_remove(struct virtio_device *vdev)
753{
754 struct cfv_info *cfv = vdev->priv;
755
756 rtnl_lock();
757 dev_close(cfv->ndev);
758 rtnl_unlock();
759
760 tasklet_kill(&cfv->tx_release_tasklet);
761 debugfs_remove_recursive(cfv->debugfs);
762
763 vringh_kiov_cleanup(&cfv->ctx.riov);
764 vdev->config->reset(vdev);
765 vdev->vringh_config->del_vrhs(cfv->vdev);
766 cfv->vr_rx = NULL;
767 vdev->config->del_vqs(cfv->vdev);
768 unregister_netdev(cfv->ndev);
769}
770
771static struct virtio_device_id id_table[] = {
772 { VIRTIO_ID_CAIF, VIRTIO_DEV_ANY_ID },
773 { 0 },
774};
775
776static unsigned int features[] = {
777};
778
779static struct virtio_driver caif_virtio_driver = {
780 .feature_table = features,
781 .feature_table_size = ARRAY_SIZE(features),
782 .driver.name = KBUILD_MODNAME,
783 .driver.owner = THIS_MODULE,
784 .id_table = id_table,
785 .probe = cfv_probe,
786 .remove = cfv_remove,
787};
788
789module_virtio_driver(caif_virtio_driver);
790MODULE_DEVICE_TABLE(virtio, id_table);
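
The TX path above carves per-packet buffers out of a single dma_alloc_coherent() region managed by genalloc, and keeps a small reserve so flow control can be reopened under memory pressure (see cfv_create_genpool()). A minimal sketch of that allocation pattern follows; the device pointer, pool and reserve sizes, and the error handling are illustrative assumptions, not the driver's exact values.

	#include <linux/dma-mapping.h>
	#include <linux/genalloc.h>
	#include <linux/io.h>

	#define EXAMPLE_POOL_SZ		(64 * 1024)	/* assumed sizes */
	#define EXAMPLE_RESERVE_SZ	(4 * 1024)

	static int example_pool_init(struct device *dev, struct gen_pool **poolp,
				     void **vaddr, dma_addr_t *dma,
				     unsigned long *reserve)
	{
		struct gen_pool *pool;

		*vaddr = dma_alloc_coherent(dev, EXAMPLE_POOL_SZ, dma, GFP_KERNEL);
		if (!*vaddr)
			return -ENOMEM;

		/* 128-byte minimum allocation (order 7), any NUMA node */
		pool = gen_pool_create(7, -1);
		if (!pool)
			goto free_dma;

		if (gen_pool_add_virt(pool, (unsigned long)*vaddr,
				      virt_to_phys(*vaddr), EXAMPLE_POOL_SZ, -1))
			goto destroy_pool;

		/* Emergency reserve, handed back when the pool runs dry */
		*reserve = gen_pool_alloc(pool, EXAMPLE_RESERVE_SZ);
		if (!*reserve)
			goto destroy_pool;

		*poolp = pool;
		return 0;

	destroy_pool:
		gen_pool_destroy(pool);
	free_dma:
		dma_free_coherent(dev, EXAMPLE_POOL_SZ, *vaddr, *dma);
		return -ENOMEM;
	}

Buffers are then handed out with gen_pool_alloc()/gen_pool_free() at 128-byte granularity, which is what free_buf_info() and cfv_alloc_and_copy_to_shm() above do.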
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 50077753a0e5..3c23fdc27bf0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -39,7 +39,6 @@ module_param(gso, bool, 0444);
 #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
 #define GOOD_COPY_LEN 128
 
-#define VIRTNET_SEND_COMMAND_SG_MAX 2
 #define VIRTNET_DRIVER_VERSION "1.0.0"
 
 struct virtnet_stats {
@@ -444,7 +443,7 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
 
 	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
 
-	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 2, skb, gfp);
+	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
 	if (err < 0)
 		dev_kfree_skb(skb);
 
@@ -489,8 +488,8 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
 
 	/* chain first in list head */
 	first->private = (unsigned long)list;
-	err = virtqueue_add_buf(rq->vq, rq->sg, 0, MAX_SKB_FRAGS + 2,
-				first, gfp);
+	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
+				  first, gfp);
 	if (err < 0)
 		give_pages(rq, first);
 
@@ -508,7 +507,7 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
 
 	sg_init_one(rq->sg, page_address(page), PAGE_SIZE);
 
-	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 1, page, gfp);
+	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp);
 	if (err < 0)
 		give_pages(rq, page);
 
@@ -582,7 +581,7 @@ static void refill_work(struct work_struct *work)
 	bool still_empty;
 	int i;
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		struct receive_queue *rq = &vi->rq[i];
 
 		napi_disable(&rq->napi);
@@ -637,7 +636,7 @@ static int virtnet_open(struct net_device *dev)
 	struct virtnet_info *vi = netdev_priv(dev);
 	int i;
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		/* Make sure we have some buffers: if oom use wq. */
 		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
 			schedule_delayed_work(&vi->refill, 0);
@@ -711,8 +710,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 	sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
 
 	num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
-	return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
-				 0, skb, GFP_ATOMIC);
+	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
 }
 
 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -767,32 +765,35 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
  * never fail unless improperly formated.
  */
 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
-				 struct scatterlist *data, int out, int in)
+				 struct scatterlist *out,
+				 struct scatterlist *in)
 {
-	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
+	struct scatterlist *sgs[4], hdr, stat;
 	struct virtio_net_ctrl_hdr ctrl;
 	virtio_net_ctrl_ack status = ~0;
-	unsigned int tmp;
-	int i;
+	unsigned out_num = 0, in_num = 0, tmp;
 
 	/* Caller should know better */
-	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
-	       (out + in > VIRTNET_SEND_COMMAND_SG_MAX));
-
-	out++; /* Add header */
-	in++; /* Add return status */
+	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
 
 	ctrl.class = class;
 	ctrl.cmd = cmd;
+	/* Add header */
+	sg_init_one(&hdr, &ctrl, sizeof(ctrl));
+	sgs[out_num++] = &hdr;
 
-	sg_init_table(sg, out + in);
+	if (out)
+		sgs[out_num++] = out;
+	if (in)
+		sgs[out_num + in_num++] = in;
 
-	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
-	for_each_sg(data, s, out + in - 2, i)
-		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
-	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
+	/* Add return status. */
+	sg_init_one(&stat, &status, sizeof(status));
+	sgs[out_num + in_num++] = &stat;
 
-	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
+	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
+	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
+	       < 0);
 
 	virtqueue_kick(vi->cvq);
 
@@ -821,7 +822,7 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
 	sg_init_one(&sg, addr->sa_data, dev->addr_len);
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
 				  VIRTIO_NET_CTRL_MAC_ADDR_SET,
-				  &sg, 1, 0)) {
+				  &sg, NULL)) {
 		dev_warn(&vdev->dev,
 			 "Failed to set mac address by vq command.\n");
 		return -EINVAL;
@@ -889,8 +890,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
 {
 	rtnl_lock();
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
-				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
-				  0, 0))
+				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
 	rtnl_unlock();
 }
@@ -900,6 +900,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	struct scatterlist sg;
 	struct virtio_net_ctrl_mq s;
 	struct net_device *dev = vi->dev;
+	int i;
 
 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
 		return 0;
@@ -908,12 +909,16 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	sg_init_one(&sg, &s, sizeof(s));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
-				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, 1, 0)){
+				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
 		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
 			 queue_pairs);
 		return -EINVAL;
-	} else
+	} else {
+		for (i = vi->curr_queue_pairs; i < queue_pairs; i++)
+			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+				schedule_delayed_work(&vi->refill, 0);
 		vi->curr_queue_pairs = queue_pairs;
+	}
 
 	return 0;
 }
@@ -955,7 +960,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 				  VIRTIO_NET_CTRL_RX_PROMISC,
-				  sg, 1, 0))
+				  sg, NULL))
 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
 			 promisc ? "en" : "dis");
 
@@ -963,7 +968,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 				  VIRTIO_NET_CTRL_RX_ALLMULTI,
-				  sg, 1, 0))
+				  sg, NULL))
 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
 			 allmulti ? "en" : "dis");
 
@@ -1000,7 +1005,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
 				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
-				  sg, 2, 0))
+				  sg, NULL))
 		dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");
 
 	kfree(buf);
@@ -1015,7 +1020,7 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
 	sg_init_one(&sg, &vid, sizeof(vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
+				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
 		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
 	return 0;
 }
@@ -1029,7 +1034,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
 	sg_init_one(&sg, &vid, sizeof(vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
+				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
 		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
 	return 0;
 }
@@ -1570,7 +1575,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	}
 
 	/* Last of all, set up some receive buffers. */
-	for (i = 0; i < vi->max_queue_pairs; i++) {
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		try_fill_recv(&vi->rq[i], GFP_KERNEL);
 
 		/* If we didn't even get one input buffer, we're useless. */
@@ -1694,7 +1699,7 @@ static int virtnet_restore(struct virtio_device *vdev)
 
 	netif_device_attach(vi->dev);
 
-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->curr_queue_pairs; i++)
 		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
 			schedule_delayed_work(&vi->refill, 0);
 
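
The virtio_net.c changes above are part of the tree-wide move from virtqueue_add_buf(vq, sg, out, in, data, gfp) to the direction-specific helpers virtqueue_add_outbuf(), virtqueue_add_inbuf() and virtqueue_add_sgs(). A rough before/after sketch of the receive-buffer case, with a caller-supplied vq, buf and len assumed, is:

	#include <linux/scatterlist.h>
	#include <linux/virtio.h>

	static int example_queue_rx_buf(struct virtqueue *vq, void *buf,
					unsigned int len)
	{
		struct scatterlist sg;

		sg_init_one(&sg, buf, len);

		/* Before this merge the call was
		 *	virtqueue_add_buf(vq, &sg, 0, 1, buf, GFP_ATOMIC);
		 * with "0, 1" meaning no device-readable and one
		 * device-writable element.  The direction is now implied
		 * by the helper name:
		 */
		return virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
	}

For mixed directions, virtqueue_add_sgs() takes an array of scatterlist pointers with all device-readable lists first, followed by the device-writable ones, which is how the rewritten virtnet_send_command() above builds its control requests.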