author		Erwan Yvin <erwan.yvin@stericsson.com>		2013-03-19 23:22:24 -0400
committer	Rusty Russell <rusty@rustcorp.com.au>		2013-03-19 23:36:06 -0400
commit		0d2e1a2926b1839a4b74519e660739b2566c9386 (patch)
tree		29f6397b0f544c60bfdbc8d9ad0aae5337fc7532 /drivers/net/caif
parent		3beee86a4b9374e38dba36b44e81f1423a0d6b54 (diff)
caif_virtio: Introduce caif over virtio
Add the CAIF Virtio shared memory driver for talking to a modem.

This CAIF link layer communicates with the modem over shared memory and is
implemented as a virtio_driver. The underlying virtio device is managed by
the remoteproc framework. The virtio queue is used for transmitting data to
the modem, and the new vringh is used for receiving data.

Genalloc is used for managing the shared memory used for TX data. The default
dma-alloc-coherent allocator can only allocate whole pages, and this wastes
too much shared memory.

Flow control is implemented by stopping the TX queues if the virtio ring goes
full or we run out of memory; queues are reopened when the ring is below the
watermark.

NAPI is used in the RX path, and a dedicated tasklet is used for releasing
TX buffers.

Signed-off-by: Erwan Yvin <erwan.yvin@stericsson.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> (minor fixes)
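The genalloc arrangement described above is implemented in cfv_create_genpool() in the new driver below. Condensed to its core, and using hypothetical names, the idea looks roughly like this sketch (not part of the patch): one large dma_alloc_coherent() region is handed to a gen_pool, and per-packet TX buffers are then sub-allocated from it without page-size rounding.

/* Sketch only: carve small TX buffers out of one coherent DMA region. */
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/io.h>

static struct gen_pool *example_make_tx_pool(struct device *dev, size_t size,
					     void **vaddr, dma_addr_t *dma)
{
	struct gen_pool *pool;

	/* One large coherent allocation shared with the modem */
	*vaddr = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	if (!*vaddr)
		return NULL;

	/* Sub-allocate from it on 128-byte (1 << 7) boundaries */
	pool = gen_pool_create(7, -1);
	if (!pool)
		goto free_dma;

	if (gen_pool_add_virt(pool, (unsigned long)*vaddr,
			      virt_to_phys(*vaddr), size, -1))
		goto destroy_pool;

	return pool;

destroy_pool:
	gen_pool_destroy(pool);
free_dma:
	dma_free_coherent(dev, size, *vaddr, *dma);
	return NULL;
}

Per-packet buffers then come from gen_pool_alloc(pool, len) and are returned with gen_pool_free(pool, addr, len); the driver below additionally keeps a reserve allocation from the same pool for low-memory situations.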
Diffstat (limited to 'drivers/net/caif')
-rw-r--r--	drivers/net/caif/Kconfig	14
-rw-r--r--	drivers/net/caif/Makefile	3
-rw-r--r--	drivers/net/caif/caif_virtio.c	785
3 files changed, 802 insertions, 0 deletions
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 60c2142373c9..893f9154011e 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -47,3 +47,17 @@ config CAIF_HSI
47 The caif low level driver for CAIF over HSI.
48 Be aware that if you enable this then you also need to
49 enable a low-level HSI driver.
50
51config CAIF_VIRTIO
52 tristate "CAIF virtio transport driver"
53 depends on CAIF
54 select VHOST_RING
55 select VIRTIO
56 select GENERIC_ALLOCATOR
57 default n
58 ---help---
59 The caif driver for CAIF over Virtio.
60
61if CAIF_VIRTIO
62source "drivers/vhost/Kconfig"
63endif
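Note that, because VHOST_RING, VIRTIO and GENERIC_ALLOCATOR are pulled in via select, a kernel config should only need CAIF and CAIF_VIRTIO enabled explicitly; the guarded source of drivers/vhost/Kconfig is presumably there so that the VHOST_RING symbol is visible even when the vhost menu would not otherwise be included.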
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 91dff861560f..d9ee26a96c6e 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -13,3 +13,6 @@ obj-$(CONFIG_CAIF_SHM) += caif_shm.o
13
14# HSI interface
15obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
16
17# Virtio interface
18obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
new file mode 100644
index 000000000000..b1e1205e4e28
--- /dev/null
+++ b/drivers/net/caif/caif_virtio.c
@@ -0,0 +1,785 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2013
3 * Authors: Vicram Arv / vikram.arv@stericsson.com,
4 * Dmitry Tarnyagin / dmitry.tarnyagin@stericsson.com
5 * Sjur Brendeland / sjur.brandeland@stericsson.com
6 * License terms: GNU General Public License (GPL) version 2
7 */
8#include <linux/module.h>
9#include <linux/if_arp.h>
10#include <linux/virtio.h>
11#include <linux/vringh.h>
12#include <linux/debugfs.h>
13#include <linux/spinlock.h>
14#include <linux/genalloc.h>
15#include <linux/interrupt.h>
16#include <linux/netdevice.h>
17#include <linux/rtnetlink.h>
18#include <linux/virtio_ids.h>
19#include <linux/virtio_caif.h>
20#include <linux/virtio_ring.h>
21#include <linux/dma-mapping.h>
22#include <net/caif/caif_dev.h>
23#include <linux/virtio_config.h>
24
25MODULE_LICENSE("GPL v2");
26MODULE_AUTHOR("Vicram Arv <vikram.arv@stericsson.com>");
27MODULE_AUTHOR("Sjur Brendeland <sjur.brandeland@stericsson.com>");
28MODULE_DESCRIPTION("Virtio CAIF Driver");
29
30/* NAPI schedule quota */
31#define CFV_DEFAULT_QUOTA 32
32
33/* Defaults used if virtio config space is unavailable */
34#define CFV_DEF_MTU_SIZE 4096
35#define CFV_DEF_HEADROOM 32
36#define CFV_DEF_TAILROOM 32
37
38/* Required IP header alignment */
39#define IP_HDR_ALIGN 4
40
41/* struct cfv_napi_context - NAPI context info
42 * @riov: IOV holding data read from the ring. Note that riov may
43 * still hold data when cfv_rx_poll() returns.
44 * @head: Last descriptor ID we received from vringh_getdesc_kern.
45 * We use this to put descriptor back on the used ring. USHRT_MAX is
46 * used to indicate invalid head-id.
47 */
48struct cfv_napi_context {
49 struct vringh_kiov riov;
50 unsigned short head;
51};
52
53/* struct cfv_stats - statistics for debugfs
54 * @rx_napi_complete: Number of NAPI completions (RX)
55 * @rx_napi_resched: Number of calls where the full quota was used (RX)
56 * @rx_nomem: Number of SKB alloc failures (RX)
57 * @rx_kicks: Number of RX kicks
58 * @tx_full_ring: Number times TX ring was full
59 * @tx_no_mem: Number of times TX went out of memory
60 * @tx_flow_on: Number of flow on (TX)
61 * @tx_kicks: Number of TX kicks
62 */
63struct cfv_stats {
64 u32 rx_napi_complete;
65 u32 rx_napi_resched;
66 u32 rx_nomem;
67 u32 rx_kicks;
68 u32 tx_full_ring;
69 u32 tx_no_mem;
70 u32 tx_flow_on;
71 u32 tx_kicks;
72};
73
74/* struct cfv_info - Caif Virtio control structure
75 * @cfdev: caif common header
76 * @vdev: Associated virtio device
77 * @vr_rx: rx/downlink host vring
78 * @vq_tx: tx/uplink virtqueue
79 * @ndev: CAIF link layer device
80 * @watermark_tx: indicates number of free descriptors we need
81 * to reopen the tx-queues after overload.
82 * @tx_lock: protects vq_tx from concurrent use
83 * @tx_release_tasklet: Tasklet for freeing consumed TX buffers
84 * @napi: Napi context used in cfv_rx_poll()
85 * @ctx: Context data used in cfv_rx_poll()
86 * @tx_hr: transmit headroom
87 * @rx_hr: receive headroom
88 * @tx_tr: transmit tail room
89 * @rx_tr: receive tail room
90 * @mtu: transmit max size
91 * @mru: receive max size
92 * @allocsz: size of dma memory reserved for TX buffers
93 * @alloc_addr: virtual address to dma memory for TX buffers
94 * @alloc_dma: dma address to dma memory for TX buffers
95 * @genpool: Gen Pool used for allocating TX buffers
96 * @reserved_mem: Pointer to memory reserve allocated from genpool
97 * @reserved_size: Size of memory reserve allocated from genpool
98 * @stats: Statistics exposed in debugfs
99 * @debugfs: Debugfs dentry for statistic counters
100 */
101struct cfv_info {
102 struct caif_dev_common cfdev;
103 struct virtio_device *vdev;
104 struct vringh *vr_rx;
105 struct virtqueue *vq_tx;
106 struct net_device *ndev;
107 unsigned int watermark_tx;
108 /* Protect access to vq_tx */
109 spinlock_t tx_lock;
110 struct tasklet_struct tx_release_tasklet;
111 struct napi_struct napi;
112 struct cfv_napi_context ctx;
113 u16 tx_hr;
114 u16 rx_hr;
115 u16 tx_tr;
116 u16 rx_tr;
117 u32 mtu;
118 u32 mru;
119 size_t allocsz;
120 void *alloc_addr;
121 dma_addr_t alloc_dma;
122 struct gen_pool *genpool;
123 unsigned long reserved_mem;
124 size_t reserved_size;
125 struct cfv_stats stats;
126 struct dentry *debugfs;
127};
128
129/* struct buf_info - maintains transmit buffer data handle
130 * @size: size of transmit buffer
131 * @dma_handle: handle to allocated dma device memory area
132 * @vaddr: virtual address mapping to allocated memory area
133 */
134struct buf_info {
135 size_t size;
136 u8 *vaddr;
137};
138
139/* Called from virtio device, in IRQ context */
140static void cfv_release_cb(struct virtqueue *vq_tx)
141{
142 struct cfv_info *cfv = vq_tx->vdev->priv;
143
144 ++cfv->stats.tx_kicks;
145 tasklet_schedule(&cfv->tx_release_tasklet);
146}
147
148static void free_buf_info(struct cfv_info *cfv, struct buf_info *buf_info)
149{
150 if (!buf_info)
151 return;
152 gen_pool_free(cfv->genpool, (unsigned long) buf_info->vaddr,
153 buf_info->size);
154 kfree(buf_info);
155}
156
157/* This is invoked whenever the remote processor has completed processing
158 * a TX msg we just sent and has put the buffer back on the used ring.
159 */
160static void cfv_release_used_buf(struct virtqueue *vq_tx)
161{
162 struct cfv_info *cfv = vq_tx->vdev->priv;
163 unsigned long flags;
164
165 BUG_ON(vq_tx != cfv->vq_tx);
166
167 for (;;) {
168 unsigned int len;
169 struct buf_info *buf_info;
170
171 /* Get used buffer from used ring to recycle used descriptors */
172 spin_lock_irqsave(&cfv->tx_lock, flags);
173 buf_info = virtqueue_get_buf(vq_tx, &len);
174 spin_unlock_irqrestore(&cfv->tx_lock, flags);
175
176 /* Stop looping if there are no more buffers to free */
177 if (!buf_info)
178 break;
179
180 free_buf_info(cfv, buf_info);
181
182 /* watermark_tx indicates if we previously stopped the tx
183 * queues. If we have enough free slots in the virtio ring,
184 * re-establish the memory reserve and open up the tx queues.
185 */
186 if (cfv->vq_tx->num_free <= cfv->watermark_tx)
187 continue;
188
189 /* Re-establish memory reserve */
190 if (cfv->reserved_mem == 0 && cfv->genpool)
191 cfv->reserved_mem =
192 gen_pool_alloc(cfv->genpool,
193 cfv->reserved_size);
194
195 /* Open up the tx queues */
196 if (cfv->reserved_mem) {
197 cfv->watermark_tx =
198 virtqueue_get_vring_size(cfv->vq_tx);
199 netif_tx_wake_all_queues(cfv->ndev);
200 /* Buffers are recycled in cfv_netdev_tx, so
201 * disable notifications when queues are opened.
202 */
203 virtqueue_disable_cb(cfv->vq_tx);
204 ++cfv->stats.tx_flow_on;
205 } else {
206 /* if no memory reserve, wait for more free slots */
207 WARN_ON(cfv->watermark_tx >
208 virtqueue_get_vring_size(cfv->vq_tx));
209 cfv->watermark_tx +=
210 virtqueue_get_vring_size(cfv->vq_tx) / 4;
211 }
212 }
213}
214
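To make the watermark handling above concrete: with, say, a 256-descriptor TX ring, watermark_tx equals the ring size (256) in normal operation, so the num_free test always continues the loop and nothing more happens here. When cfv_netdev_tx() detects congestion it stops the queues and drops the watermark to 256/4 = 64; once more than 64 descriptors are free again, this function tries to re-grab the memory reserve and, on success, resets the watermark to 256 and wakes the queues. If the gen_pool is still exhausted, the watermark is instead bumped by another 64, keeping the queues closed until more buffers have been released.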
215/* Allocate a SKB and copy packet data to it */
216static struct sk_buff *cfv_alloc_and_copy_skb(int *err,
217 struct cfv_info *cfv,
218 u8 *frm, u32 frm_len)
219{
220 struct sk_buff *skb;
221 u32 cfpkt_len, pad_len;
222
223 *err = 0;
224 /* Verify the frame length against the MRU and the down-link head/tail room */
225 if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) {
226 netdev_err(cfv->ndev,
227 "Invalid frmlen:%u mtu:%u hr:%d tr:%d\n",
228 frm_len, cfv->mru, cfv->rx_hr,
229 cfv->rx_tr);
230 *err = -EPROTO;
231 return NULL;
232 }
233
234 cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr);
235 pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1);
236
237 skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len);
238 if (!skb) {
239 *err = -ENOMEM;
240 return NULL;
241 }
242
243 skb_reserve(skb, cfv->rx_hr + pad_len);
244
245 memcpy(skb_put(skb, cfpkt_len), frm + cfv->rx_hr, cfpkt_len);
246 return skb;
247}
248
249/* Get packets from the host vring */
250static int cfv_rx_poll(struct napi_struct *napi, int quota)
251{
252 struct cfv_info *cfv = container_of(napi, struct cfv_info, napi);
253 int rxcnt = 0;
254 int err = 0;
255 void *buf;
256 struct sk_buff *skb;
257 struct vringh_kiov *riov = &cfv->ctx.riov;
258 unsigned int skb_len;
259
260again:
261 do {
262 skb = NULL;
263
264 /* Put the previous iovec back on the used ring and
265 * fetch a new iovec if we have processed all elements.
266 */
267 if (riov->i == riov->used) {
268 if (cfv->ctx.head != USHRT_MAX) {
269 vringh_complete_kern(cfv->vr_rx,
270 cfv->ctx.head,
271 0);
272 cfv->ctx.head = USHRT_MAX;
273 }
274
275 err = vringh_getdesc_kern(
276 cfv->vr_rx,
277 riov,
278 NULL,
279 &cfv->ctx.head,
280 GFP_ATOMIC);
281
282 if (err <= 0)
283 goto exit;
284 }
285
286 buf = phys_to_virt((unsigned long) riov->iov[riov->i].iov_base);
287 /* TODO: Add check on valid buffer address */
288
289 skb = cfv_alloc_and_copy_skb(&err, cfv, buf,
290 riov->iov[riov->i].iov_len);
291 if (unlikely(err))
292 goto exit;
293
294 /* Push received packet up the stack. */
295 skb_len = skb->len;
296 skb->protocol = htons(ETH_P_CAIF);
297 skb_reset_mac_header(skb);
298 skb->dev = cfv->ndev;
299 err = netif_receive_skb(skb);
300 if (unlikely(err)) {
301 ++cfv->ndev->stats.rx_dropped;
302 } else {
303 ++cfv->ndev->stats.rx_packets;
304 cfv->ndev->stats.rx_bytes += skb_len;
305 }
306
307 ++riov->i;
308 ++rxcnt;
309 } while (rxcnt < quota);
310
311 ++cfv->stats.rx_napi_resched;
312 goto out;
313
314exit:
315 switch (err) {
316 case 0:
317 ++cfv->stats.rx_napi_complete;
318
319 /* Really out of packets? (stolen from virtio_net) */
320 napi_complete(napi);
321 if (unlikely(vringh_notify_enable_kern(cfv->vr_rx)) &&
322 napi_schedule_prep(napi)) {
323 vringh_notify_disable_kern(cfv->vr_rx);
324 __napi_schedule(napi);
325 goto again;
326 }
327 break;
328
329 case -ENOMEM:
330 ++cfv->stats.rx_nomem;
331 dev_kfree_skb(skb);
332 /* Stop NAPI poll on OOM, we hope to be polled later */
333 napi_complete(napi);
334 vringh_notify_enable_kern(cfv->vr_rx);
335 break;
336
337 default:
338 /* We're doomed, any modem fault is fatal */
339 netdev_warn(cfv->ndev, "Bad ring, disable device\n");
340 cfv->ndev->stats.rx_dropped = riov->used - riov->i;
341 napi_complete(napi);
342 vringh_notify_disable_kern(cfv->vr_rx);
343 netif_carrier_off(cfv->ndev);
344 break;
345 }
346out:
347 if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
348 vringh_notify(cfv->vr_rx);
349 return rxcnt;
350}
351
352static void cfv_recv(struct virtio_device *vdev, struct vringh *vr_rx)
353{
354 struct cfv_info *cfv = vdev->priv;
355
356 ++cfv->stats.rx_kicks;
357 vringh_notify_disable_kern(cfv->vr_rx);
358 napi_schedule(&cfv->napi);
359}
360
361static void cfv_destroy_genpool(struct cfv_info *cfv)
362{
363 if (cfv->alloc_addr)
364 dma_free_coherent(cfv->vdev->dev.parent->parent,
365 cfv->allocsz, cfv->alloc_addr,
366 cfv->alloc_dma);
367
368 if (!cfv->genpool)
369 return;
370 gen_pool_free(cfv->genpool, cfv->reserved_mem,
371 cfv->reserved_size);
372 gen_pool_destroy(cfv->genpool);
373 cfv->genpool = NULL;
374}
375
376static int cfv_create_genpool(struct cfv_info *cfv)
377{
378 int err;
379
380 /* dma_alloc can only allocate whole pages, and we need a more
381 * fine-grained allocation so we use genpool. We ask for the space needed
382 * by IP and a full ring. If the dma allocation fails, we retry with a
383 * smaller allocation size.
384 */
385 err = -ENOMEM;
386 cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) *
387 (ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10;
388 if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu)
389 return -EINVAL;
390
391 for (;;) {
392 if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) {
393 netdev_info(cfv->ndev, "Not enough device memory\n");
394 return -ENOMEM;
395 }
396
397 cfv->alloc_addr = dma_alloc_coherent(
398 cfv->vdev->dev.parent->parent,
399 cfv->allocsz, &cfv->alloc_dma,
400 GFP_ATOMIC);
401 if (cfv->alloc_addr)
402 break;
403
404 cfv->allocsz = (cfv->allocsz * 3) >> 2;
405 }
406
407 netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n",
408 cfv->allocsz);
409
410 /* Allocate on 128-byte boundaries (1 << 7) */
411 cfv->genpool = gen_pool_create(7, -1);
412 if (!cfv->genpool)
413 goto err;
414
415 err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr,
416 (phys_addr_t)virt_to_phys(cfv->alloc_addr),
417 cfv->allocsz, -1);
418 if (err)
419 goto err;
420
421 /* Reserve some memory for low memory situations. If we hit the roof
422 * in the memory pool, we stop TX flow and release the reserve.
423 */
424 cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu;
425 cfv->reserved_mem = gen_pool_alloc(cfv->genpool,
426 cfv->reserved_size);
427 if (!cfv->reserved_mem)
428 goto err;
429
430 cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx);
431 return 0;
432err:
433 cfv_destroy_genpool(cfv);
434 return err;
435}
436
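To put numbers on the sizing above: assuming a 256-descriptor TX ring and the default 32-byte head- and tailroom, the first attempt asks for 256 * (1500 + 32 + 32) * 11/10, roughly 440 KB of coherent memory, i.e. about 10% more than a full ring of maximum-size payloads. If dma_alloc_coherent() cannot satisfy that, the request shrinks to three quarters of its previous size on each retry (about 330 KB, then 248 KB, and so on) until it either succeeds or falls to num_possible_cpus() * MTU or below, at which point cfv_create_genpool() gives up with -ENOMEM.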
437/* Enable the CAIF interface and allocate the memory-pool */
438static int cfv_netdev_open(struct net_device *netdev)
439{
440 struct cfv_info *cfv = netdev_priv(netdev);
441
442 if (cfv_create_genpool(cfv))
443 return -ENOMEM;
444
445 netif_carrier_on(netdev);
446 napi_enable(&cfv->napi);
447
448 /* Schedule NAPI to read any pending packets */
449 napi_schedule(&cfv->napi);
450 return 0;
451}
452
453/* Disable the CAIF interface and free the memory-pool */
454static int cfv_netdev_close(struct net_device *netdev)
455{
456 struct cfv_info *cfv = netdev_priv(netdev);
457 unsigned long flags;
458 struct buf_info *buf_info;
459
460 /* Disable interrupts, queues and NAPI polling */
461 netif_carrier_off(netdev);
462 virtqueue_disable_cb(cfv->vq_tx);
463 vringh_notify_disable_kern(cfv->vr_rx);
464 napi_disable(&cfv->napi);
465
466 /* Release any TX buffers on both the used and available rings */
467 cfv_release_used_buf(cfv->vq_tx);
468 spin_lock_irqsave(&cfv->tx_lock, flags);
469 while ((buf_info = virtqueue_detach_unused_buf(cfv->vq_tx)))
470 free_buf_info(cfv, buf_info);
471 spin_unlock_irqrestore(&cfv->tx_lock, flags);
472
473 /* Release all dma allocated memory and destroy the pool */
474 cfv_destroy_genpool(cfv);
475 return 0;
476}
477
478/* Allocate a buffer in dma-memory and copy skb to it */
479static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv,
480 struct sk_buff *skb,
481 struct scatterlist *sg)
482{
483 struct caif_payload_info *info = (void *)&skb->cb;
484 struct buf_info *buf_info = NULL;
485 u8 pad_len, hdr_ofs;
486
487 if (!cfv->genpool)
488 goto err;
489
490 if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) {
491 netdev_warn(cfv->ndev, "Invalid packet len (%d > %d)\n",
492 cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu);
493 goto err;
494 }
495
496 buf_info = kmalloc(sizeof(struct buf_info), GFP_ATOMIC);
497 if (unlikely(!buf_info))
498 goto err;
499
500 /* Make the IP header aligned in the buffer */
501 hdr_ofs = cfv->tx_hr + info->hdr_len;
502 pad_len = hdr_ofs & (IP_HDR_ALIGN - 1);
503 buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len;
504
505 /* allocate dma memory buffer */
506 buf_info->vaddr = (void *)gen_pool_alloc(cfv->genpool, buf_info->size);
507 if (unlikely(!buf_info->vaddr))
508 goto err;
509
510 /* copy skbuf contents to send buffer */
511 skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len);
512 sg_init_one(sg, buf_info->vaddr + pad_len,
513 skb->len + cfv->tx_hr + cfv->rx_hr);
514
515 return buf_info;
516err:
517 kfree(buf_info);
518 return NULL;
519}
520
521/* Put the CAIF packet on the virtio ring and kick the receiver */
522static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
523{
524 struct cfv_info *cfv = netdev_priv(netdev);
525 struct buf_info *buf_info;
526 struct scatterlist sg;
527 unsigned long flags;
528 bool flow_off = false;
529 int ret;
530
531 /* garbage collect released buffers */
532 cfv_release_used_buf(cfv->vq_tx);
533 spin_lock_irqsave(&cfv->tx_lock, flags);
534
535 /* The flow-off check takes the number of cpus into account to make sure
536 * the virtqueue will not be overfilled under any possible smp conditions.
537 *
538 * Flow-on is triggered when sufficient buffers are freed
539 */
540 if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) {
541 flow_off = true;
542 cfv->stats.tx_full_ring++;
543 }
544
545 /* If we run out of memory, we release the memory reserve and retry
546 * allocation.
547 */
548 buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
549 if (unlikely(!buf_info)) {
550 cfv->stats.tx_no_mem++;
551 flow_off = true;
552
553 if (cfv->reserved_mem && cfv->genpool) {
554 gen_pool_free(cfv->genpool, cfv->reserved_mem,
555 cfv->reserved_size);
556 cfv->reserved_mem = 0;
557 buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
558 }
559 }
560
561 if (unlikely(flow_off)) {
562 /* Turn flow on when a 1/4 of the descriptors are released */
563 cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4;
564 /* Enable notifications of recycled TX buffers */
565 virtqueue_enable_cb(cfv->vq_tx);
566 netif_tx_stop_all_queues(netdev);
567 }
568
569 if (unlikely(!buf_info)) {
570 /* If the memory reserve does its job, this shouldn't happen */
571 netdev_warn(cfv->ndev, "Out of gen_pool memory\n");
572 goto err;
573 }
574
575 ret = virtqueue_add_buf(cfv->vq_tx, &sg, 1, 0,
576 buf_info, GFP_ATOMIC);
577 if (unlikely((ret < 0))) {
578 /* If flow control works, this shouldn't happen */
579 netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n",
580 ret);
581 goto err;
582 }
583
584 /* update netdev statistics */
585 cfv->ndev->stats.tx_packets++;
586 cfv->ndev->stats.tx_bytes += skb->len;
587 spin_unlock_irqrestore(&cfv->tx_lock, flags);
588
589 /* tell the remote processor it has a pending message to read */
590 virtqueue_kick(cfv->vq_tx);
591
592 dev_kfree_skb(skb);
593 return NETDEV_TX_OK;
594err:
595 spin_unlock_irqrestore(&cfv->tx_lock, flags);
596 cfv->ndev->stats.tx_dropped++;
597 free_buf_info(cfv, buf_info);
598 dev_kfree_skb(skb);
599 return NETDEV_TX_OK;
600}
601
602static void cfv_tx_release_tasklet(unsigned long drv)
603{
604 struct cfv_info *cfv = (struct cfv_info *)drv;
605 cfv_release_used_buf(cfv->vq_tx);
606}
607
608static const struct net_device_ops cfv_netdev_ops = {
609 .ndo_open = cfv_netdev_open,
610 .ndo_stop = cfv_netdev_close,
611 .ndo_start_xmit = cfv_netdev_tx,
612};
613
614static void cfv_netdev_setup(struct net_device *netdev)
615{
616 netdev->netdev_ops = &cfv_netdev_ops;
617 netdev->type = ARPHRD_CAIF;
618 netdev->tx_queue_len = 100;
619 netdev->flags = IFF_POINTOPOINT | IFF_NOARP;
620 netdev->mtu = CFV_DEF_MTU_SIZE;
621 netdev->destructor = free_netdev;
622}
623
624/* Create debugfs counters for the device */
625static inline void debugfs_init(struct cfv_info *cfv)
626{
627 cfv->debugfs =
628 debugfs_create_dir(netdev_name(cfv->ndev), NULL);
629
630 if (IS_ERR(cfv->debugfs))
631 return;
632
633 debugfs_create_u32("rx-napi-complete", S_IRUSR, cfv->debugfs,
634 &cfv->stats.rx_napi_complete);
635 debugfs_create_u32("rx-napi-resched", S_IRUSR, cfv->debugfs,
636 &cfv->stats.rx_napi_resched);
637 debugfs_create_u32("rx-nomem", S_IRUSR, cfv->debugfs,
638 &cfv->stats.rx_nomem);
639 debugfs_create_u32("rx-kicks", S_IRUSR, cfv->debugfs,
640 &cfv->stats.rx_kicks);
641 debugfs_create_u32("tx-full-ring", S_IRUSR, cfv->debugfs,
642 &cfv->stats.tx_full_ring);
643 debugfs_create_u32("tx-no-mem", S_IRUSR, cfv->debugfs,
644 &cfv->stats.tx_no_mem);
645 debugfs_create_u32("tx-kicks", S_IRUSR, cfv->debugfs,
646 &cfv->stats.tx_kicks);
647 debugfs_create_u32("tx-flow-on", S_IRUSR, cfv->debugfs,
648 &cfv->stats.tx_flow_on);
649}
650
651/* Set up CAIF for a virtio device */
652static int cfv_probe(struct virtio_device *vdev)
653{
654 vq_callback_t *vq_cbs = cfv_release_cb;
655 vrh_callback_t *vrh_cbs = cfv_recv;
656 const char *names = "output";
657 const char *cfv_netdev_name = "cfvrt";
658 struct net_device *netdev;
659 struct cfv_info *cfv;
660 int err = -EINVAL;
661
662 netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name,
663 cfv_netdev_setup);
664 if (!netdev)
665 return -ENOMEM;
666
667 cfv = netdev_priv(netdev);
668 cfv->vdev = vdev;
669 cfv->ndev = netdev;
670
671 spin_lock_init(&cfv->tx_lock);
672
673 /* Get the RX virtio ring. This is a "host side vring". */
674 err = vdev->vringh_config->find_vrhs(vdev, 1, &cfv->vr_rx, &vrh_cbs);
675 if (err)
676 goto err;
677
678 /* Get the TX virtio ring. This is a "guest side vring". */
679 err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names);
680 if (err)
681 goto err;
682
683 /* Get the CAIF configuration from virtio config space, if available */
684#define GET_VIRTIO_CONFIG_OPS(_v, _var, _f) \
685 ((_v)->config->get(_v, offsetof(struct virtio_caif_transf_config, _f), \
686 &_var, \
687 FIELD_SIZEOF(struct virtio_caif_transf_config, _f)))
688
689 if (vdev->config->get) {
690 GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_hr, headroom);
691 GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_hr, headroom);
692 GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_tr, tailroom);
693 GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_tr, tailroom);
694 GET_VIRTIO_CONFIG_OPS(vdev, cfv->mtu, mtu);
695 GET_VIRTIO_CONFIG_OPS(vdev, cfv->mru, mtu);
696 } else {
697 cfv->tx_hr = CFV_DEF_HEADROOM;
698 cfv->rx_hr = CFV_DEF_HEADROOM;
699 cfv->tx_tr = CFV_DEF_TAILROOM;
700 cfv->rx_tr = CFV_DEF_TAILROOM;
701 cfv->mtu = CFV_DEF_MTU_SIZE;
702 cfv->mru = CFV_DEF_MTU_SIZE;
703 }
704
705 netdev->needed_headroom = cfv->tx_hr;
706 netdev->needed_tailroom = cfv->tx_tr;
707
708 /* Disable buffer release interrupts unless we have stopped TX queues */
709 virtqueue_disable_cb(cfv->vq_tx);
710
711 netdev->mtu = cfv->mtu - cfv->tx_tr;
712 vdev->priv = cfv;
713
714 /* Initialize NAPI poll context data */
715 vringh_kiov_init(&cfv->ctx.riov, NULL, 0);
716 cfv->ctx.head = USHRT_MAX;
717 netif_napi_add(netdev, &cfv->napi, cfv_rx_poll, CFV_DEFAULT_QUOTA);
718
719 tasklet_init(&cfv->tx_release_tasklet,
720 cfv_tx_release_tasklet,
721 (unsigned long)cfv);
722
723 /* Carrier is off until netdevice is opened */
724 netif_carrier_off(netdev);
725
726 /* register Netdev */
727 err = register_netdev(netdev);
728 if (err) {
729 dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
730 goto err;
731 }
732
733 debugfs_init(cfv);
734
735 return 0;
736err:
737 netdev_warn(cfv->ndev, "CAIF Virtio probe failed:%d\n", err);
738
739 if (cfv->vr_rx)
740 vdev->vringh_config->del_vrhs(cfv->vdev);
741 if (cfv->vdev)
742 vdev->config->del_vqs(cfv->vdev);
743 free_netdev(netdev);
744 return err;
745}
746
747static void cfv_remove(struct virtio_device *vdev)
748{
749 struct cfv_info *cfv = vdev->priv;
750
751 rtnl_lock();
752 dev_close(cfv->ndev);
753 rtnl_unlock();
754
755 tasklet_kill(&cfv->tx_release_tasklet);
756 debugfs_remove_recursive(cfv->debugfs);
757
758 vringh_kiov_cleanup(&cfv->ctx.riov);
759 vdev->config->reset(vdev);
760 vdev->vringh_config->del_vrhs(cfv->vdev);
761 cfv->vr_rx = NULL;
762 vdev->config->del_vqs(cfv->vdev);
763 unregister_netdev(cfv->ndev);
764}
765
766static struct virtio_device_id id_table[] = {
767 { VIRTIO_ID_CAIF, VIRTIO_DEV_ANY_ID },
768 { 0 },
769};
770
771static unsigned int features[] = {
772};
773
774static struct virtio_driver caif_virtio_driver = {
775 .feature_table = features,
776 .feature_table_size = ARRAY_SIZE(features),
777 .driver.name = KBUILD_MODNAME,
778 .driver.owner = THIS_MODULE,
779 .id_table = id_table,
780 .probe = cfv_probe,
781 .remove = cfv_remove,
782};
783
784module_virtio_driver(caif_virtio_driver);
785MODULE_DEVICE_TABLE(virtio, id_table);
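The struct virtio_caif_transf_config that cfv_probe() reads through the GET_VIRTIO_CONFIG_OPS macro is defined in include/linux/virtio_caif.h and is not part of this diff. Judging only from the field names and the sizes of the destination variables (u16 headroom/tailroom, u32 mtu), the transport-config layout presumably looks something like the following hypothetical sketch:

/* Hypothetical reconstruction, inferred from the reads in cfv_probe();
 * the authoritative definition lives in include/linux/virtio_caif.h.
 */
#include <linux/types.h>

struct virtio_caif_transf_config {
	u16 headroom;	/* read into cfv->tx_hr and cfv->rx_hr */
	u16 tailroom;	/* read into cfv->tx_tr and cfv->rx_tr */
	u32 mtu;	/* read into cfv->mtu and cfv->mru */
};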