author    Linus Torvalds <torvalds@linux-foundation.org>  2011-02-10 15:19:58 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-02-10 15:19:58 -0500
commit    ce86d35d2fd119d777fc72736732c8c9c3456698 (patch)
tree      531b2077d9db6b44285ad75196672b4d102e8f20 /drivers/char
parent    b477958314796f51c54829dbaea89b1f46dabd06 (diff)
parent    0f66e50af53d39edebf4bc64ef90077e738c171f (diff)
Merge branch 'tty-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6
* 'tty-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6:
  serial: bfin_5xx: split uart RX lock from uart port lock to avoid deadlock
  68360serial: Plumb in rs_360_get_icount()
  n_gsm: copy mtu over when configuring via ioctl interface
  virtio: console: Move file back to drivers/char/
Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Makefile             1
-rw-r--r--  drivers/char/virtio_console.c  1850
2 files changed, 1851 insertions, 0 deletions
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 5bc765d4c3ca..8238f89f73c9 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
 obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_SX) += sx.o generic_serial.o
 obj-$(CONFIG_RIO) += rio/ generic_serial.o
+obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
 obj-$(CONFIG_RAW_DRIVER) += raw.o
 obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
 obj-$(CONFIG_MSPEC) += mspec.o
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
new file mode 100644
index 000000000000..490393186338
--- /dev/null
+++ b/drivers/char/virtio_console.c
@@ -0,0 +1,1850 @@
1/*
2 * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
3 * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
4 * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include <linux/cdev.h>
21#include <linux/debugfs.h>
22#include <linux/device.h>
23#include <linux/err.h>
24#include <linux/fs.h>
25#include <linux/init.h>
26#include <linux/list.h>
27#include <linux/poll.h>
28#include <linux/sched.h>
29#include <linux/slab.h>
30#include <linux/spinlock.h>
31#include <linux/virtio.h>
32#include <linux/virtio_console.h>
33#include <linux/wait.h>
34#include <linux/workqueue.h>
35#include "../tty/hvc/hvc_console.h"
36
37/*
38 * This is a global struct for storing common data for all the devices
39 * this driver handles.
40 *
41 * Mainly, it has a linked list for all the consoles in one place so
42 * that callbacks from hvc for get_chars(), put_chars() work properly
43 * across multiple devices and multiple ports per device.
44 */
45struct ports_driver_data {
46 /* Used for registering chardevs */
47 struct class *class;
48
49 /* Used for exporting per-port information to debugfs */
50 struct dentry *debugfs_dir;
51
52 /* List of all the devices we're handling */
53 struct list_head portdevs;
54
55 /* Number of devices this driver is handling */
56 unsigned int index;
57
58 /*
59 * This is used to keep track of the number of hvc consoles
60 * spawned by this driver. This number is given as the first
61 * argument to hvc_alloc(). To correctly map an initial
62 * console spawned via hvc_instantiate to the console being
63 * hooked up via hvc_alloc, we need to pass the same vtermno.
64 *
65 * We also just assume the first console being initialised was
66 * the first one that got used as the initial console.
67 */
68 unsigned int next_vtermno;
69
70 /* All the console devices handled by this driver */
71 struct list_head consoles;
72};
73static struct ports_driver_data pdrvdata;
74
75DEFINE_SPINLOCK(pdrvdata_lock);
76
77/* This struct holds information that's relevant only for console ports */
78struct console {
79 /* We'll place all consoles in a list in the pdrvdata struct */
80 struct list_head list;
81
82 /* The hvc device associated with this console port */
83 struct hvc_struct *hvc;
84
85 /* The size of the console */
86 struct winsize ws;
87
88 /*
89 * This is the number we used to register with hvc in
90 * hvc_instantiate() and hvc_alloc(); it is the number that the
91 * hvc callbacks pass back to us so that we can differentiate
92 * between the various console ports handled by this
93 * driver.
94 */
95 u32 vtermno;
96};
97
98struct port_buffer {
99 char *buf;
100
101 /* size of the buffer in *buf above */
102 size_t size;
103
104 /* used length of the buffer */
105 size_t len;
106 /* offset in the buf from which to consume data */
107 size_t offset;
108};
109
110/*
111 * This is a per-device struct that stores data common to all the
112 * ports for that device (vdev->priv).
113 */
114struct ports_device {
115 /* Next portdev in the list, head is in the pdrvdata struct */
116 struct list_head list;
117
118 /*
119 * Workqueue handlers where we process deferred work after
120 * notification
121 */
122 struct work_struct control_work;
123
124 struct list_head ports;
125
126 /* To protect the list of ports */
127 spinlock_t ports_lock;
128
129 /* To protect the vq operations for the control channel */
130 spinlock_t cvq_lock;
131
132 /* The current config space is stored here */
133 struct virtio_console_config config;
134
135 /* The virtio device we're associated with */
136 struct virtio_device *vdev;
137
138 /*
139 * A couple of virtqueues for the control channel: one for
140 * guest->host transfers, one for host->guest transfers
141 */
142 struct virtqueue *c_ivq, *c_ovq;
143
144 /* Array of per-port IO virtqueues */
145 struct virtqueue **in_vqs, **out_vqs;
146
147 /* Used for numbering devices for sysfs and debugfs */
148 unsigned int drv_index;
149
150 /* Major number for this device. Ports will be created as minors. */
151 int chr_major;
152};
153
154/* This struct holds the per-port data */
155struct port {
156 /* Next port in the list, head is in the ports_device */
157 struct list_head list;
158
159 /* Pointer to the parent virtio_console device */
160 struct ports_device *portdev;
161
162 /* The current buffer from which data has to be fed to readers */
163 struct port_buffer *inbuf;
164
165 /*
166 * To protect the operations on the in_vq associated with this
167 * port. Has to be a spinlock because it can be called from
168 * interrupt context (get_char()).
169 */
170 spinlock_t inbuf_lock;
171
172 /* Protect the operations on the out_vq. */
173 spinlock_t outvq_lock;
174
175 /* The IO vqs for this port */
176 struct virtqueue *in_vq, *out_vq;
177
178 /* File in the debugfs directory that exposes this port's information */
179 struct dentry *debugfs_file;
180
181 /*
182 * The entries in this struct will be valid if this port is
183 * hooked up to an hvc console
184 */
185 struct console cons;
186
187 /* Each port associates with a separate char device */
188 struct cdev *cdev;
189 struct device *dev;
190
191 /* Reference-counting to handle port hot-unplugs and file operations */
192 struct kref kref;
193
194 /* A waitqueue for poll() or blocking read operations */
195 wait_queue_head_t waitqueue;
196
197 /* The 'name' of the port that we expose via sysfs properties */
198 char *name;
199
200 /* We can notify apps of host connect / disconnect events via SIGIO */
201 struct fasync_struct *async_queue;
202
203 /* The 'id' to identify the port with the Host */
204 u32 id;
205
206 bool outvq_full;
207
208 /* Is the host device open */
209 bool host_connected;
210
211 /* We should allow only one process to open a port */
212 bool guest_connected;
213};
214
215/* This is the very early arch-specified put chars function. */
216static int (*early_put_chars)(u32, const char *, int);
217
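/* Map an hvc vterm number back to the port that owns that console. */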
218static struct port *find_port_by_vtermno(u32 vtermno)
219{
220 struct port *port;
221 struct console *cons;
222 unsigned long flags;
223
224 spin_lock_irqsave(&pdrvdata_lock, flags);
225 list_for_each_entry(cons, &pdrvdata.consoles, list) {
226 if (cons->vtermno == vtermno) {
227 port = container_of(cons, struct port, cons);
228 goto out;
229 }
230 }
231 port = NULL;
232out:
233 spin_unlock_irqrestore(&pdrvdata_lock, flags);
234 return port;
235}
236
237static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
238 dev_t dev)
239{
240 struct port *port;
241 unsigned long flags;
242
243 spin_lock_irqsave(&portdev->ports_lock, flags);
244 list_for_each_entry(port, &portdev->ports, list)
245 if (port->cdev->dev == dev)
246 goto out;
247 port = NULL;
248out:
249 spin_unlock_irqrestore(&portdev->ports_lock, flags);
250
251 return port;
252}
253
254static struct port *find_port_by_devt(dev_t dev)
255{
256 struct ports_device *portdev;
257 struct port *port;
258 unsigned long flags;
259
260 spin_lock_irqsave(&pdrvdata_lock, flags);
261 list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
262 port = find_port_by_devt_in_portdev(portdev, dev);
263 if (port)
264 goto out;
265 }
266 port = NULL;
267out:
268 spin_unlock_irqrestore(&pdrvdata_lock, flags);
269 return port;
270}
271
272static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
273{
274 struct port *port;
275 unsigned long flags;
276
277 spin_lock_irqsave(&portdev->ports_lock, flags);
278 list_for_each_entry(port, &portdev->ports, list)
279 if (port->id == id)
280 goto out;
281 port = NULL;
282out:
283 spin_unlock_irqrestore(&portdev->ports_lock, flags);
284
285 return port;
286}
287
288static struct port *find_port_by_vq(struct ports_device *portdev,
289 struct virtqueue *vq)
290{
291 struct port *port;
292 unsigned long flags;
293
294 spin_lock_irqsave(&portdev->ports_lock, flags);
295 list_for_each_entry(port, &portdev->ports, list)
296 if (port->in_vq == vq || port->out_vq == vq)
297 goto out;
298 port = NULL;
299out:
300 spin_unlock_irqrestore(&portdev->ports_lock, flags);
301 return port;
302}
303
304static bool is_console_port(struct port *port)
305{
306 if (port->cons.hvc)
307 return true;
308 return false;
309}
310
311static inline bool use_multiport(struct ports_device *portdev)
312{
313 /*
314 * This condition can be true when put_chars is called from
315 * early_init
316 */
317 if (!portdev->vdev)
318 return 0;
319 return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
320}
321
322static void free_buf(struct port_buffer *buf)
323{
324 kfree(buf->buf);
325 kfree(buf);
326}
327
328static struct port_buffer *alloc_buf(size_t buf_size)
329{
330 struct port_buffer *buf;
331
332 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
333 if (!buf)
334 goto fail;
335 buf->buf = kzalloc(buf_size, GFP_KERNEL);
336 if (!buf->buf)
337 goto free_buf;
338 buf->len = 0;
339 buf->offset = 0;
340 buf->size = buf_size;
341 return buf;
342
343free_buf:
344 kfree(buf);
345fail:
346 return NULL;
347}
348
349/* Callers should take appropriate locks */
350static void *get_inbuf(struct port *port)
351{
352 struct port_buffer *buf;
353 struct virtqueue *vq;
354 unsigned int len;
355
356 vq = port->in_vq;
357 buf = virtqueue_get_buf(vq, &len);
358 if (buf) {
359 buf->len = len;
360 buf->offset = 0;
361 }
362 return buf;
363}
364
365/*
366 * Create a scatter-gather list representing our input buffer and put
367 * it in the queue.
368 *
369 * Callers should take appropriate locks.
370 */
371static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
372{
373 struct scatterlist sg[1];
374 int ret;
375
376 sg_init_one(sg, buf->buf, buf->size);
377
378 ret = virtqueue_add_buf(vq, sg, 0, 1, buf);
379 virtqueue_kick(vq);
380 return ret;
381}
382
 383/* Discard any unread data this port has. Callers should hold the appropriate locks. */
384static void discard_port_data(struct port *port)
385{
386 struct port_buffer *buf;
387 struct virtqueue *vq;
388 unsigned int len;
389 int ret;
390
391 vq = port->in_vq;
392 if (port->inbuf)
393 buf = port->inbuf;
394 else
395 buf = virtqueue_get_buf(vq, &len);
396
397 ret = 0;
398 while (buf) {
399 if (add_inbuf(vq, buf) < 0) {
400 ret++;
401 free_buf(buf);
402 }
403 buf = virtqueue_get_buf(vq, &len);
404 }
405 port->inbuf = NULL;
406 if (ret)
407 dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
408 ret);
409}
410
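/* Check, under the inbuf lock, whether this port has any unread input data queued. */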
411static bool port_has_data(struct port *port)
412{
413 unsigned long flags;
414 bool ret;
415
416 spin_lock_irqsave(&port->inbuf_lock, flags);
417 if (port->inbuf) {
418 ret = true;
419 goto out;
420 }
421 port->inbuf = get_inbuf(port);
422 if (port->inbuf) {
423 ret = true;
424 goto out;
425 }
426 ret = false;
427out:
428 spin_unlock_irqrestore(&port->inbuf_lock, flags);
429 return ret;
430}
431
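/*
 * Send a control message to the Host over the control virtqueue and
 * busy-wait until the Host has consumed it (a no-op for non-multiport
 * devices).
 */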
432static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
433 unsigned int event, unsigned int value)
434{
435 struct scatterlist sg[1];
436 struct virtio_console_control cpkt;
437 struct virtqueue *vq;
438 unsigned int len;
439
440 if (!use_multiport(portdev))
441 return 0;
442
443 cpkt.id = port_id;
444 cpkt.event = event;
445 cpkt.value = value;
446
447 vq = portdev->c_ovq;
448
449 sg_init_one(sg, &cpkt, sizeof(cpkt));
450 if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
451 virtqueue_kick(vq);
452 while (!virtqueue_get_buf(vq, &len))
453 cpu_relax();
454 }
455 return 0;
456}
457
458static ssize_t send_control_msg(struct port *port, unsigned int event,
459 unsigned int value)
460{
461 /* Did the port get unplugged before userspace closed it? */
462 if (port->portdev)
463 return __send_control_msg(port->portdev, port->id, event, value);
464 return 0;
465}
466
467/* Callers must take the port->outvq_lock */
468static void reclaim_consumed_buffers(struct port *port)
469{
470 void *buf;
471 unsigned int len;
472
473 while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
474 kfree(buf);
475 port->outvq_full = false;
476 }
477}
478
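/*
 * Queue a buffer on the port's output virtqueue and kick the Host; if
 * 'nonblock' is false, spin until the Host has consumed the buffer.
 */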
479static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
480 bool nonblock)
481{
482 struct scatterlist sg[1];
483 struct virtqueue *out_vq;
484 ssize_t ret;
485 unsigned long flags;
486 unsigned int len;
487
488 out_vq = port->out_vq;
489
490 spin_lock_irqsave(&port->outvq_lock, flags);
491
492 reclaim_consumed_buffers(port);
493
494 sg_init_one(sg, in_buf, in_count);
495 ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);
496
497 /* Tell Host to go! */
498 virtqueue_kick(out_vq);
499
500 if (ret < 0) {
501 in_count = 0;
502 goto done;
503 }
504
505 if (ret == 0)
506 port->outvq_full = true;
507
508 if (nonblock)
509 goto done;
510
511 /*
512 * Wait till the host acknowledges it pushed out the data we
513 * sent. This is done for data from the hvc_console; the tty
514 * operations are performed with spinlocks held so we can't
515 * sleep here. An alternative would be to copy the data to a
516 * buffer and relax the spinning requirement. The downside is
517 * we need to kmalloc a GFP_ATOMIC buffer each time the
518 * console driver writes something out.
519 */
520 while (!virtqueue_get_buf(out_vq, &len))
521 cpu_relax();
522done:
523 spin_unlock_irqrestore(&port->outvq_lock, flags);
524 /*
525 * We're expected to return the amount of data we wrote -- all
526 * of it
527 */
528 return in_count;
529}
530
531/*
532 * Give out the data that's requested from the buffer that we have
533 * queued up.
534 */
535static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
536 bool to_user)
537{
538 struct port_buffer *buf;
539 unsigned long flags;
540
541 if (!out_count || !port_has_data(port))
542 return 0;
543
544 buf = port->inbuf;
545 out_count = min(out_count, buf->len - buf->offset);
546
547 if (to_user) {
548 ssize_t ret;
549
550 ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
551 if (ret)
552 return -EFAULT;
553 } else {
554 memcpy(out_buf, buf->buf + buf->offset, out_count);
555 }
556
557 buf->offset += out_count;
558
559 if (buf->offset == buf->len) {
560 /*
561 * We're done using all the data in this buffer.
562 * Re-queue so that the Host can send us more data.
563 */
564 spin_lock_irqsave(&port->inbuf_lock, flags);
565 port->inbuf = NULL;
566
567 if (add_inbuf(port->in_vq, buf) < 0)
568 dev_warn(port->dev, "failed add_buf\n");
569
570 spin_unlock_irqrestore(&port->inbuf_lock, flags);
571 }
572 /* Return the number of bytes actually copied */
573 return out_count;
574}
575
576/* The condition that must be true for polling to end */
577static bool will_read_block(struct port *port)
578{
579 if (!port->guest_connected) {
580 /* Port got hot-unplugged. Let's exit. */
581 return false;
582 }
583 return !port_has_data(port) && port->host_connected;
584}
585
586static bool will_write_block(struct port *port)
587{
588 bool ret;
589
590 if (!port->guest_connected) {
591 /* Port got hot-unplugged. Let's exit. */
592 return false;
593 }
594 if (!port->host_connected)
595 return true;
596
597 spin_lock_irq(&port->outvq_lock);
598 /*
599 * Check if the Host has consumed any buffers since we last
600 * sent data (this is only applicable for nonblocking ports).
601 */
602 reclaim_consumed_buffers(port);
603 ret = port->outvq_full;
604 spin_unlock_irq(&port->outvq_lock);
605
606 return ret;
607}
608
609static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
610 size_t count, loff_t *offp)
611{
612 struct port *port;
613 ssize_t ret;
614
615 port = filp->private_data;
616
617 if (!port_has_data(port)) {
618 /*
 619 * If nothing's connected on the host side, just return 0;
 620 * this tells the userspace app that there's no
 621 * connection
622 */
623 if (!port->host_connected)
624 return 0;
625 if (filp->f_flags & O_NONBLOCK)
626 return -EAGAIN;
627
628 ret = wait_event_interruptible(port->waitqueue,
629 !will_read_block(port));
630 if (ret < 0)
631 return ret;
632 }
633 /* Port got hot-unplugged. */
634 if (!port->guest_connected)
635 return -ENODEV;
636 /*
637 * We could've received a disconnection message while we were
638 * waiting for more data.
639 *
 640 * This check is not combined with the if() statement above: we
 641 * might receive some data, and the host could also get
 642 * disconnected, after we got woken up from our wait. So we
643 * really want to give off whatever data we have and only then
644 * check for host_connected.
645 */
646 if (!port_has_data(port) && !port->host_connected)
647 return 0;
648
649 return fill_readbuf(port, ubuf, count, true);
650}
651
652static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
653 size_t count, loff_t *offp)
654{
655 struct port *port;
656 char *buf;
657 ssize_t ret;
658 bool nonblock;
659
660 /* Userspace could be out to fool us */
661 if (!count)
662 return 0;
663
664 port = filp->private_data;
665
666 nonblock = filp->f_flags & O_NONBLOCK;
667
668 if (will_write_block(port)) {
669 if (nonblock)
670 return -EAGAIN;
671
672 ret = wait_event_interruptible(port->waitqueue,
673 !will_write_block(port));
674 if (ret < 0)
675 return ret;
676 }
677 /* Port got hot-unplugged. */
678 if (!port->guest_connected)
679 return -ENODEV;
680
681 count = min((size_t)(32 * 1024), count);
682
683 buf = kmalloc(count, GFP_KERNEL);
684 if (!buf)
685 return -ENOMEM;
686
687 ret = copy_from_user(buf, ubuf, count);
688 if (ret) {
689 ret = -EFAULT;
690 goto free_buf;
691 }
692
693 /*
694 * We now ask send_buf() to not spin for generic ports -- we
695 * can re-use the same code path that non-blocking file
696 * descriptors take for blocking file descriptors since the
697 * wait is already done and we're certain the write will go
698 * through to the host.
699 */
700 nonblock = true;
701 ret = send_buf(port, buf, count, nonblock);
702
703 if (nonblock && ret > 0)
704 goto out;
705
706free_buf:
707 kfree(buf);
708out:
709 return ret;
710}
711
712static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
713{
714 struct port *port;
715 unsigned int ret;
716
717 port = filp->private_data;
718 poll_wait(filp, &port->waitqueue, wait);
719
720 if (!port->guest_connected) {
721 /* Port got unplugged */
722 return POLLHUP;
723 }
724 ret = 0;
725 if (!will_read_block(port))
726 ret |= POLLIN | POLLRDNORM;
727 if (!will_write_block(port))
728 ret |= POLLOUT;
729 if (!port->host_connected)
730 ret |= POLLHUP;
731
732 return ret;
733}
734
735static void remove_port(struct kref *kref);
736
737static int port_fops_release(struct inode *inode, struct file *filp)
738{
739 struct port *port;
740
741 port = filp->private_data;
742
743 /* Notify host of port being closed */
744 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
745
746 spin_lock_irq(&port->inbuf_lock);
747 port->guest_connected = false;
748
749 discard_port_data(port);
750
751 spin_unlock_irq(&port->inbuf_lock);
752
753 spin_lock_irq(&port->outvq_lock);
754 reclaim_consumed_buffers(port);
755 spin_unlock_irq(&port->outvq_lock);
756
757 /*
758 * Locks aren't necessary here as a port can't be opened after
759 * unplug, and if a port isn't unplugged, a kref would already
760 * exist for the port. Plus, taking ports_lock here would
761 * create a dependency on other locks taken by functions
762 * inside remove_port if we're the last holder of the port,
763 * creating many problems.
764 */
765 kref_put(&port->kref, remove_port);
766
767 return 0;
768}
769
770static int port_fops_open(struct inode *inode, struct file *filp)
771{
772 struct cdev *cdev = inode->i_cdev;
773 struct port *port;
774 int ret;
775
776 port = find_port_by_devt(cdev->dev);
777 filp->private_data = port;
778
779 /* Prevent against a port getting hot-unplugged at the same time */
780 spin_lock_irq(&port->portdev->ports_lock);
781 kref_get(&port->kref);
782 spin_unlock_irq(&port->portdev->ports_lock);
783
784 /*
785 * Don't allow opening of console port devices -- that's done
786 * via /dev/hvc
787 */
788 if (is_console_port(port)) {
789 ret = -ENXIO;
790 goto out;
791 }
792
793 /* Allow only one process to open a particular port at a time */
794 spin_lock_irq(&port->inbuf_lock);
795 if (port->guest_connected) {
796 spin_unlock_irq(&port->inbuf_lock);
797 ret = -EMFILE;
798 goto out;
799 }
800
801 port->guest_connected = true;
802 spin_unlock_irq(&port->inbuf_lock);
803
804 spin_lock_irq(&port->outvq_lock);
805 /*
806 * There might be a chance that we missed reclaiming a few
807 * buffers in the window of the port getting previously closed
808 * and opening now.
809 */
810 reclaim_consumed_buffers(port);
811 spin_unlock_irq(&port->outvq_lock);
812
813 nonseekable_open(inode, filp);
814
815 /* Notify host of port being opened */
816 send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
817
818 return 0;
819out:
820 kref_put(&port->kref, remove_port);
821 return ret;
822}
823
824static int port_fops_fasync(int fd, struct file *filp, int mode)
825{
826 struct port *port;
827
828 port = filp->private_data;
829 return fasync_helper(fd, filp, mode, &port->async_queue);
830}
831
832/*
833 * The file operations that we support: programs in the guest can open
834 * a console device, read from it, write to it, poll for data and
835 * close it. The devices are at
836 * /dev/vport<device number>p<port number>
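 * (e.g. /dev/vport0p1 for port 1 on the first virtio-console device)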
837 */
838static const struct file_operations port_fops = {
839 .owner = THIS_MODULE,
840 .open = port_fops_open,
841 .read = port_fops_read,
842 .write = port_fops_write,
843 .poll = port_fops_poll,
844 .release = port_fops_release,
845 .fasync = port_fops_fasync,
846 .llseek = no_llseek,
847};
848
849/*
850 * The put_chars() callback is pretty straightforward.
851 *
852 * We turn the characters into a scatter-gather list, add it to the
853 * output queue and then kick the Host. Then we sit here waiting for
854 * it to finish: inefficient in theory, but in practice
855 * implementations will do it immediately (lguest's Launcher does).
856 */
857static int put_chars(u32 vtermno, const char *buf, int count)
858{
859 struct port *port;
860
861 if (unlikely(early_put_chars))
862 return early_put_chars(vtermno, buf, count);
863
864 port = find_port_by_vtermno(vtermno);
865 if (!port)
866 return -EPIPE;
867
868 return send_buf(port, (void *)buf, count, false);
869}
870
871/*
872 * get_chars() is the callback from the hvc_console infrastructure
873 * when an interrupt is received.
874 *
875 * We call out to fill_readbuf that gets us the required data from the
876 * buffers that are queued up.
877 */
878static int get_chars(u32 vtermno, char *buf, int count)
879{
880 struct port *port;
881
882 /* If we've not set up the port yet, we have no input to give. */
883 if (unlikely(early_put_chars))
884 return 0;
885
886 port = find_port_by_vtermno(vtermno);
887 if (!port)
888 return -EPIPE;
889
890 /* If we don't have an input queue yet, we can't get input. */
891 BUG_ON(!port->in_vq);
892
893 return fill_readbuf(port, buf, count, false);
894}
895
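/* Pass the current console size on to hvc if the Host advertises the SIZE feature. */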
896static void resize_console(struct port *port)
897{
898 struct virtio_device *vdev;
899
900 /* The port could have been hot-unplugged */
901 if (!port || !is_console_port(port))
902 return;
903
904 vdev = port->portdev->vdev;
905 if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
906 hvc_resize(port->cons.hvc, port->cons.ws);
907}
908
909/* We set the configuration at this point, since we now have a tty */
910static int notifier_add_vio(struct hvc_struct *hp, int data)
911{
912 struct port *port;
913
914 port = find_port_by_vtermno(hp->vtermno);
915 if (!port)
916 return -EINVAL;
917
918 hp->irq_requested = 1;
919 resize_console(port);
920
921 return 0;
922}
923
924static void notifier_del_vio(struct hvc_struct *hp, int data)
925{
926 hp->irq_requested = 0;
927}
928
929/* The operations for console ports. */
930static const struct hv_ops hv_ops = {
931 .get_chars = get_chars,
932 .put_chars = put_chars,
933 .notifier_add = notifier_add_vio,
934 .notifier_del = notifier_del_vio,
935 .notifier_hangup = notifier_del_vio,
936};
937
938/*
939 * Console drivers are initialized very early so boot messages can go
940 * out, so we do things slightly differently from the generic virtio
941 * initialization of the net and block drivers.
942 *
943 * At this stage, the console is output-only. It's too early to set
944 * up a virtqueue, so we let the drivers do some boutique early-output
945 * thing.
946 */
947int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
948{
949 early_put_chars = put_chars;
950 return hvc_instantiate(0, 0, &hv_ops);
951}
952
953int init_port_console(struct port *port)
954{
955 int ret;
956
957 /*
958 * The Host's telling us this port is a console port. Hook it
959 * up with an hvc console.
960 *
961 * To set up and manage our virtual console, we call
962 * hvc_alloc().
963 *
964 * The first argument of hvc_alloc() is the virtual console
965 * number. The second argument is the parameter for the
966 * notification mechanism (like irq number). We currently
967 * leave this as zero, virtqueues have implicit notifications.
968 *
969 * The third argument is a "struct hv_ops" containing the
970 * put_chars() get_chars(), notifier_add() and notifier_del()
971 * pointers. The final argument is the output buffer size: we
972 * can do any size, so we put PAGE_SIZE here.
973 */
974 port->cons.vtermno = pdrvdata.next_vtermno;
975
976 port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
977 if (IS_ERR(port->cons.hvc)) {
978 ret = PTR_ERR(port->cons.hvc);
979 dev_err(port->dev,
980 "error %d allocating hvc for port\n", ret);
981 port->cons.hvc = NULL;
982 return ret;
983 }
984 spin_lock_irq(&pdrvdata_lock);
985 pdrvdata.next_vtermno++;
986 list_add_tail(&port->cons.list, &pdrvdata.consoles);
987 spin_unlock_irq(&pdrvdata_lock);
988 port->guest_connected = true;
989
990 /*
991 * Start using the new console output if this is the first
992 * console to come up.
993 */
994 if (early_put_chars)
995 early_put_chars = NULL;
996
997 /* Notify host of port being opened */
998 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
999
1000 return 0;
1001}
1002
1003static ssize_t show_port_name(struct device *dev,
1004 struct device_attribute *attr, char *buffer)
1005{
1006 struct port *port;
1007
1008 port = dev_get_drvdata(dev);
1009
1010 return sprintf(buffer, "%s\n", port->name);
1011}
1012
1013static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);
1014
1015static struct attribute *port_sysfs_entries[] = {
1016 &dev_attr_name.attr,
1017 NULL
1018};
1019
1020static struct attribute_group port_attribute_group = {
1021 .name = NULL, /* put in device directory */
1022 .attrs = port_sysfs_entries,
1023};
1024
1025static int debugfs_open(struct inode *inode, struct file *filp)
1026{
1027 filp->private_data = inode->i_private;
1028 return 0;
1029}
1030
1031static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
1032 size_t count, loff_t *offp)
1033{
1034 struct port *port;
1035 char *buf;
1036 ssize_t ret, out_offset, out_count;
1037
1038 out_count = 1024;
1039 buf = kmalloc(out_count, GFP_KERNEL);
1040 if (!buf)
1041 return -ENOMEM;
1042
1043 port = filp->private_data;
1044 out_offset = 0;
1045 out_offset += snprintf(buf + out_offset, out_count,
1046 "name: %s\n", port->name ? port->name : "");
1047 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1048 "guest_connected: %d\n", port->guest_connected);
1049 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1050 "host_connected: %d\n", port->host_connected);
1051 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1052 "outvq_full: %d\n", port->outvq_full);
1053 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1054 "is_console: %s\n",
1055 is_console_port(port) ? "yes" : "no");
1056 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1057 "console_vtermno: %u\n", port->cons.vtermno);
1058
1059 ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
1060 kfree(buf);
1061 return ret;
1062}
1063
1064static const struct file_operations port_debugfs_ops = {
1065 .owner = THIS_MODULE,
1066 .open = debugfs_open,
1067 .read = debugfs_read,
1068};
1069
1070static void set_console_size(struct port *port, u16 rows, u16 cols)
1071{
1072 if (!port || !is_console_port(port))
1073 return;
1074
1075 port->cons.ws.ws_row = rows;
1076 port->cons.ws.ws_col = cols;
1077}
1078
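/*
 * Fill a virtqueue with PAGE_SIZE input buffers; returns the number of
 * buffers successfully queued.
 */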
1079static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
1080{
1081 struct port_buffer *buf;
1082 unsigned int nr_added_bufs;
1083 int ret;
1084
1085 nr_added_bufs = 0;
1086 do {
1087 buf = alloc_buf(PAGE_SIZE);
1088 if (!buf)
1089 break;
1090
1091 spin_lock_irq(lock);
1092 ret = add_inbuf(vq, buf);
1093 if (ret < 0) {
1094 spin_unlock_irq(lock);
1095 free_buf(buf);
1096 break;
1097 }
1098 nr_added_bufs++;
1099 spin_unlock_irq(lock);
1100 } while (ret > 0);
1101
1102 return nr_added_bufs;
1103}
1104
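/* Deliver SIGIO to any process that asked for async notification on this port. */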
1105static void send_sigio_to_port(struct port *port)
1106{
1107 if (port->async_queue && port->guest_connected)
1108 kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
1109}
1110
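/*
 * Create a new port: set up its char device, sysfs device and debugfs
 * entry, and fill its input virtqueue with buffers.
 */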
1111static int add_port(struct ports_device *portdev, u32 id)
1112{
1113 char debugfs_name[16];
1114 struct port *port;
1115 struct port_buffer *buf;
1116 dev_t devt;
1117 unsigned int nr_added_bufs;
1118 int err;
1119
1120 port = kmalloc(sizeof(*port), GFP_KERNEL);
1121 if (!port) {
1122 err = -ENOMEM;
1123 goto fail;
1124 }
1125 kref_init(&port->kref);
1126
1127 port->portdev = portdev;
1128 port->id = id;
1129
1130 port->name = NULL;
1131 port->inbuf = NULL;
1132 port->cons.hvc = NULL;
1133 port->async_queue = NULL;
1134
1135 port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
1136
1137 port->host_connected = port->guest_connected = false;
1138
1139 port->outvq_full = false;
1140
1141 port->in_vq = portdev->in_vqs[port->id];
1142 port->out_vq = portdev->out_vqs[port->id];
1143
1144 port->cdev = cdev_alloc();
1145 if (!port->cdev) {
1146 dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
1147 err = -ENOMEM;
1148 goto free_port;
1149 }
1150 port->cdev->ops = &port_fops;
1151
1152 devt = MKDEV(portdev->chr_major, id);
1153 err = cdev_add(port->cdev, devt, 1);
1154 if (err < 0) {
1155 dev_err(&port->portdev->vdev->dev,
1156 "Error %d adding cdev for port %u\n", err, id);
1157 goto free_cdev;
1158 }
1159 port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
1160 devt, port, "vport%up%u",
1161 port->portdev->drv_index, id);
1162 if (IS_ERR(port->dev)) {
1163 err = PTR_ERR(port->dev);
1164 dev_err(&port->portdev->vdev->dev,
1165 "Error %d creating device for port %u\n",
1166 err, id);
1167 goto free_cdev;
1168 }
1169
1170 spin_lock_init(&port->inbuf_lock);
1171 spin_lock_init(&port->outvq_lock);
1172 init_waitqueue_head(&port->waitqueue);
1173
1174 /* Fill the in_vq with buffers so the host can send us data. */
1175 nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
1176 if (!nr_added_bufs) {
1177 dev_err(port->dev, "Error allocating inbufs\n");
1178 err = -ENOMEM;
1179 goto free_device;
1180 }
1181
1182 /*
1183 * If we're not using multiport support, this has to be a console port
1184 */
1185 if (!use_multiport(port->portdev)) {
1186 err = init_port_console(port);
1187 if (err)
1188 goto free_inbufs;
1189 }
1190
1191 spin_lock_irq(&portdev->ports_lock);
1192 list_add_tail(&port->list, &port->portdev->ports);
1193 spin_unlock_irq(&portdev->ports_lock);
1194
1195 /*
1196 * Tell the Host we're set so that it can send us various
1197 * configuration parameters for this port (eg, port name,
1198 * caching, whether this is a console port, etc.)
1199 */
1200 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1201
1202 if (pdrvdata.debugfs_dir) {
1203 /*
1204 * Finally, create the debugfs file that we can use to
1205 * inspect a port's state at any time
1206 */
1207 sprintf(debugfs_name, "vport%up%u",
1208 port->portdev->drv_index, id);
1209 port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
1210 pdrvdata.debugfs_dir,
1211 port,
1212 &port_debugfs_ops);
1213 }
1214 return 0;
1215
1216free_inbufs:
1217 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1218 free_buf(buf);
1219free_device:
1220 device_destroy(pdrvdata.class, port->dev->devt);
1221free_cdev:
1222 cdev_del(port->cdev);
1223free_port:
1224 kfree(port);
1225fail:
1226 /* The host might want to notify management sw about port add failure */
1227 __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
1228 return err;
1229}
1230
1231/* No users remain, remove all port-specific data. */
1232static void remove_port(struct kref *kref)
1233{
1234 struct port *port;
1235
1236 port = container_of(kref, struct port, kref);
1237
1238 sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
1239 device_destroy(pdrvdata.class, port->dev->devt);
1240 cdev_del(port->cdev);
1241
1242 kfree(port->name);
1243
1244 debugfs_remove(port->debugfs_file);
1245
1246 kfree(port);
1247}
1248
1249/*
1250 * Port got unplugged. Remove port from portdev's list and drop the
1251 * kref reference. If no userspace has this port opened, it will
 1252 * result in the immediate removal of the port.
1253 */
1254static void unplug_port(struct port *port)
1255{
1256 struct port_buffer *buf;
1257
1258 spin_lock_irq(&port->portdev->ports_lock);
1259 list_del(&port->list);
1260 spin_unlock_irq(&port->portdev->ports_lock);
1261
1262 if (port->guest_connected) {
1263 port->guest_connected = false;
1264 port->host_connected = false;
1265 wake_up_interruptible(&port->waitqueue);
1266
1267 /* Let the app know the port is going down. */
1268 send_sigio_to_port(port);
1269 }
1270
1271 if (is_console_port(port)) {
1272 spin_lock_irq(&pdrvdata_lock);
1273 list_del(&port->cons.list);
1274 spin_unlock_irq(&pdrvdata_lock);
1275#if 0
1276 /*
1277 * hvc_remove() not called as removing one hvc port
1278 * results in other hvc ports getting frozen.
1279 *
1280 * Once this is resolved in hvc, this functionality
1281 * will be enabled. Till that is done, the -EPIPE
1282 * return from get_chars() above will help
1283 * hvc_console.c to clean up on ports we remove here.
1284 */
1285 hvc_remove(port->cons.hvc);
1286#endif
1287 }
1288
1289 /* Remove unused data this port might have received. */
1290 discard_port_data(port);
1291
1292 reclaim_consumed_buffers(port);
1293
1294 /* Remove buffers we queued up for the Host to send us data in. */
1295 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1296 free_buf(buf);
1297
1298 /*
1299 * We should just assume the device itself has gone off --
1300 * else a close on an open port later will try to send out a
1301 * control message.
1302 */
1303 port->portdev = NULL;
1304
1305 /*
1306 * Locks around here are not necessary - a port can't be
1307 * opened after we removed the port struct from ports_list
1308 * above.
1309 */
1310 kref_put(&port->kref, remove_port);
1311}
1312
1313/* Any private messages that the Host and Guest want to share */
1314static void handle_control_message(struct ports_device *portdev,
1315 struct port_buffer *buf)
1316{
1317 struct virtio_console_control *cpkt;
1318 struct port *port;
1319 size_t name_size;
1320 int err;
1321
1322 cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
1323
1324 port = find_port_by_id(portdev, cpkt->id);
1325 if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) {
1326 /* No valid header at start of buffer. Drop it. */
1327 dev_dbg(&portdev->vdev->dev,
1328 "Invalid index %u in control packet\n", cpkt->id);
1329 return;
1330 }
1331
1332 switch (cpkt->event) {
1333 case VIRTIO_CONSOLE_PORT_ADD:
1334 if (port) {
1335 dev_dbg(&portdev->vdev->dev,
1336 "Port %u already added\n", port->id);
1337 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1338 break;
1339 }
1340 if (cpkt->id >= portdev->config.max_nr_ports) {
1341 dev_warn(&portdev->vdev->dev,
1342 "Request for adding port with out-of-bound id %u, max. supported id: %u\n",
1343 cpkt->id, portdev->config.max_nr_ports - 1);
1344 break;
1345 }
1346 add_port(portdev, cpkt->id);
1347 break;
1348 case VIRTIO_CONSOLE_PORT_REMOVE:
1349 unplug_port(port);
1350 break;
1351 case VIRTIO_CONSOLE_CONSOLE_PORT:
1352 if (!cpkt->value)
1353 break;
1354 if (is_console_port(port))
1355 break;
1356
1357 init_port_console(port);
1358 /*
1359 * Could remove the port here in case init fails - but
1360 * have to notify the host first.
1361 */
1362 break;
1363 case VIRTIO_CONSOLE_RESIZE: {
1364 struct {
1365 __u16 rows;
1366 __u16 cols;
1367 } size;
1368
1369 if (!is_console_port(port))
1370 break;
1371
1372 memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
1373 sizeof(size));
1374 set_console_size(port, size.rows, size.cols);
1375
1376 port->cons.hvc->irq_requested = 1;
1377 resize_console(port);
1378 break;
1379 }
1380 case VIRTIO_CONSOLE_PORT_OPEN:
1381 port->host_connected = cpkt->value;
1382 wake_up_interruptible(&port->waitqueue);
1383 /*
1384 * If the host port got closed and the host had any
1385 * unconsumed buffers, we'll be able to reclaim them
1386 * now.
1387 */
1388 spin_lock_irq(&port->outvq_lock);
1389 reclaim_consumed_buffers(port);
1390 spin_unlock_irq(&port->outvq_lock);
1391
1392 /*
1393 * If the guest is connected, it'll be interested in
1394 * knowing the host connection state changed.
1395 */
1396 send_sigio_to_port(port);
1397 break;
1398 case VIRTIO_CONSOLE_PORT_NAME:
1399 /*
1400 * Skip the size of the header and the cpkt to get the size
1401 * of the name that was sent
1402 */
1403 name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;
1404
1405 port->name = kmalloc(name_size, GFP_KERNEL);
1406 if (!port->name) {
1407 dev_err(port->dev,
1408 "Not enough space to store port name\n");
1409 break;
1410 }
1411 strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
1412 name_size - 1);
1413 port->name[name_size - 1] = 0;
1414
1415 /*
1416 * Since we only have one sysfs attribute, 'name',
1417 * create it only if we have a name for the port.
1418 */
1419 err = sysfs_create_group(&port->dev->kobj,
1420 &port_attribute_group);
1421 if (err) {
1422 dev_err(port->dev,
1423 "Error %d creating sysfs device attributes\n",
1424 err);
1425 } else {
1426 /*
1427 * Generate a udev event so that appropriate
1428 * symlinks can be created based on udev
1429 * rules.
1430 */
1431 kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
1432 }
1433 break;
1434 }
1435}
1436
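/*
 * Deferred work: drain the control receive virtqueue, handle each
 * message and re-queue the buffers for the Host.
 */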
1437static void control_work_handler(struct work_struct *work)
1438{
1439 struct ports_device *portdev;
1440 struct virtqueue *vq;
1441 struct port_buffer *buf;
1442 unsigned int len;
1443
1444 portdev = container_of(work, struct ports_device, control_work);
1445 vq = portdev->c_ivq;
1446
1447 spin_lock(&portdev->cvq_lock);
1448 while ((buf = virtqueue_get_buf(vq, &len))) {
1449 spin_unlock(&portdev->cvq_lock);
1450
1451 buf->len = len;
1452 buf->offset = 0;
1453
1454 handle_control_message(portdev, buf);
1455
1456 spin_lock(&portdev->cvq_lock);
1457 if (add_inbuf(portdev->c_ivq, buf) < 0) {
1458 dev_warn(&portdev->vdev->dev,
1459 "Error adding buffer to queue\n");
1460 free_buf(buf);
1461 }
1462 }
1463 spin_unlock(&portdev->cvq_lock);
1464}
1465
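/* Output virtqueue callback: the Host consumed a buffer, so wake up blocked writers. */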
1466static void out_intr(struct virtqueue *vq)
1467{
1468 struct port *port;
1469
1470 port = find_port_by_vq(vq->vdev->priv, vq);
1471 if (!port)
1472 return;
1473
1474 wake_up_interruptible(&port->waitqueue);
1475}
1476
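/* Input virtqueue callback: new data arrived for a port. */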
1477static void in_intr(struct virtqueue *vq)
1478{
1479 struct port *port;
1480 unsigned long flags;
1481
1482 port = find_port_by_vq(vq->vdev->priv, vq);
1483 if (!port)
1484 return;
1485
1486 spin_lock_irqsave(&port->inbuf_lock, flags);
1487 if (!port->inbuf)
1488 port->inbuf = get_inbuf(port);
1489
1490 /*
1491 * Don't queue up data when port is closed. This condition
1492 * can be reached when a console port is not yet connected (no
1493 * tty is spawned) and the host sends out data to console
1494 * ports. For generic serial ports, the host won't
1495 * (shouldn't) send data till the guest is connected.
1496 */
1497 if (!port->guest_connected)
1498 discard_port_data(port);
1499
1500 spin_unlock_irqrestore(&port->inbuf_lock, flags);
1501
1502 wake_up_interruptible(&port->waitqueue);
1503
1504 /* Send a SIGIO indicating new data in case the process asked for it */
1505 send_sigio_to_port(port);
1506
1507 if (is_console_port(port) && hvc_poll(port->cons.hvc))
1508 hvc_kick();
1509}
1510
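/* Control receive virtqueue callback: defer processing to the workqueue. */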
1511static void control_intr(struct virtqueue *vq)
1512{
1513 struct ports_device *portdev;
1514
1515 portdev = vq->vdev->priv;
1516 schedule_work(&portdev->control_work);
1517}
1518
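/* Config-space change notification; for non-multiport devices this means a console resize. */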
1519static void config_intr(struct virtio_device *vdev)
1520{
1521 struct ports_device *portdev;
1522
1523 portdev = vdev->priv;
1524
1525 if (!use_multiport(portdev)) {
1526 struct port *port;
1527 u16 rows, cols;
1528
1529 vdev->config->get(vdev,
1530 offsetof(struct virtio_console_config, cols),
1531 &cols, sizeof(u16));
1532 vdev->config->get(vdev,
1533 offsetof(struct virtio_console_config, rows),
1534 &rows, sizeof(u16));
1535
1536 port = find_port_by_id(portdev, 0);
1537 set_console_size(port, rows, cols);
1538
1539 /*
1540 * We'll use this way of resizing only for legacy
1541 * support. For newer userspace
 1542 * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages
1543 * to indicate console size changes so that it can be
1544 * done per-port.
1545 */
1546 resize_console(port);
1547 }
1548}
1549
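/*
 * Find and set up all virtqueues: an in/out pair per port, plus the
 * control pair when multiport support is in use.
 */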
1550static int init_vqs(struct ports_device *portdev)
1551{
1552 vq_callback_t **io_callbacks;
1553 char **io_names;
1554 struct virtqueue **vqs;
1555 u32 i, j, nr_ports, nr_queues;
1556 int err;
1557
1558 nr_ports = portdev->config.max_nr_ports;
1559 nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
1560
1561 vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
1562 io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL);
1563 io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL);
1564 portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
1565 GFP_KERNEL);
1566 portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
1567 GFP_KERNEL);
1568 if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
1569 !portdev->out_vqs) {
1570 err = -ENOMEM;
1571 goto free;
1572 }
1573
1574 /*
1575 * For backward compat (newer host but older guest), the host
1576 * spawns a console port first and also inits the vqs for port
1577 * 0 before others.
1578 */
1579 j = 0;
1580 io_callbacks[j] = in_intr;
1581 io_callbacks[j + 1] = out_intr;
1582 io_names[j] = "input";
1583 io_names[j + 1] = "output";
1584 j += 2;
1585
1586 if (use_multiport(portdev)) {
1587 io_callbacks[j] = control_intr;
1588 io_callbacks[j + 1] = NULL;
1589 io_names[j] = "control-i";
1590 io_names[j + 1] = "control-o";
1591
1592 for (i = 1; i < nr_ports; i++) {
1593 j += 2;
1594 io_callbacks[j] = in_intr;
1595 io_callbacks[j + 1] = out_intr;
1596 io_names[j] = "input";
1597 io_names[j + 1] = "output";
1598 }
1599 }
1600 /* Find the queues. */
1601 err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs,
1602 io_callbacks,
1603 (const char **)io_names);
1604 if (err)
1605 goto free;
1606
1607 j = 0;
1608 portdev->in_vqs[0] = vqs[0];
1609 portdev->out_vqs[0] = vqs[1];
1610 j += 2;
1611 if (use_multiport(portdev)) {
1612 portdev->c_ivq = vqs[j];
1613 portdev->c_ovq = vqs[j + 1];
1614
1615 for (i = 1; i < nr_ports; i++) {
1616 j += 2;
1617 portdev->in_vqs[i] = vqs[j];
1618 portdev->out_vqs[i] = vqs[j + 1];
1619 }
1620 }
1621 kfree(io_names);
1622 kfree(io_callbacks);
1623 kfree(vqs);
1624
1625 return 0;
1626
1627free:
1628 kfree(portdev->out_vqs);
1629 kfree(portdev->in_vqs);
1630 kfree(io_names);
1631 kfree(io_callbacks);
1632 kfree(vqs);
1633
1634 return err;
1635}
1636
1637static const struct file_operations portdev_fops = {
1638 .owner = THIS_MODULE,
1639};
1640
1641/*
1642 * Once we're further in boot, we get probed like any other virtio
1643 * device.
1644 *
1645 * If the host also supports multiple console ports, we check the
1646 * config space to see how many ports the host has spawned. We
1647 * initialize each port found.
1648 */
1649static int __devinit virtcons_probe(struct virtio_device *vdev)
1650{
1651 struct ports_device *portdev;
1652 int err;
1653 bool multiport;
1654
1655 portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
1656 if (!portdev) {
1657 err = -ENOMEM;
1658 goto fail;
1659 }
1660
1661 /* Attach this portdev to this virtio_device, and vice-versa. */
1662 portdev->vdev = vdev;
1663 vdev->priv = portdev;
1664
1665 spin_lock_irq(&pdrvdata_lock);
1666 portdev->drv_index = pdrvdata.index++;
1667 spin_unlock_irq(&pdrvdata_lock);
1668
1669 portdev->chr_major = register_chrdev(0, "virtio-portsdev",
1670 &portdev_fops);
1671 if (portdev->chr_major < 0) {
1672 dev_err(&vdev->dev,
1673 "Error %d registering chrdev for device %u\n",
1674 portdev->chr_major, portdev->drv_index);
1675 err = portdev->chr_major;
1676 goto free;
1677 }
1678
1679 multiport = false;
1680 portdev->config.max_nr_ports = 1;
1681 if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
1682 multiport = true;
1683 vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT;
1684
1685 vdev->config->get(vdev, offsetof(struct virtio_console_config,
1686 max_nr_ports),
1687 &portdev->config.max_nr_ports,
1688 sizeof(portdev->config.max_nr_ports));
1689 }
1690
 1691 /* Let the Host know we support multiple ports. */
1692 vdev->config->finalize_features(vdev);
1693
1694 err = init_vqs(portdev);
1695 if (err < 0) {
1696 dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
1697 goto free_chrdev;
1698 }
1699
1700 spin_lock_init(&portdev->ports_lock);
1701 INIT_LIST_HEAD(&portdev->ports);
1702
1703 if (multiport) {
1704 unsigned int nr_added_bufs;
1705
1706 spin_lock_init(&portdev->cvq_lock);
1707 INIT_WORK(&portdev->control_work, &control_work_handler);
1708
1709 nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
1710 if (!nr_added_bufs) {
1711 dev_err(&vdev->dev,
1712 "Error allocating buffers for control queue\n");
1713 err = -ENOMEM;
1714 goto free_vqs;
1715 }
1716 } else {
1717 /*
1718 * For backward compatibility: Create a console port
1719 * if we're running on older host.
1720 */
1721 add_port(portdev, 0);
1722 }
1723
1724 spin_lock_irq(&pdrvdata_lock);
1725 list_add_tail(&portdev->list, &pdrvdata.portdevs);
1726 spin_unlock_irq(&pdrvdata_lock);
1727
1728 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
1729 VIRTIO_CONSOLE_DEVICE_READY, 1);
1730 return 0;
1731
1732free_vqs:
1733 /* The host might want to notify mgmt sw about device add failure */
1734 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
1735 VIRTIO_CONSOLE_DEVICE_READY, 0);
1736 vdev->config->del_vqs(vdev);
1737 kfree(portdev->in_vqs);
1738 kfree(portdev->out_vqs);
1739free_chrdev:
1740 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
1741free:
1742 kfree(portdev);
1743fail:
1744 return err;
1745}
1746
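/* The device is going away: unplug all ports, free control-queue buffers and release the vqs. */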
1747static void virtcons_remove(struct virtio_device *vdev)
1748{
1749 struct ports_device *portdev;
1750 struct port *port, *port2;
1751
1752 portdev = vdev->priv;
1753
1754 spin_lock_irq(&pdrvdata_lock);
1755 list_del(&portdev->list);
1756 spin_unlock_irq(&pdrvdata_lock);
1757
1758 /* Disable interrupts for vqs */
1759 vdev->config->reset(vdev);
1760 /* Finish up work that's lined up */
1761 cancel_work_sync(&portdev->control_work);
1762
1763 list_for_each_entry_safe(port, port2, &portdev->ports, list)
1764 unplug_port(port);
1765
1766 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
1767
1768 /*
1769 * When yanking out a device, we immediately lose the
1770 * (device-side) queues. So there's no point in keeping the
1771 * guest side around till we drop our final reference. This
1772 * also means that any ports which are in an open state will
1773 * have to just stop using the port, as the vqs are going
1774 * away.
1775 */
1776 if (use_multiport(portdev)) {
1777 struct port_buffer *buf;
1778 unsigned int len;
1779
1780 while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
1781 free_buf(buf);
1782
1783 while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
1784 free_buf(buf);
1785 }
1786
1787 vdev->config->del_vqs(vdev);
1788 kfree(portdev->in_vqs);
1789 kfree(portdev->out_vqs);
1790
1791 kfree(portdev);
1792}
1793
1794static struct virtio_device_id id_table[] = {
1795 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
1796 { 0 },
1797};
1798
1799static unsigned int features[] = {
1800 VIRTIO_CONSOLE_F_SIZE,
1801 VIRTIO_CONSOLE_F_MULTIPORT,
1802};
1803
1804static struct virtio_driver virtio_console = {
1805 .feature_table = features,
1806 .feature_table_size = ARRAY_SIZE(features),
1807 .driver.name = KBUILD_MODNAME,
1808 .driver.owner = THIS_MODULE,
1809 .id_table = id_table,
1810 .probe = virtcons_probe,
1811 .remove = virtcons_remove,
1812 .config_changed = config_intr,
1813};
1814
1815static int __init init(void)
1816{
1817 int err;
1818
1819 pdrvdata.class = class_create(THIS_MODULE, "virtio-ports");
1820 if (IS_ERR(pdrvdata.class)) {
1821 err = PTR_ERR(pdrvdata.class);
1822 pr_err("Error %d creating virtio-ports class\n", err);
1823 return err;
1824 }
1825
1826 pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
1827 if (!pdrvdata.debugfs_dir) {
1828 pr_warning("Error %ld creating debugfs dir for virtio-ports\n",
1829 PTR_ERR(pdrvdata.debugfs_dir));
1830 }
1831 INIT_LIST_HEAD(&pdrvdata.consoles);
1832 INIT_LIST_HEAD(&pdrvdata.portdevs);
1833
1834 return register_virtio_driver(&virtio_console);
1835}
1836
1837static void __exit fini(void)
1838{
1839 unregister_virtio_driver(&virtio_console);
1840
1841 class_destroy(pdrvdata.class);
1842 if (pdrvdata.debugfs_dir)
1843 debugfs_remove_recursive(pdrvdata.debugfs_dir);
1844}
1845module_init(init);
1846module_exit(fini);
1847
1848MODULE_DEVICE_TABLE(virtio, id_table);
1849MODULE_DESCRIPTION("Virtio console driver");
1850MODULE_LICENSE("GPL");