Diffstat (limited to 'drivers/char')

 -rw-r--r--  drivers/char/hpet.c                 | 14
 -rw-r--r--  drivers/char/hw_random/core.c       |  9
 -rw-r--r--  drivers/char/hw_random/virtio-rng.c | 13
 -rw-r--r--  drivers/char/virtio_console.c       | 44

 4 files changed, 51 insertions, 29 deletions
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index e3f9a99b8522..d784650d14f0 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -373,26 +373,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
 	struct hpet_dev *devp;
 	unsigned long addr;
 
-	if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
-		return -EINVAL;
-
 	devp = file->private_data;
 	addr = devp->hd_hpets->hp_hpet_phys;
 
 	if (addr & (PAGE_SIZE - 1))
 		return -ENOSYS;
 
-	vma->vm_flags |= VM_IO;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
-					PAGE_SIZE, vma->vm_page_prot)) {
-		printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
-			__func__);
-		return -EAGAIN;
-	}
-
-	return 0;
+	return vm_iomap_memory(vma, addr, PAGE_SIZE);
 #else
 	return -ENOSYS;
 #endif
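The hpet_mmap() hunk above relies on vm_iomap_memory() performing the size and offset validation, and the VM_IO setup, that the removed lines did by hand. A minimal sketch of the resulting handler shape, with a hypothetical driver name and MMIO base that are not taken from the patch:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical single-page MMIO register block. */
#define EXAMPLE_MMIO_BASE	0xfed00000UL

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Map the register page uncached into the caller's address space. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/*
	 * vm_iomap_memory() rejects mappings whose size or pgoff does not
	 * fit the region and sets VM_IO via io_remap_pfn_range(), so no
	 * explicit -EINVAL check or flag twiddling is needed here.
	 */
	return vm_iomap_memory(vma, EXAMPLE_MMIO_BASE, PAGE_SIZE);
}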
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 69ae5972713c..a0f7724852eb 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -380,6 +380,15 @@ void hwrng_unregister(struct hwrng *rng)
 }
 EXPORT_SYMBOL_GPL(hwrng_unregister);
 
+static void __exit hwrng_exit(void)
+{
+	mutex_lock(&rng_mutex);
+	BUG_ON(current_rng);
+	kfree(rng_buffer);
+	mutex_unlock(&rng_mutex);
+}
+
+module_exit(hwrng_exit);
 
 MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 10fd71ccf587..6bf4d47324eb 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -92,14 +92,22 @@ static int probe_common(struct virtio_device *vdev)
 {
 	int err;
 
+	if (vq) {
+		/* We only support one device for now */
+		return -EBUSY;
+	}
 	/* We expect a single virtqueue. */
 	vq = virtio_find_single_vq(vdev, random_recv_done, "input");
-	if (IS_ERR(vq))
-		return PTR_ERR(vq);
+	if (IS_ERR(vq)) {
+		err = PTR_ERR(vq);
+		vq = NULL;
+		return err;
+	}
 
 	err = hwrng_register(&virtio_hwrng);
 	if (err) {
 		vdev->config->del_vqs(vdev);
+		vq = NULL;
 		return err;
 	}
 
@@ -112,6 +120,7 @@ static void remove_common(struct virtio_device *vdev)
 	busy = false;
 	hwrng_unregister(&virtio_hwrng);
 	vdev->config->del_vqs(vdev);
+	vq = NULL;
 }
 
 static int virtrng_probe(struct virtio_device *vdev)
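The virtio-rng hunks keep the driver's single global vq pointer consistent: a second probe is refused with -EBUSY, and every error and tear-down path clears vq so a later probe can succeed instead of tripping over a stale or ERR_PTR value. A compact sketch of that pattern, using hypothetical names (example_vq, example_probe, example_remove):

#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* One supported device at a time, mirroring the driver's global vq. */
static struct virtqueue *example_vq;

static int example_probe(struct virtio_device *vdev)
{
	int err;

	if (example_vq)
		return -EBUSY;		/* only one device for now */

	example_vq = virtio_find_single_vq(vdev, NULL, "input");
	if (IS_ERR(example_vq)) {
		err = PTR_ERR(example_vq);
		example_vq = NULL;	/* never leave an ERR_PTR behind */
		return err;
	}
	return 0;
}

static void example_remove(struct virtio_device *vdev)
{
	vdev->config->del_vqs(vdev);
	example_vq = NULL;		/* allow a future probe to succeed */
}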
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index e905d5f53051..ce5f3fc25d6d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -149,7 +149,8 @@ struct ports_device {
 	spinlock_t ports_lock;
 
 	/* To protect the vq operations for the control channel */
-	spinlock_t cvq_lock;
+	spinlock_t c_ivq_lock;
+	spinlock_t c_ovq_lock;
 
 	/* The current config space is stored here */
 	struct virtio_console_config config;
@@ -569,11 +570,14 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
 	vq = portdev->c_ovq;
 
 	sg_init_one(sg, &cpkt, sizeof(cpkt));
+
+	spin_lock(&portdev->c_ovq_lock);
 	if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
 		virtqueue_kick(vq);
 		while (!virtqueue_get_buf(vq, &len))
 			cpu_relax();
 	}
+	spin_unlock(&portdev->c_ovq_lock);
 	return 0;
 }
 
@@ -1436,7 +1440,7 @@ static int add_port(struct ports_device *portdev, u32 id)
 		 * rproc_serial does not want the console port, only
 		 * the generic port implementation.
 		 */
-		port->host_connected = port->guest_connected = true;
+		port->host_connected = true;
 	else if (!use_multiport(port->portdev)) {
 		/*
 		 * If we're not using multiport support,
@@ -1709,23 +1713,23 @@ static void control_work_handler(struct work_struct *work)
 	portdev = container_of(work, struct ports_device, control_work);
 	vq = portdev->c_ivq;
 
-	spin_lock(&portdev->cvq_lock);
+	spin_lock(&portdev->c_ivq_lock);
 	while ((buf = virtqueue_get_buf(vq, &len))) {
-		spin_unlock(&portdev->cvq_lock);
+		spin_unlock(&portdev->c_ivq_lock);
 
 		buf->len = len;
 		buf->offset = 0;
 
 		handle_control_message(portdev, buf);
 
-		spin_lock(&portdev->cvq_lock);
+		spin_lock(&portdev->c_ivq_lock);
 		if (add_inbuf(portdev->c_ivq, buf) < 0) {
 			dev_warn(&portdev->vdev->dev,
 				 "Error adding buffer to queue\n");
 			free_buf(buf, false);
 		}
 	}
-	spin_unlock(&portdev->cvq_lock);
+	spin_unlock(&portdev->c_ivq_lock);
 }
 
 static void out_intr(struct virtqueue *vq)
@@ -1752,13 +1756,23 @@ static void in_intr(struct virtqueue *vq)
 	port->inbuf = get_inbuf(port);
 
 	/*
-	 * Don't queue up data when port is closed. This condition
+	 * Normally the port should not accept data when the port is
+	 * closed. For generic serial ports, the host won't (shouldn't)
+	 * send data till the guest is connected. But this condition
 	 * can be reached when a console port is not yet connected (no
-	 * tty is spawned) and the host sends out data to console
-	 * ports. For generic serial ports, the host won't
-	 * (shouldn't) send data till the guest is connected.
+	 * tty is spawned) and the other side sends out data over the
+	 * vring, or when a remote device starts sending data before
+	 * the ports are opened.
+	 *
+	 * A generic serial port will discard data if not connected,
+	 * while console ports and rproc-serial ports accept data at
+	 * any time. rproc-serial is initialized with guest_connected
+	 * set to false because port_fops_open expects this. Console
+	 * ports are hooked up with an HVC console and are initialized
+	 * with guest_connected set to true.
 	 */
-	if (!port->guest_connected)
+
+	if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))
 		discard_port_data(port);
 
 	spin_unlock_irqrestore(&port->inbuf_lock, flags);
@@ -1986,10 +2000,12 @@ static int virtcons_probe(struct virtio_device *vdev)
 	if (multiport) {
 		unsigned int nr_added_bufs;
 
-		spin_lock_init(&portdev->cvq_lock);
+		spin_lock_init(&portdev->c_ivq_lock);
+		spin_lock_init(&portdev->c_ovq_lock);
 		INIT_WORK(&portdev->control_work, &control_work_handler);
 
-		nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
+		nr_added_bufs = fill_queue(portdev->c_ivq,
+					   &portdev->c_ivq_lock);
 		if (!nr_added_bufs) {
 			dev_err(&vdev->dev,
 				"Error allocating buffers for control queue\n");
@@ -2140,7 +2156,7 @@ static int virtcons_restore(struct virtio_device *vdev)
 		return ret;
 
 	if (use_multiport(portdev))
-		fill_queue(portdev->c_ivq, &portdev->cvq_lock);
+		fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
 
 	list_for_each_entry(port, &portdev->ports, list) {
 		port->in_vq = portdev->in_vqs[port->id];
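The virtio_console hunks split the old cvq_lock into c_ivq_lock and c_ovq_lock and take the output-queue lock around the whole add-buffer/kick/poll sequence in __send_control_msg(), since operations on a single virtqueue must not run concurrently. A short sketch of that locking pattern, with illustrative names (example_send_on_vq is not from the patch):

#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>

/* Serialize all operations on one output virtqueue under its own lock. */
static void example_send_on_vq(struct virtqueue *out_vq, spinlock_t *out_lock,
			       struct scatterlist *sg, void *token)
{
	unsigned int len;

	spin_lock(out_lock);
	if (virtqueue_add_buf(out_vq, sg, 1, 0, token, GFP_ATOMIC) == 0) {
		virtqueue_kick(out_vq);
		/* Busy-wait until the host has consumed the buffer. */
		while (!virtqueue_get_buf(out_vq, &len))
			cpu_relax();
	}
	spin_unlock(out_lock);
}

The input queue keeps its own c_ivq_lock so that control_work_handler() can refill control buffers without contending with senders on the output path.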