diff options
author | Amit Shah <amit.shah@redhat.com> | 2010-02-12 00:02:18 -0500 |
---|---|---|
committer | Rusty Russell <rusty@rustcorp.com.au> | 2010-02-23 22:53:06 -0500 |
commit | d6933561924d8022f5d986ce7c511a2646eeadce (patch) | |
tree | 8e0a0c86a079400df7919f80194289258b7f5144 /drivers/char/virtio_console.c | |
parent | 22a29eacd2a17f22c8260a8106a4e36bae7fb6ea (diff) |
virtio: console: Fill ports' entire in_vq with buffers
Instead of allocating just one buffer for a port's in_vq, fill
the entire in_vq with buffers so the host need not stall while
an application consumes the data and makes the buffer available
again for the host.
Signed-off-by: Amit Shah <amit.shah@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'drivers/char/virtio_console.c')
-rw-r--r-- | drivers/char/virtio_console.c | 53 |
1 file changed, 32 insertions, 21 deletions
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index c40703759e26..213373b5f17f 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -330,6 +330,7 @@ static void discard_port_data(struct port *port) | |||
330 | struct port_buffer *buf; | 330 | struct port_buffer *buf; |
331 | struct virtqueue *vq; | 331 | struct virtqueue *vq; |
332 | unsigned int len; | 332 | unsigned int len; |
333 | int ret; | ||
333 | 334 | ||
334 | vq = port->in_vq; | 335 | vq = port->in_vq; |
335 | if (port->inbuf) | 336 | if (port->inbuf) |
@@ -337,16 +338,18 @@ static void discard_port_data(struct port *port) | |||
337 | else | 338 | else |
338 | buf = vq->vq_ops->get_buf(vq, &len); | 339 | buf = vq->vq_ops->get_buf(vq, &len); |
339 | 340 | ||
340 | if (!buf) | 341 | ret = 0; |
341 | return; | 342 | while (buf) { |
342 | 343 | if (add_inbuf(vq, buf) < 0) { | |
343 | if (add_inbuf(vq, buf) < 0) { | 344 | ret++; |
344 | buf->len = buf->offset = 0; | 345 | free_buf(buf); |
345 | dev_warn(port->dev, "Error adding buffer back to vq\n"); | 346 | } |
346 | return; | 347 | buf = vq->vq_ops->get_buf(vq, &len); |
347 | } | 348 | } |
348 | |||
349 | port->inbuf = NULL; | 349 | port->inbuf = NULL; |
350 | if (ret) | ||
351 | dev_warn(port->dev, "Errors adding %d buffers back to vq\n", | ||
352 | ret); | ||
350 | } | 353 | } |
351 | 354 | ||
352 | static bool port_has_data(struct port *port) | 355 | static bool port_has_data(struct port *port) |
@@ -354,12 +357,19 @@ static bool port_has_data(struct port *port) | |||
354 | unsigned long flags; | 357 | unsigned long flags; |
355 | bool ret; | 358 | bool ret; |
356 | 359 | ||
357 | ret = false; | ||
358 | spin_lock_irqsave(&port->inbuf_lock, flags); | 360 | spin_lock_irqsave(&port->inbuf_lock, flags); |
359 | if (port->inbuf) | 361 | if (port->inbuf) { |
360 | ret = true; | 362 | ret = true; |
363 | goto out; | ||
364 | } | ||
365 | port->inbuf = get_inbuf(port); | ||
366 | if (port->inbuf) { | ||
367 | ret = true; | ||
368 | goto out; | ||
369 | } | ||
370 | ret = false; | ||
371 | out: | ||
361 | spin_unlock_irqrestore(&port->inbuf_lock, flags); | 372 | spin_unlock_irqrestore(&port->inbuf_lock, flags); |
362 | |||
363 | return ret; | 373 | return ret; |
364 | } | 374 | } |
365 | 375 | ||
@@ -1011,7 +1021,8 @@ static void in_intr(struct virtqueue *vq) | |||
1011 | return; | 1021 | return; |
1012 | 1022 | ||
1013 | spin_lock_irqsave(&port->inbuf_lock, flags); | 1023 | spin_lock_irqsave(&port->inbuf_lock, flags); |
1014 | port->inbuf = get_inbuf(port); | 1024 | if (!port->inbuf) |
1025 | port->inbuf = get_inbuf(port); | ||
1015 | 1026 | ||
1016 | /* | 1027 | /* |
1017 | * Don't queue up data when port is closed. This condition | 1028 | * Don't queue up data when port is closed. This condition |
@@ -1087,7 +1098,7 @@ static int add_port(struct ports_device *portdev, u32 id) | |||
1087 | { | 1098 | { |
1088 | char debugfs_name[16]; | 1099 | char debugfs_name[16]; |
1089 | struct port *port; | 1100 | struct port *port; |
1090 | struct port_buffer *inbuf; | 1101 | struct port_buffer *buf; |
1091 | dev_t devt; | 1102 | dev_t devt; |
1092 | int err; | 1103 | int err; |
1093 | 1104 | ||
@@ -1132,22 +1143,21 @@ static int add_port(struct ports_device *portdev, u32 id) | |||
1132 | spin_lock_init(&port->inbuf_lock); | 1143 | spin_lock_init(&port->inbuf_lock); |
1133 | init_waitqueue_head(&port->waitqueue); | 1144 | init_waitqueue_head(&port->waitqueue); |
1134 | 1145 | ||
1135 | inbuf = alloc_buf(PAGE_SIZE); | 1146 | /* Fill the in_vq with buffers so the host can send us data. */ |
1136 | if (!inbuf) { | 1147 | err = fill_queue(port->in_vq, &port->inbuf_lock); |
1148 | if (!err) { | ||
1149 | dev_err(port->dev, "Error allocating inbufs\n"); | ||
1137 | err = -ENOMEM; | 1150 | err = -ENOMEM; |
1138 | goto free_device; | 1151 | goto free_device; |
1139 | } | 1152 | } |
1140 | 1153 | ||
1141 | /* Register the input buffer the first time. */ | ||
1142 | add_inbuf(port->in_vq, inbuf); | ||
1143 | |||
1144 | /* | 1154 | /* |
1145 | * If we're not using multiport support, this has to be a console port | 1155 | * If we're not using multiport support, this has to be a console port |
1146 | */ | 1156 | */ |
1147 | if (!use_multiport(port->portdev)) { | 1157 | if (!use_multiport(port->portdev)) { |
1148 | err = init_port_console(port); | 1158 | err = init_port_console(port); |
1149 | if (err) | 1159 | if (err) |
1150 | goto free_inbuf; | 1160 | goto free_inbufs; |
1151 | } | 1161 | } |
1152 | 1162 | ||
1153 | spin_lock_irq(&portdev->ports_lock); | 1163 | spin_lock_irq(&portdev->ports_lock); |
@@ -1175,8 +1185,9 @@ static int add_port(struct ports_device *portdev, u32 id) | |||
1175 | } | 1185 | } |
1176 | return 0; | 1186 | return 0; |
1177 | 1187 | ||
1178 | free_inbuf: | 1188 | free_inbufs: |
1179 | free_buf(inbuf); | 1189 | while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) |
1190 | free_buf(buf); | ||
1180 | free_device: | 1191 | free_device: |
1181 | device_destroy(pdrvdata.class, port->dev->devt); | 1192 | device_destroy(pdrvdata.class, port->dev->devt); |
1182 | free_cdev: | 1193 | free_cdev: |