author     Amit Shah <amit.shah@redhat.com>        2010-01-18 08:45:12 -0500
committer  Rusty Russell <rusty@rustcorp.com.au>   2010-02-23 22:52:50 -0500
commit     203baab8ba3195dd929473ba95b91c2b838833e6 (patch)
tree       e6720bb993a46f53bd4607d55701606c754f2c00 /drivers/char/virtio_console.c
parent     2658a79acf014deb0eaff2063f8f7a2b59f41285 (diff)
virtio: console: Introduce function to hand off data from host to readers
In preparation for serving data to userspace (generic ports) as well as
in-kernel users (hvc consoles), separate out the functionality common to
both into a 'fill_readbuf()' function.
Signed-off-by: Amit Shah <amit.shah@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
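
[Editorial note] For context on where this is headed: the same fill_readbuf()
helper is meant to eventually back userspace readers (the "generic ports"
mentioned above) in addition to the in-kernel hvc console path that
get_chars() now uses. Below is a minimal sketch of what such a consumer
might look like; the function name, the bounce buffer and the char-device
plumbing around it are assumptions made purely for illustration and are not
part of this patch. fill_readbuf() as introduced here copies into a kernel
buffer, so a userspace caller would need copy_to_user():

/*
 * Illustrative sketch only -- not part of this patch.  A later
 * userspace read path could sit on top of fill_readbuf() roughly
 * like this; the helper name and the bounce buffer are assumptions.
 */
static ssize_t example_port_read(struct port *port, char __user *ubuf,
				 size_t count)
{
	char *kbuf;
	ssize_t len;

	if (!count)
		return 0;

	/* fill_readbuf() writes into kernel memory, so bounce the data. */
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	/* Same helper the hvc get_chars() callback uses in the patch below. */
	len = fill_readbuf(port, kbuf, count);
	if (len > 0 && copy_to_user(ubuf, kbuf, len))
		len = -EFAULT;

	kfree(kbuf);
	return len;
}

The reason for port_has_data() and the new inbuf_lock is visible in such a
split: fill_readbuf() can be entered from process context like this while
hvc_handle_input() refills port->inbuf from virtqueue interrupt context.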
Diffstat (limited to 'drivers/char/virtio_console.c')
-rw-r--r--  drivers/char/virtio_console.c  142
1 file changed, 106 insertions(+), 36 deletions(-)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 75c5a3512ec..5096d92f5b8 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -100,6 +100,13 @@ struct port {
 	/* The current buffer from which data has to be fed to readers */
 	struct port_buffer *inbuf;
 
+	/*
+	 * To protect the operations on the in_vq associated with this
+	 * port. Has to be a spinlock because it can be called from
+	 * interrupt context (get_char()).
+	 */
+	spinlock_t inbuf_lock;
+
 	/* The IO vqs for this port */
 	struct virtqueue *in_vq, *out_vq;
 
@@ -132,6 +139,25 @@ out:
 	return port;
 }
 
+static struct port *find_port_by_vq(struct ports_device *portdev,
+				    struct virtqueue *vq)
+{
+	struct port *port;
+	struct console *cons;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdrvdata_lock, flags);
+	list_for_each_entry(cons, &pdrvdata.consoles, list) {
+		port = container_of(cons, struct port, cons);
+		if (port->in_vq == vq || port->out_vq == vq)
+			goto out;
+	}
+	port = NULL;
+out:
+	spin_unlock_irqrestore(&pdrvdata_lock, flags);
+	return port;
+}
+
 static void free_buf(struct port_buffer *buf)
 {
 	kfree(buf->buf);
@@ -181,15 +207,67 @@ static void *get_inbuf(struct port *port)
  *
  * Callers should take appropriate locks.
  */
-static void add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
+static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
 {
 	struct scatterlist sg[1];
+	int ret;
 
 	sg_init_one(sg, buf->buf, buf->size);
 
-	if (vq->vq_ops->add_buf(vq, sg, 0, 1, buf) < 0)
-		BUG();
+	ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf);
 	vq->vq_ops->kick(vq);
+	return ret;
+}
+
+static bool port_has_data(struct port *port)
+{
+	unsigned long flags;
+	bool ret;
+
+	ret = false;
+	spin_lock_irqsave(&port->inbuf_lock, flags);
+	if (port->inbuf)
+		ret = true;
+	spin_unlock_irqrestore(&port->inbuf_lock, flags);
+
+	return ret;
+}
+
+/*
+ * Give out the data that's requested from the buffer that we have
+ * queued up.
+ */
+static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count)
+{
+	struct port_buffer *buf;
+	unsigned long flags;
+
+	if (!out_count || !port_has_data(port))
+		return 0;
+
+	buf = port->inbuf;
+	if (out_count > buf->len - buf->offset)
+		out_count = buf->len - buf->offset;
+
+	memcpy(out_buf, buf->buf + buf->offset, out_count);
+
+	/* Return the number of bytes actually copied */
+	buf->offset += out_count;
+
+	if (buf->offset == buf->len) {
+		/*
+		 * We're done using all the data in this buffer.
+		 * Re-queue so that the Host can send us more data.
+		 */
+		spin_lock_irqsave(&port->inbuf_lock, flags);
+		port->inbuf = NULL;
+
+		if (add_inbuf(port->in_vq, buf) < 0)
+			dev_warn(&port->portdev->vdev->dev, "failed add_buf\n");
+
+		spin_unlock_irqrestore(&port->inbuf_lock, flags);
+	}
+	return out_count;
 }
 
 /*
@@ -234,9 +312,8 @@ static int put_chars(u32 vtermno, const char *buf, int count)
  * get_chars() is the callback from the hvc_console infrastructure
  * when an interrupt is received.
  *
- * Most of the code deals with the fact that the hvc_console()
- * infrastructure only asks us for 16 bytes at a time. We keep
- * in_offset and in_used fields for partially-filled buffers.
+ * We call out to fill_readbuf that gets us the required data from the
+ * buffers that are queued up.
  */
 static int get_chars(u32 vtermno, char *buf, int count)
 {
@@ -249,25 +326,7 @@ static int get_chars(u32 vtermno, char *buf, int count)
 	/* If we don't have an input queue yet, we can't get input. */
 	BUG_ON(!port->in_vq);
 
-	/* No more in buffer?  See if they've (re)used it. */
-	if (port->inbuf->offset == port->inbuf->len) {
-		if (!get_inbuf(port))
-			return 0;
-	}
-
-	/* You want more than we have to give?  Well, try wanting less! */
-	if (port->inbuf->offset + count > port->inbuf->len)
-		count = port->inbuf->len - port->inbuf->offset;
-
-	/* Copy across to their buffer and increment offset. */
-	memcpy(buf, port->inbuf->buf + port->inbuf->offset, count);
-	port->inbuf->offset += count;
-
-	/* Finished? Re-register buffer so Host will use it again. */
-	if (port->inbuf->offset == port->inbuf->len)
-		add_inbuf(port->in_vq, port->inbuf);
-
-	return count;
+	return fill_readbuf(port, buf, count);
 }
 
 static void resize_console(struct port *port)
@@ -314,13 +373,18 @@ static void notifier_del_vio(struct hvc_struct *hp, int data)
 
 static void hvc_handle_input(struct virtqueue *vq)
 {
-	struct console *cons;
-	bool activity = false;
+	struct port *port;
+	unsigned long flags;
+
+	port = find_port_by_vq(vq->vdev->priv, vq);
+	if (!port)
+		return;
 
-	list_for_each_entry(cons, &pdrvdata.consoles, list)
-		activity |= hvc_poll(cons->hvc);
+	spin_lock_irqsave(&port->inbuf_lock, flags);
+	port->inbuf = get_inbuf(port);
+	spin_unlock_irqrestore(&port->inbuf_lock, flags);
 
-	if (activity)
+	if (hvc_poll(port->cons.hvc))
 		hvc_kick();
 }
 
@@ -388,6 +452,7 @@ int __devinit init_port_console(struct port *port)
 static int __devinit add_port(struct ports_device *portdev)
 {
 	struct port *port;
+	struct port_buffer *inbuf;
 	int err;
 
 	port = kmalloc(sizeof(*port), GFP_KERNEL);
@@ -397,26 +462,31 @@ static int __devinit add_port(struct ports_device *portdev)
 	}
 
 	port->portdev = portdev;
+
+	port->inbuf = NULL;
+
 	port->in_vq = portdev->in_vqs[0];
 	port->out_vq = portdev->out_vqs[0];
 
-	port->inbuf = alloc_buf(PAGE_SIZE);
-	if (!port->inbuf) {
+	spin_lock_init(&port->inbuf_lock);
+
+	inbuf = alloc_buf(PAGE_SIZE);
+	if (!inbuf) {
 		err = -ENOMEM;
 		goto free_port;
 	}
 
+	/* Register the input buffer the first time. */
+	add_inbuf(port->in_vq, inbuf);
+
 	err = init_port_console(port);
 	if (err)
 		goto free_inbuf;
 
-	/* Register the input buffer the first time. */
-	add_inbuf(port->in_vq, port->inbuf);
-
 	return 0;
 
 free_inbuf:
-	free_buf(port->inbuf);
+	free_buf(inbuf);
 free_port:
 	kfree(port);
 fail: