aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/tty/tty_buffer.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/tty/tty_buffer.c')
-rw-r--r--  drivers/tty/tty_buffer.c | 31 ++++++++++++++---------------
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index f1d30f6945af..143deb62467d 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -60,6 +60,7 @@ void tty_buffer_lock_exclusive(struct tty_port *port)
 	atomic_inc(&buf->priority);
 	mutex_lock(&buf->lock);
 }
+EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);
 
 void tty_buffer_unlock_exclusive(struct tty_port *port)
 {
@@ -73,6 +74,7 @@ void tty_buffer_unlock_exclusive(struct tty_port *port)
 	if (restart)
 		queue_work(system_unbound_wq, &buf->work);
 }
+EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);
 
 /**
  * tty_buffer_space_avail - return unused buffer space
@@ -255,16 +257,15 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
 	if (change || left < size) {
 		/* This is the slow path - looking for new buffers to use */
 		if ((n = tty_buffer_alloc(port, size)) != NULL) {
-			unsigned long iflags;
-
 			n->flags = flags;
 			buf->tail = n;
-
-			spin_lock_irqsave(&buf->flush_lock, iflags);
 			b->commit = b->used;
+			/* paired w/ barrier in flush_to_ldisc(); ensures the
+			 * latest commit value can be read before the head is
+			 * advanced to the next buffer
+			 */
+			smp_wmb();
 			b->next = n;
-			spin_unlock_irqrestore(&buf->flush_lock, iflags);
-
 		} else if (change)
 			size = 0;
 		else
@@ -448,27 +449,28 @@ static void flush_to_ldisc(struct work_struct *work)
 	mutex_lock(&buf->lock);
 
 	while (1) {
-		unsigned long flags;
 		struct tty_buffer *head = buf->head;
+		struct tty_buffer *next;
 		int count;
 
 		/* Ldisc or user is trying to gain exclusive access */
 		if (atomic_read(&buf->priority))
 			break;
 
-		spin_lock_irqsave(&buf->flush_lock, flags);
+		next = head->next;
+		/* paired w/ barrier in __tty_buffer_request_room();
+		 * ensures commit value read is not stale if the head
+		 * is advancing to the next buffer
+		 */
+		smp_rmb();
 		count = head->commit - head->read;
 		if (!count) {
-			if (head->next == NULL) {
-				spin_unlock_irqrestore(&buf->flush_lock, flags);
+			if (next == NULL)
 				break;
-			}
-			buf->head = head->next;
-			spin_unlock_irqrestore(&buf->flush_lock, flags);
+			buf->head = next;
 			tty_buffer_free(port, head);
 			continue;
 		}
-		spin_unlock_irqrestore(&buf->flush_lock, flags);
 
 		count = receive_buf(tty, head, count);
 		if (!count)
@@ -523,7 +525,6 @@ void tty_buffer_init(struct tty_port *port)
 	struct tty_bufhead *buf = &port->buf;
 
 	mutex_init(&buf->lock);
-	spin_lock_init(&buf->flush_lock);
 	tty_buffer_reset(&buf->sentinel, 0);
 	buf->head = &buf->sentinel;
 	buf->tail = &buf->sentinel;