author     Peter Hurley <peter@hurleysoftware.com>          2013-06-15 09:36:15 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-07-23 19:47:10 -0400
commit     a7c8d58c79853adeebf0a1ddc9c63e433b4d97f1
tree       328d40837fb13c7856d3aae2be663093fa6278e0          /drivers/tty/tty_buffer.c
parent     47aa658a015440906def231f54685c4d5d49dc38
tty: Fix unsafe vt paste_selection()
Convert the tty_buffer_flush() exclusion mechanism to a public
interface - tty_buffer_lock/unlock_exclusive() - and use the interface
to safely write the paste selection to the line discipline.

Signed-off-by: Peter Hurley <peter@hurleysoftware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/tty/tty_buffer.c')
-rw-r--r--   drivers/tty/tty_buffer.c   61
1 file changed, 48 insertions(+), 13 deletions(-)
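For context, the caller side of this change lives in drivers/tty/vt/selection.c and is not part of this file's diff. The sketch below is only an illustration of how a paste path might drive the new interface; the helper name is hypothetical and the flow control is simplified (the real paste_selection() also waits for receive_room and handles signals). The writer takes the exclusive buffer lock so flush_to_ldisc() and tty_buffer_flush() stay off the flip buffer, feeds the selection to the line discipline's receive_buf(), then releases the lock, which requeues the buffer work if driver data arrived in the meantime.

/* Hypothetical sketch only -- not the actual vt paste code. */
#include <linux/tty.h>
#include <linux/tty_ldisc.h>

static void paste_to_ldisc_sketch(struct tty_struct *tty,
                                  const unsigned char *sel, size_t len)
{
        struct tty_ldisc *ld = tty_ldisc_ref_wait(tty);
        size_t pasted = 0;

        tty_buffer_lock_exclusive(tty->port);   /* exclude buffer work and flushes */
        while (pasted < len) {
                int count = min_t(int, len - pasted, tty->receive_room);

                if (count <= 0)
                        break;                  /* real code waits for room instead */
                ld->ops->receive_buf(tty, sel + pasted, NULL, count);
                pasted += count;
        }
        tty_buffer_unlock_exclusive(tty->port); /* restarts buffer work if needed */
        tty_ldisc_deref(ld);
}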
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index dbe4a718e2dc..f22e116db105 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -30,6 +30,42 @@
 
 
 /**
+ * tty_buffer_lock_exclusive - gain exclusive access to buffer
+ * tty_buffer_unlock_exclusive - release exclusive access
+ *
+ * @port - tty_port owning the flip buffer
+ *
+ * Guarantees safe use of the line discipline's receive_buf() method by
+ * excluding the buffer work and any pending flush from using the flip
+ * buffer. Data can continue to be added concurrently to the flip buffer
+ * from the driver side.
+ *
+ * On release, the buffer work is restarted if there is data in the
+ * flip buffer
+ */
+
+void tty_buffer_lock_exclusive(struct tty_port *port)
+{
+        struct tty_bufhead *buf = &port->buf;
+
+        atomic_inc(&buf->priority);
+        mutex_lock(&buf->lock);
+}
+
+void tty_buffer_unlock_exclusive(struct tty_port *port)
+{
+        struct tty_bufhead *buf = &port->buf;
+        int restart;
+
+        restart = buf->head->commit != buf->head->read;
+
+        atomic_dec(&buf->priority);
+        mutex_unlock(&buf->lock);
+        if (restart)
+                queue_work(system_unbound_wq, &buf->work);
+}
+
+/**
  * tty_buffer_space_avail - return unused buffer space
  * @port - tty_port owning the flip buffer
 *
@@ -158,7 +194,7 @@ static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
  * being processed by flush_to_ldisc then we defer the processing
  * to that function
  *
- * Locking: takes flush_mutex to ensure single-threaded flip buffer
+ * Locking: takes buffer lock to ensure single-threaded flip buffer
  *          'consumer'
  */
 
@@ -168,16 +204,16 @@ void tty_buffer_flush(struct tty_struct *tty)
         struct tty_bufhead *buf = &port->buf;
         struct tty_buffer *next;
 
-        buf->flushpending = 1;
+        atomic_inc(&buf->priority);
 
-        mutex_lock(&buf->flush_mutex);
+        mutex_lock(&buf->lock);
         while ((next = buf->head->next) != NULL) {
                 tty_buffer_free(port, buf->head);
                 buf->head = next;
         }
         buf->head->read = buf->head->commit;
-        buf->flushpending = 0;
-        mutex_unlock(&buf->flush_mutex);
+        atomic_dec(&buf->priority);
+        mutex_unlock(&buf->lock);
 }
 
 /**
@@ -383,7 +419,7 @@ receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
  *
  * The receive_buf method is single threaded for each tty instance.
  *
- * Locking: takes flush_mutex to ensure single-threaded flip buffer
+ * Locking: takes buffer lock to ensure single-threaded flip buffer
  *          'consumer'
  */
 
@@ -402,14 +438,14 @@ static void flush_to_ldisc(struct work_struct *work)
         if (disc == NULL)
                 return;
 
-        mutex_lock(&buf->flush_mutex);
+        mutex_lock(&buf->lock);
 
         while (1) {
                 struct tty_buffer *head = buf->head;
                 int count;
 
-                /* Ldisc or user is trying to flush the buffers. */
-                if (buf->flushpending)
+                /* Ldisc or user is trying to gain exclusive access */
+                if (atomic_read(&buf->priority))
                         break;
 
                 count = head->commit - head->read;
@@ -426,7 +462,7 @@ static void flush_to_ldisc(struct work_struct *work)
                 break;
         }
 
-        mutex_unlock(&buf->flush_mutex);
+        mutex_unlock(&buf->lock);
 
         tty_ldisc_deref(disc);
 }
@@ -482,13 +518,12 @@ void tty_buffer_init(struct tty_port *port)
 {
         struct tty_bufhead *buf = &port->buf;
 
-        mutex_init(&buf->flush_mutex);
+        mutex_init(&buf->lock);
         tty_buffer_reset(&buf->sentinel, 0);
         buf->head = &buf->sentinel;
         buf->tail = &buf->sentinel;
         init_llist_head(&buf->free);
         atomic_set(&buf->memory_used, 0);
-        buf->flushpending = 0;
+        atomic_set(&buf->priority, 0);
         INIT_WORK(&buf->work, flush_to_ldisc);
 }
-
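The pattern in flush_to_ldisc() above - bump an atomic "priority" count first, then take the mutex, while the consumer re-checks the count between work items - is what keeps tty_buffer_flush() and the new exclusive lock from waiting behind an arbitrarily large backlog. A rough userspace analogue of the same idea (not kernel code; plain C11 atomics and pthreads, all names hypothetical):

/*
 * Userspace analogue of the priority/lock pattern: the consumer re-checks an
 * atomic "priority" flag between work items and drops the mutex early, so a
 * would-be exclusive holder does not wait for the whole backlog to drain.
 * Build: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int priority;
static int backlog = 1000000;

static void *consumer(void *arg)
{
        pthread_mutex_lock(&lock);
        while (backlog > 0) {
                if (atomic_load(&priority))     /* someone wants exclusive access */
                        break;
                backlog--;                      /* process one work item */
        }
        pthread_mutex_unlock(&lock);
        return NULL;
}

static void lock_exclusive(void)
{
        atomic_fetch_add(&priority, 1);         /* ask the consumer to yield */
        pthread_mutex_lock(&lock);
}

static void unlock_exclusive(void)
{
        atomic_fetch_sub(&priority, 1);
        pthread_mutex_unlock(&lock);
        /* the kernel version requeues the buffer work here if data remains */
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, consumer, NULL);
        lock_exclusive();
        printf("exclusive access with %d items still queued\n", backlog);
        unlock_exclusive();
        pthread_join(t, NULL);
        return 0;
}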