aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-22 19:17:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-22 19:17:32 -0400
commitf23eb2b2b28547fc70df82dd5049eb39bec5ba12 (patch)
tree144dce462b34d8a232a06f766786ebfb0235fa87
parentf741a79e982cf56d7584435bad663553ffe6715f (diff)
tty: stop using "delayed_work" in the tty layer

Using delayed-work for tty flip buffers ends up causing us to wait for the
next tick to complete some actions.  That's usually not all that noticeable,
but for certain latency-critical workloads it ends up being totally
unacceptable.

As an extreme case of this, passing a token back-and-forth over a pty will
take two ticks per iteration, so even just a thousand iterations will take
8 seconds assuming a common 250Hz configuration.

Avoiding the whole delayed work issue brings that ping-pong test-case down
to 0.009s on my machine.

In more practical terms, this latency has been a performance problem for
things like dive computer simulators (simulating the serial interface using
the ptys) and for other environments (Alan mentions a CP/M emulator).

Reported-by: Jef Driesen <jefdriesen@telenet.be>
Acked-by: Greg KH <gregkh@suse.de>
Acked-by: Alan Cox <alan@lxorguk.ukuu.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--drivers/tty/tty_buffer.c14
-rw-r--r--drivers/tty/tty_ldisc.c14
-rw-r--r--include/linux/kbd_kern.h2
-rw-r--r--include/linux/tty.h2
4 files changed, 16 insertions, 16 deletions
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index d8210ca00720..b9451219528b 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -322,7 +322,7 @@ void tty_schedule_flip(struct tty_struct *tty)
322 if (tty->buf.tail != NULL) 322 if (tty->buf.tail != NULL)
323 tty->buf.tail->commit = tty->buf.tail->used; 323 tty->buf.tail->commit = tty->buf.tail->used;
324 spin_unlock_irqrestore(&tty->buf.lock, flags); 324 spin_unlock_irqrestore(&tty->buf.lock, flags);
325 schedule_delayed_work(&tty->buf.work, 1); 325 schedule_work(&tty->buf.work);
326} 326}
327EXPORT_SYMBOL(tty_schedule_flip); 327EXPORT_SYMBOL(tty_schedule_flip);
328 328
@@ -402,7 +402,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
402static void flush_to_ldisc(struct work_struct *work) 402static void flush_to_ldisc(struct work_struct *work)
403{ 403{
404 struct tty_struct *tty = 404 struct tty_struct *tty =
405 container_of(work, struct tty_struct, buf.work.work); 405 container_of(work, struct tty_struct, buf.work);
406 unsigned long flags; 406 unsigned long flags;
407 struct tty_ldisc *disc; 407 struct tty_ldisc *disc;
408 408
@@ -443,7 +443,7 @@ static void flush_to_ldisc(struct work_struct *work)
443 if (test_bit(TTY_FLUSHPENDING, &tty->flags)) 443 if (test_bit(TTY_FLUSHPENDING, &tty->flags))
444 break; 444 break;
445 if (!tty->receive_room || seen_tail) { 445 if (!tty->receive_room || seen_tail) {
446 schedule_delayed_work(&tty->buf.work, 1); 446 schedule_work(&tty->buf.work);
447 break; 447 break;
448 } 448 }
449 if (count > tty->receive_room) 449 if (count > tty->receive_room)
@@ -481,7 +481,7 @@ static void flush_to_ldisc(struct work_struct *work)
481 */ 481 */
482void tty_flush_to_ldisc(struct tty_struct *tty) 482void tty_flush_to_ldisc(struct tty_struct *tty)
483{ 483{
484 flush_delayed_work(&tty->buf.work); 484 flush_work(&tty->buf.work);
485} 485}
486 486
487/** 487/**
@@ -506,9 +506,9 @@ void tty_flip_buffer_push(struct tty_struct *tty)
506 spin_unlock_irqrestore(&tty->buf.lock, flags); 506 spin_unlock_irqrestore(&tty->buf.lock, flags);
507 507
508 if (tty->low_latency) 508 if (tty->low_latency)
509 flush_to_ldisc(&tty->buf.work.work); 509 flush_to_ldisc(&tty->buf.work);
510 else 510 else
511 schedule_delayed_work(&tty->buf.work, 1); 511 schedule_work(&tty->buf.work);
512} 512}
513EXPORT_SYMBOL(tty_flip_buffer_push); 513EXPORT_SYMBOL(tty_flip_buffer_push);
514 514
@@ -529,6 +529,6 @@ void tty_buffer_init(struct tty_struct *tty)
529 tty->buf.tail = NULL; 529 tty->buf.tail = NULL;
530 tty->buf.free = NULL; 530 tty->buf.free = NULL;
531 tty->buf.memory_used = 0; 531 tty->buf.memory_used = 0;
532 INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc); 532 INIT_WORK(&tty->buf.work, flush_to_ldisc);
533} 533}
534 534
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 0fc564a97706..e19e13647116 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -529,7 +529,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
529static int tty_ldisc_halt(struct tty_struct *tty) 529static int tty_ldisc_halt(struct tty_struct *tty)
530{ 530{
531 clear_bit(TTY_LDISC, &tty->flags); 531 clear_bit(TTY_LDISC, &tty->flags);
532 return cancel_delayed_work_sync(&tty->buf.work); 532 return cancel_work_sync(&tty->buf.work);
533} 533}
534 534
535/** 535/**
@@ -542,7 +542,7 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
542{ 542{
543 flush_work_sync(&tty->hangup_work); 543 flush_work_sync(&tty->hangup_work);
544 flush_work_sync(&tty->SAK_work); 544 flush_work_sync(&tty->SAK_work);
545 flush_delayed_work_sync(&tty->buf.work); 545 flush_work_sync(&tty->buf.work);
546} 546}
547 547
548/** 548/**
@@ -722,9 +722,9 @@ enable:
722 /* Restart the work queue in case no characters kick it off. Safe if 722 /* Restart the work queue in case no characters kick it off. Safe if
723 already running */ 723 already running */
724 if (work) 724 if (work)
725 schedule_delayed_work(&tty->buf.work, 1); 725 schedule_work(&tty->buf.work);
726 if (o_work) 726 if (o_work)
727 schedule_delayed_work(&o_tty->buf.work, 1); 727 schedule_work(&o_tty->buf.work);
728 mutex_unlock(&tty->ldisc_mutex); 728 mutex_unlock(&tty->ldisc_mutex);
729 tty_unlock(); 729 tty_unlock();
730 return retval; 730 return retval;
@@ -830,12 +830,12 @@ void tty_ldisc_hangup(struct tty_struct *tty)
830 830
831 /* 831 /*
832 * this is like tty_ldisc_halt, but we need to give up 832 * this is like tty_ldisc_halt, but we need to give up
833 * the BTM before calling cancel_delayed_work_sync, 833 * the BTM before calling cancel_work_sync, which may
834 * which may need to wait for another function taking the BTM 834 * need to wait for another function taking the BTM
835 */ 835 */
836 clear_bit(TTY_LDISC, &tty->flags); 836 clear_bit(TTY_LDISC, &tty->flags);
837 tty_unlock(); 837 tty_unlock();
838 cancel_delayed_work_sync(&tty->buf.work); 838 cancel_work_sync(&tty->buf.work);
839 mutex_unlock(&tty->ldisc_mutex); 839 mutex_unlock(&tty->ldisc_mutex);
840 840
841 tty_lock(); 841 tty_lock();
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index 4b0761cc7dd9..ec2d17bc1f1e 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -159,7 +159,7 @@ static inline void con_schedule_flip(struct tty_struct *t)
159 if (t->buf.tail != NULL) 159 if (t->buf.tail != NULL)
160 t->buf.tail->commit = t->buf.tail->used; 160 t->buf.tail->commit = t->buf.tail->used;
161 spin_unlock_irqrestore(&t->buf.lock, flags); 161 spin_unlock_irqrestore(&t->buf.lock, flags);
162 schedule_delayed_work(&t->buf.work, 0); 162 schedule_work(&t->buf.work);
163} 163}
164 164
165#endif 165#endif
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 4e53d4641b38..9f469c700550 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -82,7 +82,7 @@ struct tty_buffer {
82 82
83 83
84struct tty_bufhead { 84struct tty_bufhead {
85 struct delayed_work work; 85 struct work_struct work;
86 spinlock_t lock; 86 spinlock_t lock;
87 struct tty_buffer *head; /* Queue head */ 87 struct tty_buffer *head; /* Queue head */
88 struct tty_buffer *tail; /* Active buffer */ 88 struct tty_buffer *tail; /* Active buffer */