author     Alan Cox <alan@redhat.com>                        2008-10-13 05:36:58 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2008-10-13 12:51:40 -0400
commit     e04957365b21066285557e42ffe16d8330d46c02 (patch)
tree       c359a59a0ac283d051736534d835df2cc0f8e17b /drivers/char/tty_io.c
parent     c564b6fda961bd999aac0b709b79288dd8f426cd (diff)
tty: split the buffering from tty_io
The two are basically independent chunks of code, so let's split them up for
readability and sanity. It also makes the API boundaries much clearer.
Signed-off-by: Alan Cox <alan@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/char/tty_io.c')
-rw-r--r--  drivers/char/tty_io.c | 502 -
1 file changed, 0 insertions, 502 deletions
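For orientation, here is a minimal sketch (not part of this patch) of how a serial driver's receive path typically drives the flip-buffer interface being split out of tty_io.c. The my_uart_rx() and my_uart_read_fifo() names are hypothetical placeholders; only the tty_* calls correspond to functions moved by this commit.

/* Illustrative only: a hypothetical driver receive path using the
 * buffering API that this patch moves out of tty_io.c. */
static void my_uart_rx(struct tty_struct *tty)
{
	unsigned char buf[64];
	int count, space;

	count = my_uart_read_fifo(buf, sizeof(buf));	/* driver-specific */
	if (count <= 0)
		return;

	/* Ask the buffer layer for room; it may grant less than asked. */
	space = tty_buffer_request_room(tty, count);

	/* Queue the bytes, all flagged TTY_NORMAL (no errors). */
	tty_insert_flip_string(tty, buf, space);

	/* Commit the data and schedule the line discipline to consume it. */
	tty_flip_buffer_push(tty);
}

These entry points remain the boundary between drivers and the buffering code after the split, which is what the commit message means by clearer API boundaries.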
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 2f05728920e7..3a726936aa5b 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -176,8 +176,6 @@ static struct tty_struct *alloc_tty_struct(void)
 	return kzalloc(sizeof(struct tty_struct), GFP_KERNEL);
 }
 
-static void tty_buffer_free_all(struct tty_struct *);
-
 /**
  * free_tty_struct - free a disused tty
  * @tty: tty struct to free
@@ -263,398 +261,6 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
 	return 0;
 }
 
-/*
- * Tty buffer allocation management
- */
-
-/**
- * tty_buffer_free_all - free buffers used by a tty
- * @tty: tty to free from
- *
- * Remove all the buffers pending on a tty whether queued with data
- * or in the free ring. Must be called when the tty is no longer in use
- *
- * Locking: none
- */
-
-static void tty_buffer_free_all(struct tty_struct *tty)
-{
-	struct tty_buffer *thead;
-	while ((thead = tty->buf.head) != NULL) {
-		tty->buf.head = thead->next;
-		kfree(thead);
-	}
-	while ((thead = tty->buf.free) != NULL) {
-		tty->buf.free = thead->next;
-		kfree(thead);
-	}
-	tty->buf.tail = NULL;
-	tty->buf.memory_used = 0;
-}
-
-/**
- * tty_buffer_init - prepare a tty buffer structure
- * @tty: tty to initialise
- *
- * Set up the initial state of the buffer management for a tty device.
- * Must be called before the other tty buffer functions are used.
- *
- * Locking: none
- */
-
-static void tty_buffer_init(struct tty_struct *tty)
-{
-	spin_lock_init(&tty->buf.lock);
-	tty->buf.head = NULL;
-	tty->buf.tail = NULL;
-	tty->buf.free = NULL;
-	tty->buf.memory_used = 0;
-}
-
-/**
- * tty_buffer_alloc - allocate a tty buffer
- * @tty: tty device
- * @size: desired size (characters)
- *
- * Allocate a new tty buffer to hold the desired number of characters.
- * Return NULL if out of memory or the allocation would exceed the
- * per device queue
- *
- * Locking: Caller must hold tty->buf.lock
- */
-
-static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size)
-{
-	struct tty_buffer *p;
-
-	if (tty->buf.memory_used + size > 65536)
-		return NULL;
-	p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
-	if (p == NULL)
-		return NULL;
-	p->used = 0;
-	p->size = size;
-	p->next = NULL;
-	p->commit = 0;
-	p->read = 0;
-	p->char_buf_ptr = (char *)(p->data);
-	p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
-	tty->buf.memory_used += size;
-	return p;
-}
-
-/**
- * tty_buffer_free - free a tty buffer
- * @tty: tty owning the buffer
- * @b: the buffer to free
- *
- * Free a tty buffer, or add it to the free list according to our
- * internal strategy
- *
- * Locking: Caller must hold tty->buf.lock
- */
-
-static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
-{
-	/* Dumb strategy for now - should keep some stats */
-	tty->buf.memory_used -= b->size;
-	WARN_ON(tty->buf.memory_used < 0);
-
-	if (b->size >= 512)
-		kfree(b);
-	else {
-		b->next = tty->buf.free;
-		tty->buf.free = b;
-	}
-}
-
-/**
- * __tty_buffer_flush - flush full tty buffers
- * @tty: tty to flush
- *
- * flush all the buffers containing receive data. Caller must
- * hold the buffer lock and must have ensured no parallel flush to
- * ldisc is running.
- *
- * Locking: Caller must hold tty->buf.lock
- */
-
-static void __tty_buffer_flush(struct tty_struct *tty)
-{
-	struct tty_buffer *thead;
-
-	while ((thead = tty->buf.head) != NULL) {
-		tty->buf.head = thead->next;
-		tty_buffer_free(tty, thead);
-	}
-	tty->buf.tail = NULL;
-}
-
-/**
- * tty_buffer_flush - flush full tty buffers
- * @tty: tty to flush
- *
- * flush all the buffers containing receive data. If the buffer is
- * being processed by flush_to_ldisc then we defer the processing
- * to that function
- *
- * Locking: none
- */
-
-static void tty_buffer_flush(struct tty_struct *tty)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&tty->buf.lock, flags);
-
-	/* If the data is being pushed to the tty layer then we can't
-	   process it here. Instead set a flag and the flush_to_ldisc
-	   path will process the flush request before it exits */
-	if (test_bit(TTY_FLUSHING, &tty->flags)) {
-		set_bit(TTY_FLUSHPENDING, &tty->flags);
-		spin_unlock_irqrestore(&tty->buf.lock, flags);
-		wait_event(tty->read_wait,
-				test_bit(TTY_FLUSHPENDING, &tty->flags) == 0);
-		return;
-	} else
-		__tty_buffer_flush(tty);
-	spin_unlock_irqrestore(&tty->buf.lock, flags);
-}
-
-/**
- * tty_buffer_find - find a free tty buffer
- * @tty: tty owning the buffer
- * @size: characters wanted
- *
- * Locate an existing suitable tty buffer or if we are lacking one then
- * allocate a new one. We round our buffers off in 256 character chunks
- * to get better allocation behaviour.
- *
- * Locking: Caller must hold tty->buf.lock
- */
-
-static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
-{
-	struct tty_buffer **tbh = &tty->buf.free;
-	while ((*tbh) != NULL) {
-		struct tty_buffer *t = *tbh;
-		if (t->size >= size) {
-			*tbh = t->next;
-			t->next = NULL;
-			t->used = 0;
-			t->commit = 0;
-			t->read = 0;
-			tty->buf.memory_used += t->size;
-			return t;
-		}
-		tbh = &((*tbh)->next);
-	}
-	/* Round the buffer size out */
-	size = (size + 0xFF) & ~0xFF;
-	return tty_buffer_alloc(tty, size);
-	/* Should possibly check if this fails for the largest buffer we
-	   have queued and recycle that ? */
-}
-
-/**
- * tty_buffer_request_room - grow tty buffer if needed
- * @tty: tty structure
- * @size: size desired
- *
- * Make at least size bytes of linear space available for the tty
- * buffer. If we fail return the size we managed to find.
- *
- * Locking: Takes tty->buf.lock
- */
-int tty_buffer_request_room(struct tty_struct *tty, size_t size)
-{
-	struct tty_buffer *b, *n;
-	int left;
-	unsigned long flags;
-
-	spin_lock_irqsave(&tty->buf.lock, flags);
-
-	/* OPTIMISATION: We could keep a per tty "zero" sized buffer to
-	   remove this conditional if its worth it. This would be invisible
-	   to the callers */
-	if ((b = tty->buf.tail) != NULL)
-		left = b->size - b->used;
-	else
-		left = 0;
-
-	if (left < size) {
-		/* This is the slow path - looking for new buffers to use */
-		if ((n = tty_buffer_find(tty, size)) != NULL) {
-			if (b != NULL) {
-				b->next = n;
-				b->commit = b->used;
-			} else
-				tty->buf.head = n;
-			tty->buf.tail = n;
-		} else
-			size = left;
-	}
-
-	spin_unlock_irqrestore(&tty->buf.lock, flags);
-	return size;
-}
-EXPORT_SYMBOL_GPL(tty_buffer_request_room);
-
-/**
- * tty_insert_flip_string - Add characters to the tty buffer
- * @tty: tty structure
- * @chars: characters
- * @size: size
- *
- * Queue a series of bytes to the tty buffering. All the characters
- * passed are marked as without error. Returns the number added.
- *
- * Locking: Called functions may take tty->buf.lock
- */
-
-int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
-				size_t size)
-{
-	int copied = 0;
-	do {
-		int space = tty_buffer_request_room(tty, size - copied);
-		struct tty_buffer *tb = tty->buf.tail;
-		/* If there is no space then tb may be NULL */
-		if (unlikely(space == 0))
-			break;
-		memcpy(tb->char_buf_ptr + tb->used, chars, space);
-		memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
-		tb->used += space;
-		copied += space;
-		chars += space;
-		/* There is a small chance that we need to split the data over
-		   several buffers. If this is the case we must loop */
-	} while (unlikely(size > copied));
-	return copied;
-}
-EXPORT_SYMBOL(tty_insert_flip_string);
-
-/**
- * tty_insert_flip_string_flags - Add characters to the tty buffer
- * @tty: tty structure
- * @chars: characters
- * @flags: flag bytes
- * @size: size
- *
- * Queue a series of bytes to the tty buffering. For each character
- * the flags array indicates the status of the character. Returns the
- * number added.
- *
- * Locking: Called functions may take tty->buf.lock
- */
-
-int tty_insert_flip_string_flags(struct tty_struct *tty,
-		const unsigned char *chars, const char *flags, size_t size)
-{
-	int copied = 0;
-	do {
-		int space = tty_buffer_request_room(tty, size - copied);
-		struct tty_buffer *tb = tty->buf.tail;
-		/* If there is no space then tb may be NULL */
-		if (unlikely(space == 0))
-			break;
-		memcpy(tb->char_buf_ptr + tb->used, chars, space);
-		memcpy(tb->flag_buf_ptr + tb->used, flags, space);
-		tb->used += space;
-		copied += space;
-		chars += space;
-		flags += space;
-		/* There is a small chance that we need to split the data over
-		   several buffers. If this is the case we must loop */
-	} while (unlikely(size > copied));
-	return copied;
-}
-EXPORT_SYMBOL(tty_insert_flip_string_flags);
-
-/**
- * tty_schedule_flip - push characters to ldisc
- * @tty: tty to push from
- *
- * Takes any pending buffers and transfers their ownership to the
- * ldisc side of the queue. It then schedules those characters for
- * processing by the line discipline.
- *
- * Locking: Takes tty->buf.lock
- */
-
-void tty_schedule_flip(struct tty_struct *tty)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&tty->buf.lock, flags);
-	if (tty->buf.tail != NULL)
-		tty->buf.tail->commit = tty->buf.tail->used;
-	spin_unlock_irqrestore(&tty->buf.lock, flags);
-	schedule_delayed_work(&tty->buf.work, 1);
-}
-EXPORT_SYMBOL(tty_schedule_flip);
-
-/**
- * tty_prepare_flip_string - make room for characters
- * @tty: tty
- * @chars: return pointer for character write area
- * @size: desired size
- *
- * Prepare a block of space in the buffer for data. Returns the length
- * available and buffer pointer to the space which is now allocated and
- * accounted for as ready for normal characters. This is used for drivers
- * that need their own block copy routines into the buffer. There is no
- * guarantee the buffer is a DMA target!
- *
- * Locking: May call functions taking tty->buf.lock
- */
-
-int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars,
-				size_t size)
-{
-	int space = tty_buffer_request_room(tty, size);
-	if (likely(space)) {
-		struct tty_buffer *tb = tty->buf.tail;
-		*chars = tb->char_buf_ptr + tb->used;
-		memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
-		tb->used += space;
-	}
-	return space;
-}
-
-EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
-
-/**
- * tty_prepare_flip_string_flags - make room for characters
- * @tty: tty
- * @chars: return pointer for character write area
- * @flags: return pointer for status flag write area
- * @size: desired size
- *
- * Prepare a block of space in the buffer for data. Returns the length
- * available and buffer pointer to the space which is now allocated and
- * accounted for as ready for characters. This is used for drivers
- * that need their own block copy routines into the buffer. There is no
- * guarantee the buffer is a DMA target!
- *
- * Locking: May call functions taking tty->buf.lock
- */
-
-int tty_prepare_flip_string_flags(struct tty_struct *tty,
-			unsigned char **chars, char **flags, size_t size)
-{
-	int space = tty_buffer_request_room(tty, size);
-	if (likely(space)) {
-		struct tty_buffer *tb = tty->buf.tail;
-		*chars = tb->char_buf_ptr + tb->used;
-		*flags = tb->flag_buf_ptr + tb->used;
-		tb->used += space;
-	}
-	return space;
-}
-
-EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
-
-
-
 /**
  * get_tty_driver - find device of a tty
  * @dev_t: device identifier
@@ -3216,113 +2822,6 @@ void do_SAK(struct tty_struct *tty)
 EXPORT_SYMBOL(do_SAK);
 
 /**
- * flush_to_ldisc
- * @work: tty structure passed from work queue.
- *
- * This routine is called out of the software interrupt to flush data
- * from the buffer chain to the line discipline.
- *
- * Locking: holds tty->buf.lock to guard buffer list. Drops the lock
- * while invoking the line discipline receive_buf method. The
- * receive_buf method is single threaded for each tty instance.
- */
-
-static void flush_to_ldisc(struct work_struct *work)
-{
-	struct tty_struct *tty =
-		container_of(work, struct tty_struct, buf.work.work);
-	unsigned long flags;
-	struct tty_ldisc *disc;
-	struct tty_buffer *tbuf, *head;
-	char *char_buf;
-	unsigned char *flag_buf;
-
-	disc = tty_ldisc_ref(tty);
-	if (disc == NULL)	/* !TTY_LDISC */
-		return;
-
-	spin_lock_irqsave(&tty->buf.lock, flags);
-	/* So we know a flush is running */
-	set_bit(TTY_FLUSHING, &tty->flags);
-	head = tty->buf.head;
-	if (head != NULL) {
-		tty->buf.head = NULL;
-		for (;;) {
-			int count = head->commit - head->read;
-			if (!count) {
-				if (head->next == NULL)
-					break;
-				tbuf = head;
-				head = head->next;
-				tty_buffer_free(tty, tbuf);
-				continue;
-			}
-			/* Ldisc or user is trying to flush the buffers
-			   we are feeding to the ldisc, stop feeding the
-			   line discipline as we want to empty the queue */
-			if (test_bit(TTY_FLUSHPENDING, &tty->flags))
-				break;
-			if (!tty->receive_room) {
-				schedule_delayed_work(&tty->buf.work, 1);
-				break;
-			}
-			if (count > tty->receive_room)
-				count = tty->receive_room;
-			char_buf = head->char_buf_ptr + head->read;
-			flag_buf = head->flag_buf_ptr + head->read;
-			head->read += count;
-			spin_unlock_irqrestore(&tty->buf.lock, flags);
-			disc->ops->receive_buf(tty, char_buf,
-							flag_buf, count);
-			spin_lock_irqsave(&tty->buf.lock, flags);
-		}
-		/* Restore the queue head */
-		tty->buf.head = head;
-	}
-	/* We may have a deferred request to flush the input buffer,
-	   if so pull the chain under the lock and empty the queue */
-	if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
-		__tty_buffer_flush(tty);
-		clear_bit(TTY_FLUSHPENDING, &tty->flags);
-		wake_up(&tty->read_wait);
-	}
-	clear_bit(TTY_FLUSHING, &tty->flags);
-	spin_unlock_irqrestore(&tty->buf.lock, flags);
-
-	tty_ldisc_deref(disc);
-}
-
-/**
- * tty_flip_buffer_push - terminal
- * @tty: tty to push
- *
- * Queue a push of the terminal flip buffers to the line discipline. This
- * function must not be called from IRQ context if tty->low_latency is set.
- *
- * In the event of the queue being busy for flipping the work will be
- * held off and retried later.
- *
- * Locking: tty buffer lock. Driver locks in low latency mode.
- */
-
-void tty_flip_buffer_push(struct tty_struct *tty)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&tty->buf.lock, flags);
-	if (tty->buf.tail != NULL)
-		tty->buf.tail->commit = tty->buf.tail->used;
-	spin_unlock_irqrestore(&tty->buf.lock, flags);
-
-	if (tty->low_latency)
-		flush_to_ldisc(&tty->buf.work.work);
-	else
-		schedule_delayed_work(&tty->buf.work, 1);
-}
-
-EXPORT_SYMBOL(tty_flip_buffer_push);
-
-
-/**
  * initialize_tty_struct
  * @tty: tty to initialize
  *
@@ -3342,7 +2841,6 @@ static void initialize_tty_struct(struct tty_struct *tty)
 	tty->overrun_time = jiffies;
 	tty->buf.head = tty->buf.tail = NULL;
 	tty_buffer_init(tty);
-	INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
 	mutex_init(&tty->termios_mutex);
 	init_waitqueue_head(&tty->write_wait);
 	init_waitqueue_head(&tty->read_wait);