Diffstat (limited to 'drivers/s390/char/sclp_vt220.c')
-rw-r--r--  drivers/s390/char/sclp_vt220.c  118
1 file changed, 79 insertions, 39 deletions
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index a839aa531d7c..5518e24946aa 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -1,10 +1,9 @@
 /*
- * drivers/s390/char/sclp_vt220.c
- * SCLP VT220 terminal driver.
+ * SCLP VT220 terminal driver.
  *
- * S390 version
- * Copyright IBM Corp. 2003,2008
+ * Copyright IBM Corp. 2003, 2009
+ *
  * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
  */
 
 #include <linux/module.h>
@@ -69,8 +68,11 @@ static struct list_head sclp_vt220_empty;
 /* List of pending requests */
 static struct list_head sclp_vt220_outqueue;
 
-/* Number of requests in outqueue */
-static int sclp_vt220_outqueue_count;
+/* Suspend mode flag */
+static int sclp_vt220_suspended;
+
+/* Flag that output queue is currently running */
+static int sclp_vt220_queue_running;
 
 /* Timer used for delaying write requests to merge subsequent messages into
  * a single buffer */
@@ -92,6 +94,8 @@ static int __initdata sclp_vt220_init_count;
 static int sclp_vt220_flush_later;
 
 static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
+static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
+				   enum sclp_pm_event sclp_pm_event);
 static int __sclp_vt220_emit(struct sclp_vt220_request *request);
 static void sclp_vt220_emit_current(void);
 
@@ -100,7 +104,8 @@ static struct sclp_register sclp_vt220_register = {
 	.send_mask		= EVTYP_VT220MSG_MASK,
 	.receive_mask		= EVTYP_VT220MSG_MASK,
 	.state_change_fn	= NULL,
-	.receiver_fn		= sclp_vt220_receiver_fn
+	.receiver_fn		= sclp_vt220_receiver_fn,
+	.pm_event_fn		= sclp_vt220_pm_event_fn,
 };
 
 
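The new pm_event_fn member is the hook other SCLP event consumers use as well: a consumer fills it in alongside receiver_fn when it registers. Below is a minimal sketch of such a consumer; the sclp_register() call and the driver-private "sclp.h" header are assumptions (the registration path is outside this diff), and all example_* names are made up for illustration.

/* Illustrative sketch only -- not part of this patch. */
#include "sclp.h"	/* assumed: declares struct sclp_register and sclp_register() */

static void example_receiver_fn(struct evbuf_header *evbuf)
{
	/* consume incoming event buffers here */
}

static void example_pm_event_fn(struct sclp_register *reg,
				enum sclp_pm_event sclp_pm_event)
{
	switch (sclp_pm_event) {
	case SCLP_PM_EVENT_FREEZE:
		/* stop issuing new requests, flush pending output */
		break;
	case SCLP_PM_EVENT_THAW:
	case SCLP_PM_EVENT_RESTORE:
		/* clear the suspend state and restart processing */
		break;
	}
}

static struct sclp_register example_register = {
	.send_mask	= EVTYP_VT220MSG_MASK,
	.receive_mask	= EVTYP_VT220MSG_MASK,
	.receiver_fn	= example_receiver_fn,
	.pm_event_fn	= example_pm_event_fn,
};

/* somewhere in the consumer's init path: */
static int example_init(void)
{
	return sclp_register(&example_register);	/* assumed interface */
}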
@@ -120,15 +125,19 @@ sclp_vt220_process_queue(struct sclp_vt220_request *request)
 		spin_lock_irqsave(&sclp_vt220_lock, flags);
 		/* Move request from outqueue to empty queue */
 		list_del(&request->list);
-		sclp_vt220_outqueue_count--;
 		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
 		/* Check if there is a pending buffer on the out queue. */
 		request = NULL;
 		if (!list_empty(&sclp_vt220_outqueue))
 			request = list_entry(sclp_vt220_outqueue.next,
 					     struct sclp_vt220_request, list);
+		if (!request || sclp_vt220_suspended) {
+			sclp_vt220_queue_running = 0;
+			spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+			break;
+		}
 		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-	} while (request && __sclp_vt220_emit(request));
+	} while (__sclp_vt220_emit(request));
 	if (request == NULL && sclp_vt220_flush_later)
 		sclp_vt220_emit_current();
 	/* Check if the tty needs a wake up call */
@@ -212,26 +221,7 @@ __sclp_vt220_emit(struct sclp_vt220_request *request)
 }
 
 /*
- * Queue and emit given request.
- */
-static void
-sclp_vt220_emit(struct sclp_vt220_request *request)
-{
-	unsigned long flags;
-	int count;
-
-	spin_lock_irqsave(&sclp_vt220_lock, flags);
-	list_add_tail(&request->list, &sclp_vt220_outqueue);
-	count = sclp_vt220_outqueue_count++;
-	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-	/* Emit only the first buffer immediately - callback takes care of
-	 * the rest */
-	if (count == 0 && __sclp_vt220_emit(request))
-		sclp_vt220_process_queue(request);
-}
-
-/*
- * Queue and emit current request. Return zero on success, non-zero otherwise.
+ * Queue and emit current request.
  */
 static void
 sclp_vt220_emit_current(void)
@@ -241,22 +231,33 @@ sclp_vt220_emit_current(void)
 	struct sclp_vt220_sccb *sccb;
 
 	spin_lock_irqsave(&sclp_vt220_lock, flags);
-	request = NULL;
-	if (sclp_vt220_current_request != NULL) {
+	if (sclp_vt220_current_request) {
 		sccb = (struct sclp_vt220_sccb *)
 			sclp_vt220_current_request->sclp_req.sccb;
 		/* Only emit buffers with content */
 		if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
-			request = sclp_vt220_current_request;
+			list_add_tail(&sclp_vt220_current_request->list,
+				      &sclp_vt220_outqueue);
 			sclp_vt220_current_request = NULL;
 			if (timer_pending(&sclp_vt220_timer))
 				del_timer(&sclp_vt220_timer);
 		}
 		sclp_vt220_flush_later = 0;
 	}
+	if (sclp_vt220_queue_running || sclp_vt220_suspended)
+		goto out_unlock;
+	if (list_empty(&sclp_vt220_outqueue))
+		goto out_unlock;
+	request = list_first_entry(&sclp_vt220_outqueue,
+				   struct sclp_vt220_request, list);
+	sclp_vt220_queue_running = 1;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+
+	if (__sclp_vt220_emit(request))
+		sclp_vt220_process_queue(request);
+	return;
+out_unlock:
 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-	if (request != NULL)
-		sclp_vt220_emit(request);
 }
 
 #define SCLP_NORMAL_WRITE	0x00
@@ -396,7 +397,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
 	if (sclp_vt220_current_request == NULL) {
 		while (list_empty(&sclp_vt220_empty)) {
 			spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-			if (may_fail)
+			if (may_fail || sclp_vt220_suspended)
 				goto out;
 			else
 				sclp_sync_wait();
@@ -531,7 +532,7 @@ sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
 static void
 sclp_vt220_flush_chars(struct tty_struct *tty)
 {
-	if (sclp_vt220_outqueue_count == 0)
+	if (!sclp_vt220_queue_running)
 		sclp_vt220_emit_current();
 	else
 		sclp_vt220_flush_later = 1;
@@ -635,7 +636,6 @@ static int __init __sclp_vt220_init(int num_pages)
 	init_timer(&sclp_vt220_timer);
 	sclp_vt220_current_request = NULL;
 	sclp_vt220_buffered_chars = 0;
-	sclp_vt220_outqueue_count = 0;
 	sclp_vt220_tty = NULL;
 	sclp_vt220_flush_later = 0;
 
@@ -736,7 +736,7 @@ static void __sclp_vt220_flush_buffer(void)
 	spin_lock_irqsave(&sclp_vt220_lock, flags);
 	if (timer_pending(&sclp_vt220_timer))
 		del_timer(&sclp_vt220_timer);
-	while (sclp_vt220_outqueue_count > 0) {
+	while (sclp_vt220_queue_running) {
 		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
 		sclp_sync_wait();
 		spin_lock_irqsave(&sclp_vt220_lock, flags);
@@ -744,6 +744,46 @@ static void __sclp_vt220_flush_buffer(void)
 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
 }
 
+/*
+ * Resume console: If there are cached messages, emit them.
+ */
+static void sclp_vt220_resume(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	sclp_vt220_suspended = 0;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	sclp_vt220_emit_current();
+}
+
+/*
+ * Suspend console: Set suspend flag and flush console
+ */
+static void sclp_vt220_suspend(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	sclp_vt220_suspended = 1;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	__sclp_vt220_flush_buffer();
+}
+
+static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
+				   enum sclp_pm_event sclp_pm_event)
+{
+	switch (sclp_pm_event) {
+	case SCLP_PM_EVENT_FREEZE:
+		sclp_vt220_suspend();
+		break;
+	case SCLP_PM_EVENT_RESTORE:
+	case SCLP_PM_EVENT_THAW:
+		sclp_vt220_resume();
+		break;
+	}
+}
+
 static int
 sclp_vt220_notify(struct notifier_block *self,
 		  unsigned long event, void *data)
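The flag handling introduced above replaces the old outqueue counter with an ownership handoff: sclp_vt220_emit_current() starts transmission only if the queue is idle and not suspended, sclp_vt220_process_queue() keeps draining completions until the queue is empty or a suspend is seen, and sclp_vt220_resume() simply re-runs the emit path. A stand-alone mock of that handoff is sketched below in plain user-space C, with the SCLP request machinery, locking and timer stripped out and all names invented for illustration.

/* Stand-alone sketch of the queue_running/suspended handoff -- not part of the patch. */
#include <stdio.h>

static int queue_running;	/* mirrors sclp_vt220_queue_running */
static int suspended;		/* mirrors sclp_vt220_suspended */
static int pending;		/* stand-in for the length of sclp_vt220_outqueue */

/* Mirrors sclp_vt220_process_queue(): drain until the queue is empty or suspended. */
static void process_queue(void)
{
	for (;;) {
		pending--;			/* a request completed */
		if (pending == 0 || suspended) {
			queue_running = 0;	/* hand the queue back */
			return;
		}
		printf("emit next buffer (%d left)\n", pending);
	}
}

/* Mirrors sclp_vt220_emit_current(): only the first caller starts the I/O chain. */
static void emit_current(void)
{
	pending++;				/* queue the current buffer */
	if (queue_running || suspended)
		return;				/* queue already owned, or frozen */
	queue_running = 1;
	printf("emit first buffer (%d queued)\n", pending);
	process_queue();			/* in the driver this runs as a completion callback */
}

int main(void)
{
	emit_current();		/* normal output: starts and drains the queue */
	suspended = 1;		/* FREEZE: output is only queued from now on */
	emit_current();
	emit_current();
	suspended = 0;		/* THAW/RESTORE: sclp_vt220_resume() re-runs the emit path */
	emit_current();
	return 0;
}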