Diffstat (limited to 'drivers/s390/char')

-rw-r--r--  drivers/s390/char/sclp.c  |  70
1 file changed, 49 insertions, 21 deletions
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 8a056df09d6b..3457a9a31571 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -59,7 +59,8 @@ static volatile enum sclp_init_state_t {
 /* Internal state: is a request active at the sclp? */
 static volatile enum sclp_running_state_t {
         sclp_running_state_idle,
-        sclp_running_state_running
+        sclp_running_state_running,
+        sclp_running_state_reset_pending
 } sclp_running_state = sclp_running_state_idle;
 
 /* Internal state: is a read request pending? */
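
The new sclp_running_state_reset_pending value marks the window between the completion interrupt for an SCCB and the end of its post-processing; as the later hunks show, sclp_add_request() now refuses to start a new request unless the state is back to idle. A minimal standalone sketch of how the three states appear to be used (names mirror the driver's enum; the helper below is illustrative and not part of sclp.c):

        /* Illustrative model of the running-state machine in this patch. */
        enum sclp_running_state_t {
                sclp_running_state_idle,          /* no request at the SCLP */
                sclp_running_state_running,       /* SCCB submitted, waiting for the interrupt */
                sclp_running_state_reset_pending, /* interrupt seen, post-processing not finished */
        };

        static enum sclp_running_state_t running_state = sclp_running_state_idle;

        /* Hypothetical guard matching the check added to sclp_add_request()
         * below: only the idle state may hand a new SCCB to the SCLP. */
        static int may_start_request(void)
        {
                return running_state == sclp_running_state_idle;
        }
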
@@ -88,7 +89,7 @@ static volatile enum sclp_mask_state_t {
 
 /* Timeout intervals in seconds.*/
 #define SCLP_BUSY_INTERVAL      10
-#define SCLP_RETRY_INTERVAL     15
+#define SCLP_RETRY_INTERVAL     30
 
 static void sclp_process_queue(void);
 static int sclp_init_mask(int calculate);
@@ -113,19 +114,17 @@ service_call(sclp_cmdw_t command, void *sccb)
         return 0;
 }
 
-/* Request timeout handler. Restart the request queue. If DATA is non-zero,
- * force restart of running request. */
+static inline void __sclp_make_read_req(void);
+
 static void
-sclp_request_timeout(unsigned long data)
+__sclp_queue_read_req(void)
 {
-        unsigned long flags;
-
-        if (data) {
-                spin_lock_irqsave(&sclp_lock, flags);
-                sclp_running_state = sclp_running_state_idle;
-                spin_unlock_irqrestore(&sclp_lock, flags);
+        if (sclp_reading_state == sclp_reading_state_idle) {
+                sclp_reading_state = sclp_reading_state_reading;
+                __sclp_make_read_req();
+                /* Add request to head of queue */
+                list_add(&sclp_read_req.list, &sclp_req_queue);
         }
-        sclp_process_queue();
 }
 
 /* Set up request retry timer. Called while sclp_lock is locked. */
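
__sclp_queue_read_req() factors out the "queue a read event data request at the head of the request queue" sequence that previously lived only in the interrupt handler, and it performs the sclp_reading_state check itself, so callers only need to hold sclp_lock. A hedged sketch of the expected calling pattern (the wrapper function name is made up for illustration):

        /* Illustrative caller, following the locking rules visible in this
         * patch: __sclp_queue_read_req() runs under sclp_lock, while
         * sclp_process_queue() takes the lock on its own. */
        static void example_trigger_read(void)
        {
                unsigned long flags;

                spin_lock_irqsave(&sclp_lock, flags);
                __sclp_queue_read_req();        /* no-op unless reading state is idle */
                spin_unlock_irqrestore(&sclp_lock, flags);
                sclp_process_queue();           /* actually start whatever was queued */
        }
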
@@ -140,6 +139,29 @@ __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
         add_timer(&sclp_request_timer);
 }
 
+/* Request timeout handler. Restart the request queue. If DATA is non-zero,
+ * force restart of running request. */
+static void
+sclp_request_timeout(unsigned long data)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&sclp_lock, flags);
+        if (data) {
+                if (sclp_running_state == sclp_running_state_running) {
+                        /* Break running state and queue NOP read event request
+                         * to get a defined interface state. */
+                        __sclp_queue_read_req();
+                        sclp_running_state = sclp_running_state_idle;
+                }
+        } else {
+                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+                                         sclp_request_timeout, 0);
+        }
+        spin_unlock_irqrestore(&sclp_lock, flags);
+        sclp_process_queue();
+}
+
 /* Try to start a request. Return zero if the request was successfully
  * started or if it will be started at a later time. Return non-zero otherwise.
  * Called while sclp_lock is locked. */
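
The handler above is installed as the function of sclp_request_timer, whose setup helper is only partly visible in this hunk (its tail, add_timer(&sclp_request_timer), appears in the context lines). A reconstruction of what that helper presumably looks like, based on its signature and on the timer API of this kernel generation (struct timer_list with .function and .data); treat the body as an assumption, not a quote from sclp.c:

        static void
        __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
                                 unsigned long data)
        {
                del_timer(&sclp_request_timer);
                sclp_request_timer.function = function;
                sclp_request_timer.data = data;
                sclp_request_timer.expires = jiffies + time;
                add_timer(&sclp_request_timer);
        }

The request-start path presumably arms this timer with a non-zero data argument (something like __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ, sclp_request_timeout, 1)), which selects the "force restart" branch of the handler; the data == 0 case merely re-arms the busy poll and retries the queue.
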
@@ -191,7 +213,15 @@ sclp_process_queue(void)
                 rc = __sclp_start_request(req);
                 if (rc == 0)
                         break;
-                /* Request failed. */
+                /* Request failed */
+                if (req->start_count > 1) {
+                        /* Cannot abort already submitted request - could still
+                         * be active at the SCLP */
+                        __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+                                                 sclp_request_timeout, 0);
+                        break;
+                }
+                /* Post-processing for aborted request */
                 list_del(&req->list);
                 if (req->callback) {
                         spin_unlock_irqrestore(&sclp_lock, flags);
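
The start_count test introduces a retry policy: a request that has already been handed to the SCLP at least once cannot simply be aborted, because its SCCB may still be owned by the hardware, so the queue backs off behind the busy timer instead. Condensed, the new decision looks roughly like this (illustrative; the field names callback and callback_data follow struct sclp_req in sclp.h, and the real code additionally drops sclp_lock around the callback):

        if (req->start_count > 1) {
                /* SCCB may still be in use by the SCLP: park it and poll later. */
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout, 0);
        } else {
                /* Never accepted by the SCLP: abort it and notify the owner. */
                list_del(&req->list);
                if (req->callback)
                        req->callback(req, req->callback_data);
        }
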
@@ -221,7 +251,8 @@ sclp_add_request(struct sclp_req *req)
         list_add_tail(&req->list, &sclp_req_queue);
         rc = 0;
         /* Start if request is first in list */
-        if (req->list.prev == &sclp_req_queue) {
+        if (sclp_running_state == sclp_running_state_idle &&
+            req->list.prev == &sclp_req_queue) {
                 rc = __sclp_start_request(req);
                 if (rc)
                         list_del(&req->list);
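
For context, a sketch of how a client would feed this queue through sclp_add_request(); the struct sclp_req field names (command, sccb, status, callback, callback_data) and SCLP_REQ_FILLED are taken from drivers/s390/char/sclp.h as assumptions, and the completion-based wait is just one possible pattern:

        static void example_callback(struct sclp_req *req, void *data)
        {
                complete((struct completion *) data);
        }

        static int example_submit(sclp_cmdw_t command, void *sccb)
        {
                struct sclp_req req;
                struct completion done;
                int rc;

                memset(&req, 0, sizeof(req));
                init_completion(&done);
                req.command = command;
                req.sccb = sccb;
                req.status = SCLP_REQ_FILLED;
                req.callback = example_callback;
                req.callback_data = &done;

                rc = sclp_add_request(&req);    /* queues it; starts it only if idle */
                if (rc)
                        return rc;
                wait_for_completion(&done);
                return 0;
        }

With the added sclp_running_state check, a request queued while the interface is running or reset-pending is not started immediately; it stays on sclp_req_queue until sclp_process_queue() picks it up.
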
@@ -334,6 +365,8 @@ sclp_interrupt_handler(__u16 code)
         finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
         evbuf_pending = S390_lowcore.ext_params & 0x3;
         if (finished_sccb) {
+                del_timer(&sclp_request_timer);
+                sclp_running_state = sclp_running_state_reset_pending;
                 req = __sclp_find_req(finished_sccb);
                 if (req) {
                         /* Request post-processing */
@@ -348,13 +381,8 @@ sclp_interrupt_handler(__u16 code)
                 sclp_running_state = sclp_running_state_idle;
         }
         if (evbuf_pending && sclp_receive_mask != 0 &&
-            sclp_reading_state == sclp_reading_state_idle &&
-            sclp_activation_state == sclp_activation_state_active ) {
-                sclp_reading_state = sclp_reading_state_reading;
-                __sclp_make_read_req();
-                /* Add request to head of queue */
-                list_add(&sclp_read_req.list, &sclp_req_queue);
-        }
+            sclp_activation_state == sclp_activation_state_active)
+                __sclp_queue_read_req();
         spin_unlock(&sclp_lock);
         sclp_process_queue();
 }
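
Note that the sclp_reading_state condition removed here is not lost: __sclp_queue_read_req(), introduced earlier in the patch, performs that check itself, so the simplified call site expands to roughly the removed block (shown for illustration only):

        if (evbuf_pending && sclp_receive_mask != 0 &&
            sclp_activation_state == sclp_activation_state_active) {
                if (sclp_reading_state == sclp_reading_state_idle) {
                        sclp_reading_state = sclp_reading_state_reading;
                        __sclp_make_read_req();
                        /* Add request to head of queue */
                        list_add(&sclp_read_req.list, &sclp_req_queue);
                }
        }
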