author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/s390/char/sclp.c
tag        v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/s390/char/sclp.c')
-rw-r--r--	drivers/s390/char/sclp.c	915
1 file changed, 915 insertions(+), 0 deletions(-)

diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
new file mode 100644
index 000000000000..ceb0e474fde4
--- /dev/null
+++ b/drivers/s390/char/sclp.c
@@ -0,0 +1,915 @@
/*
 * drivers/s390/char/sclp.c
 * core function to access sclp interface
 *
 * S390 version
 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *            Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <asm/types.h>
#include <asm/s390_ext.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3
#define SCLP_REQUEST_RETRY	3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	2
#define SCLP_RETRY_INTERVAL	5

static void sclp_process_queue(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
static int
service_call(sclp_cmdw_t command, void *sccb)
{
	int cc;

	__asm__ __volatile__(
		"	.insn	rre,0xb2200000,%1,%2\n"	/* servc %1,%2 */
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=&d" (cc)
		: "d" (command), "a" (__pa(sccb))
		: "cc", "memory" );
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}
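
/*
 * Illustrative note (not in the original source): service_call()
 * extracts the condition code of the SERVICE CALL instruction via
 * ipm/srl. Per the mapping above, cc 2 (-EBUSY) indicates the SCLP is
 * still busy with a previous request and cc 3 (-EIO) that the
 * interface is not operational; cc 0 means the request was accepted
 * and its completion will later be signaled by a service-signal
 * external interrupt (see sclp_interrupt_handler()).
 */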

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	if (data) {
		spin_lock_irqsave(&sclp_lock, flags);
		sclp_running_state = sclp_running_state_idle;
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	sclp_process_queue();
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	if (req->start_count <= SCLP_REQUEST_RETRY) {
		rc = service_call(req->command, req->sccb);
		req->start_count++;
	} else
		rc = -EIO;
	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed. */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if ((sclp_init_state != sclp_init_state_initialized ||
	     sclp_activation_state != sclp_activation_state_active) &&
	    req != &sclp_init_req) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	/* Start if request is first in list */
	if (req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);
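
/*
 * Illustrative sketch (not part of the original driver, kept compiled
 * out): how a caller might fill in and queue a request. The helper
 * names are hypothetical; only struct sclp_req and sclp_add_request()
 * are real.
 */
#if 0
static void example_callback(struct sclp_req *req, void *data)
{
	/* Invoked once the request is finished; req->status is then
	 * SCLP_REQ_DONE or SCLP_REQ_FAILED. */
}

static int example_submit(sclp_cmdw_t command, void *sccb)
{
	static struct sclp_req req;

	memset(&req, 0, sizeof(req));
	req.command = command;	/* SCLP command word */
	req.sccb = sccb;	/* page-aligned SCCB buffer */
	req.status = SCLP_REQ_FILLED;
	req.callback = example_callback;
	return sclp_add_request(&req);
}
#endif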

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		/* Search for event handler */
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -ENOSYS;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
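
/*
 * Illustrative note (derived from the expression above): event type n,
 * 1 <= n <= 32, corresponds to bit (1 << (32 - n)) of an sccb_mask_t.
 * For example, type 1 maps to the most significant bit 0x80000000 and
 * type 2 to 0x40000000; a listener receives an event buffer only if
 * its receive_mask has the matching bit set.
 */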

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static inline void
__sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READDATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void
sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	spin_lock(&sclp_lock);
	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	evbuf_pending = S390_lowcore.ext_params & 0x3;
	if (finished_sccb) {
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending && sclp_receive_mask != 0 &&
	    sclp_reading_state == sclp_reading_state_idle &&
	    sclp_activation_state == sclp_activation_state_active) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Return current Time-Of-Day clock. */
static inline u64
sclp_get_clock(void)
{
	u64 result;

	asm volatile ("STCK 0(%1)" : "=m" (result) : "a" (&(result)) : "cc");
	return result;
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}
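
/*
 * Illustrative arithmetic (an assumption about the TOD format, stated
 * for clarity): bit 51 of the s390 TOD clock is incremented once per
 * microsecond, so one second is approximately 2^32 TOD units. Shifting
 * whole seconds (jiffies / HZ) left by 32 therefore yields an
 * approximate TOD interval; e.g. with HZ == 100, 500 jiffies become
 * 5ULL << 32, roughly five seconds.
 */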

/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long psw_mask;
	unsigned long cr0, cr0_sync;
	u64 timeout;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = sclp_get_clock() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	/* Prevent bottom half from executing once we force interrupts open */
	local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync |= 0x00000200;
	cr0_sync &= 0xFFFFF3AC;
	__ctl_load(cr0_sync, 0, 0);
	asm volatile ("STOSM 0(%1),0x01"
		      : "=m" (psw_mask) : "a" (&psw_mask) : "memory");
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    sclp_get_clock() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		barrier();
		cpu_relax();
	}
	/* Restore interrupt settings */
	asm volatile ("SSM 0(%0)"
		      : : "a" (&psw_mask) : "memory");
	__ctl_load(cr0, 0, 0);
	__local_bh_enable();
}

EXPORT_SYMBOL(sclp_sync_wait);
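
/*
 * Illustrative sketch (hypothetical caller, kept compiled out): queue
 * a request and busy-wait for its completion, mirroring the pattern
 * used by sclp_init_mask() below.
 */
#if 0
static void example_wait_for_completion(struct sclp_req *req)
{
	if (sclp_add_request(req))
		return;
	/* sclp_sync_wait() services the service-signal interrupt
	 * synchronously until the request is done or failed. */
	while (req->status != SCLP_REQ_DONE &&
	       req->status != SCLP_REQ_FAILED)
		sclp_sync_wait();
}
#endif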

/* Dispatch changes in send and receive mask to registered listeners. */
static inline void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->receive_mask & sclp_receive_mask;
			send_mask = reg->send_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8	validity_sclp_active_facility_mask : 1;
	u8	validity_sclp_receive_mask : 1;
	u8	validity_sclp_send_mask : 1;
	u8	validity_read_data_function_mask : 1;
	u16	_zeros : 12;
	u16	mask_length;
	u64	sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32	read_data_function_mask;
} __attribute__((packed));


/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EvTyp_StateChange_Mask,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);
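
/*
 * Illustrative sketch (not part of the original driver, kept compiled
 * out): registering an event listener. The mask value and callback are
 * hypothetical; sclp_state_change_event above shows the real pattern.
 */
#if 0
static void example_receiver(struct evbuf_header *evbuf)
{
	/* Called for every received event buffer of a matching type. */
}

static struct sclp_register example_event = {
	.receive_mask = 0x40000000,	/* event type 2, see
					 * sclp_dispatch_evbufs() */
	.receiver_fn = example_receiver
};

/* sclp_register(&example_event) returns -EBUSY if another listener
 * already claimed one of the requested mask bits. */
#endif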

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
				((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);
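
/*
 * Illustrative note: a receiver callback would typically mark an event
 * buffer as processed by setting the 0x80 flag bit checked above and
 * then call sclp_remove_processed() on the containing SCCB; the SCCB
 * is compacted in place and the return value tells the caller how many
 * unprocessed event buffers remain.
 */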

struct init_sccb {
	struct sccb_header header;
	u16 _reserved;
	u16 mask_length;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	sccb_mask_t sclp_send_mask;
	sccb_mask_t sclp_receive_mask;
} __attribute__((packed));

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITEMASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}
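
/*
 * Illustrative note: the write-mask exchange is symmetric. The driver
 * announces in receive_mask/send_mask which event types it wants to
 * receive from and send to the SCLP; on success the SCLP reports its
 * own view back in sclp_receive_mask/sclp_send_mask, which
 * sclp_init_mask() below adopts as the effective masks.
 */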

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void
sclp_check_handler(struct pt_regs *regs, __u16 code)
{
	u32 finished_sccb;

	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
		printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
		       "for buffer at 0x%x\n", finished_sccb);
		return;
	}
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_early_external_interrupt(0x2401, sclp_check_handler,
					       &ext_int_info_hwc);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_set_bit(0, 9);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_clear_bit(0, 9);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_early_external_interrupt(0x2401, sclp_check_handler,
					    &ext_int_info_hwc);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc;

	if (!MACHINE_HAS_SCLP)
		return -ENODEV;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return 0;
	}
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register interrupt handler */
	rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
					       &ext_int_info_hwc);
	if (rc) {
		unregister_reboot_notifier(&sclp_reboot_notifier);
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	ctl_set_bit(0, 9);
	sclp_init_mask(1);
	return 0;
}