aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/s390/char
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/s390/char
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/s390/char')
-rw-r--r--drivers/s390/char/Makefile28
-rw-r--r--drivers/s390/char/con3215.c1192
-rw-r--r--drivers/s390/char/con3270.c638
-rw-r--r--drivers/s390/char/ctrlchar.c75
-rw-r--r--drivers/s390/char/ctrlchar.h20
-rw-r--r--drivers/s390/char/defkeymap.c156
-rw-r--r--drivers/s390/char/defkeymap.map191
-rw-r--r--drivers/s390/char/fs3270.c373
-rw-r--r--drivers/s390/char/keyboard.c519
-rw-r--r--drivers/s390/char/keyboard.h57
-rw-r--r--drivers/s390/char/monreader.c662
-rw-r--r--drivers/s390/char/raw3270.c1335
-rw-r--r--drivers/s390/char/raw3270.h274
-rw-r--r--drivers/s390/char/sclp.c915
-rw-r--r--drivers/s390/char/sclp.h159
-rw-r--r--drivers/s390/char/sclp_con.c252
-rw-r--r--drivers/s390/char/sclp_cpi.c254
-rw-r--r--drivers/s390/char/sclp_quiesce.c99
-rw-r--r--drivers/s390/char/sclp_rw.c471
-rw-r--r--drivers/s390/char/sclp_rw.h96
-rw-r--r--drivers/s390/char/sclp_tty.c813
-rw-r--r--drivers/s390/char/sclp_tty.h71
-rw-r--r--drivers/s390/char/sclp_vt220.c785
-rw-r--r--drivers/s390/char/tape.h384
-rw-r--r--drivers/s390/char/tape_34xx.c1385
-rw-r--r--drivers/s390/char/tape_block.c492
-rw-r--r--drivers/s390/char/tape_char.c492
-rw-r--r--drivers/s390/char/tape_class.c126
-rw-r--r--drivers/s390/char/tape_class.h61
-rw-r--r--drivers/s390/char/tape_core.c1242
-rw-r--r--drivers/s390/char/tape_proc.c145
-rw-r--r--drivers/s390/char/tape_std.c765
-rw-r--r--drivers/s390/char/tape_std.h152
-rw-r--r--drivers/s390/char/tty3270.c1836
-rw-r--r--drivers/s390/char/vmlogrdr.c920
-rw-r--r--drivers/s390/char/vmwatchdog.c292
36 files changed, 17727 insertions, 0 deletions
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
new file mode 100644
index 000000000000..14e8cce9f862
--- /dev/null
+++ b/drivers/s390/char/Makefile
@@ -0,0 +1,28 @@
#
# Makefile for the S/390 character device drivers.
#

obj-y += ctrlchar.o keyboard.o defkeymap.o

obj-$(CONFIG_TN3270) += raw3270.o
obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
obj-$(CONFIG_TN3270_TTY) += tty3270.o
obj-$(CONFIG_TN3270_FS) += fs3270.o

obj-$(CONFIG_TN3215) += con3215.o

obj-$(CONFIG_SCLP) += sclp.o sclp_rw.o sclp_quiesce.o
obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o

obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o

# tape.o is a composite object; optional parts are collected in $(tape-y)
tape-$(CONFIG_S390_TAPE_BLOCK) += tape_block.o
tape-$(CONFIG_PROC_FS) += tape_proc.o
tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y)
obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o
obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
obj-$(CONFIG_MONREADER) += monreader.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
new file mode 100644
index 000000000000..022f17bff731
--- /dev/null
+++ b/drivers/s390/char/con3215.c
@@ -0,0 +1,1192 @@
1/*
2 * drivers/s390/char/con3215.c
3 * 3215 line mode terminal driver.
4 *
5 * S390 version
6 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
8 *
9 * Updated:
10 * Aug-2000: Added tab support
11 * Dan Morrison, IBM Corporation (dmorriso@cse.buffalo.edu)
12 */
13
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/kdev_t.h>
18#include <linux/tty.h>
19#include <linux/vt_kern.h>
20#include <linux/init.h>
21#include <linux/console.h>
22#include <linux/interrupt.h>
23
24#include <linux/slab.h>
25#include <linux/bootmem.h>
26
27#include <asm/ccwdev.h>
28#include <asm/cio.h>
29#include <asm/io.h>
30#include <asm/ebcdic.h>
31#include <asm/uaccess.h>
32#include <asm/delay.h>
33#include <asm/cpcmd.h>
34#include <asm/setup.h>
35
36#include "ctrlchar.h"
37
38#define NR_3215 1
39#define NR_3215_REQ (4*NR_3215)
40#define RAW3215_BUFFER_SIZE 65536 /* output buffer size */
41#define RAW3215_INBUF_SIZE 256 /* input buffer size */
42#define RAW3215_MIN_SPACE 128 /* minimum free space for wakeup */
43#define RAW3215_MIN_WRITE 1024 /* min. length for immediate output */
44#define RAW3215_MAX_BYTES 3968 /* max. bytes to write with one ssch */
45#define RAW3215_MAX_NEWLINE 50 /* max. lines to write with one ssch */
46#define RAW3215_NR_CCWS 3
47#define RAW3215_TIMEOUT HZ/10 /* time for delayed output */
48
49#define RAW3215_FIXED 1 /* 3215 console device is not be freed */
50#define RAW3215_ACTIVE 2 /* set if the device is in use */
51#define RAW3215_WORKING 4 /* set if a request is being worked on */
52#define RAW3215_THROTTLED 8 /* set if reading is disabled */
53#define RAW3215_STOPPED 16 /* set if writing is disabled */
54#define RAW3215_CLOSING 32 /* set while in close process */
55#define RAW3215_TIMER_RUNS 64 /* set if the output delay timer is on */
56#define RAW3215_FLUSHING 128 /* set to flush buffer (no delay) */
57
58#define TAB_STOP_SIZE 8 /* tab stop size */
59
60/*
61 * Request types for a 3215 device
62 */
63enum raw3215_type {
64 RAW3215_FREE, RAW3215_READ, RAW3215_WRITE
65};
66
67/*
68 * Request structure for a 3215 device
69 */
70struct raw3215_req {
71 enum raw3215_type type; /* type of the request */
72 int start, len; /* start index & len in output buffer */
73 int delayable; /* indication to wait for more data */
74 int residual; /* residual count for read request */
75 struct ccw1 ccws[RAW3215_NR_CCWS]; /* space for the channel program */
76 struct raw3215_info *info; /* pointer to main structure */
77 struct raw3215_req *next; /* pointer to next request */
78} __attribute__ ((aligned(8)));
79
80struct raw3215_info {
81 struct ccw_device *cdev; /* device for tty driver */
82 spinlock_t *lock; /* pointer to irq lock */
83 int flags; /* state flags */
84 char *buffer; /* pointer to output buffer */
85 char *inbuf; /* pointer to input buffer */
86 int head; /* first free byte in output buffer */
87 int count; /* number of bytes in output buffer */
88 int written; /* number of bytes in write requests */
89 struct tty_struct *tty; /* pointer to tty structure if present */
90 struct tasklet_struct tasklet;
91 struct raw3215_req *queued_read; /* pointer to queued read requests */
92 struct raw3215_req *queued_write;/* pointer to queued write requests */
93 wait_queue_head_t empty_wait; /* wait queue for flushing */
94 struct timer_list timer; /* timer for delayed output */
95 char *message; /* pending message from raw3215_irq */
96 int msg_dstat; /* dstat for pending message */
97 int msg_cstat; /* cstat for pending message */
98 int line_pos; /* position on the line (for tabs) */
99 char ubuffer[80]; /* copy_from_user buffer */
100};
101
102/* array of 3215 devices structures */
103static struct raw3215_info *raw3215[NR_3215];
104/* spinlock to protect the raw3215 array */
105static DEFINE_SPINLOCK(raw3215_device_lock);
106/* list of free request structures */
107static struct raw3215_req *raw3215_freelist;
108/* spinlock to protect free list */
109static spinlock_t raw3215_freelist_lock;
110
111static struct tty_driver *tty3215_driver;
112
113/*
114 * Get a request structure from the free list
115 */
116static inline struct raw3215_req *
117raw3215_alloc_req(void) {
118 struct raw3215_req *req;
119 unsigned long flags;
120
121 spin_lock_irqsave(&raw3215_freelist_lock, flags);
122 req = raw3215_freelist;
123 raw3215_freelist = req->next;
124 spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
125 return req;
126}
127
128/*
129 * Put a request structure back to the free list
130 */
131static inline void
132raw3215_free_req(struct raw3215_req *req) {
133 unsigned long flags;
134
135 if (req->type == RAW3215_FREE)
136 return; /* don't free a free request */
137 req->type = RAW3215_FREE;
138 spin_lock_irqsave(&raw3215_freelist_lock, flags);
139 req->next = raw3215_freelist;
140 raw3215_freelist = req;
141 spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
142}
143
144/*
145 * Set up a read request that reads up to 160 byte from the 3215 device.
146 * If there is a queued read request it is used, but that shouldn't happen
147 * because a 3215 terminal won't accept a new read before the old one is
148 * completed.
149 */
150static void
151raw3215_mk_read_req(struct raw3215_info *raw)
152{
153 struct raw3215_req *req;
154 struct ccw1 *ccw;
155
156 /* there can only be ONE read request at a time */
157 req = raw->queued_read;
158 if (req == NULL) {
159 /* no queued read request, use new req structure */
160 req = raw3215_alloc_req();
161 req->type = RAW3215_READ;
162 req->info = raw;
163 raw->queued_read = req;
164 }
165
166 ccw = req->ccws;
167 ccw->cmd_code = 0x0A; /* read inquiry */
168 ccw->flags = 0x20; /* ignore incorrect length */
169 ccw->count = 160;
170 ccw->cda = (__u32) __pa(raw->inbuf);
171}
172
173/*
174 * Set up a write request with the information from the main structure.
175 * A ccw chain is created that writes as much as possible from the output
176 * buffer to the 3215 device. If a queued write exists it is replaced by
177 * the new, probably lengthened request.
178 */
179static void
180raw3215_mk_write_req(struct raw3215_info *raw)
181{
182 struct raw3215_req *req;
183 struct ccw1 *ccw;
184 int len, count, ix, lines;
185
186 if (raw->count <= raw->written)
187 return;
188 /* check if there is a queued write request */
189 req = raw->queued_write;
190 if (req == NULL) {
191 /* no queued write request, use new req structure */
192 req = raw3215_alloc_req();
193 req->type = RAW3215_WRITE;
194 req->info = raw;
195 raw->queued_write = req;
196 } else {
197 raw->written -= req->len;
198 }
199
200 ccw = req->ccws;
201 req->start = (raw->head - raw->count + raw->written) &
202 (RAW3215_BUFFER_SIZE - 1);
203 /*
204 * now we have to count newlines. We can at max accept
205 * RAW3215_MAX_NEWLINE newlines in a single ssch due to
206 * a restriction in VM
207 */
208 lines = 0;
209 ix = req->start;
210 while (lines < RAW3215_MAX_NEWLINE && ix != raw->head) {
211 if (raw->buffer[ix] == 0x15)
212 lines++;
213 ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1);
214 }
215 len = ((ix - 1 - req->start) & (RAW3215_BUFFER_SIZE - 1)) + 1;
216 if (len > RAW3215_MAX_BYTES)
217 len = RAW3215_MAX_BYTES;
218 req->len = len;
219 raw->written += len;
220
221 /* set the indication if we should try to enlarge this request */
222 req->delayable = (ix == raw->head) && (len < RAW3215_MIN_WRITE);
223
224 ix = req->start;
225 while (len > 0) {
226 if (ccw > req->ccws)
227 ccw[-1].flags |= 0x40; /* use command chaining */
228 ccw->cmd_code = 0x01; /* write, auto carrier return */
229 ccw->flags = 0x20; /* ignore incorrect length ind. */
230 ccw->cda =
231 (__u32) __pa(raw->buffer + ix);
232 count = len;
233 if (ix + count > RAW3215_BUFFER_SIZE)
234 count = RAW3215_BUFFER_SIZE - ix;
235 ccw->count = count;
236 len -= count;
237 ix = (ix + count) & (RAW3215_BUFFER_SIZE - 1);
238 ccw++;
239 }
240 /*
241 * Add a NOP to the channel program. 3215 devices are purely
242 * emulated and its much better to avoid the channel end
243 * interrupt in this case.
244 */
245 if (ccw > req->ccws)
246 ccw[-1].flags |= 0x40; /* use command chaining */
247 ccw->cmd_code = 0x03; /* NOP */
248 ccw->flags = 0;
249 ccw->cda = 0;
250 ccw->count = 1;
251}
252
253/*
254 * Start a read or a write request
255 */
256static void
257raw3215_start_io(struct raw3215_info *raw)
258{
259 struct raw3215_req *req;
260 int res;
261
262 req = raw->queued_read;
263 if (req != NULL &&
264 !(raw->flags & (RAW3215_WORKING | RAW3215_THROTTLED))) {
265 /* dequeue request */
266 raw->queued_read = NULL;
267 res = ccw_device_start(raw->cdev, req->ccws,
268 (unsigned long) req, 0, 0);
269 if (res != 0) {
270 /* do_IO failed, put request back to queue */
271 raw->queued_read = req;
272 } else {
273 raw->flags |= RAW3215_WORKING;
274 }
275 }
276 req = raw->queued_write;
277 if (req != NULL &&
278 !(raw->flags & (RAW3215_WORKING | RAW3215_STOPPED))) {
279 /* dequeue request */
280 raw->queued_write = NULL;
281 res = ccw_device_start(raw->cdev, req->ccws,
282 (unsigned long) req, 0, 0);
283 if (res != 0) {
284 /* do_IO failed, put request back to queue */
285 raw->queued_write = req;
286 } else {
287 raw->flags |= RAW3215_WORKING;
288 }
289 }
290}
291
292/*
293 * Function to start a delayed output after RAW3215_TIMEOUT seconds
294 */
295static void
296raw3215_timeout(unsigned long __data)
297{
298 struct raw3215_info *raw = (struct raw3215_info *) __data;
299 unsigned long flags;
300
301 spin_lock_irqsave(raw->lock, flags);
302 if (raw->flags & RAW3215_TIMER_RUNS) {
303 del_timer(&raw->timer);
304 raw->flags &= ~RAW3215_TIMER_RUNS;
305 raw3215_mk_write_req(raw);
306 raw3215_start_io(raw);
307 }
308 spin_unlock_irqrestore(raw->lock, flags);
309}
310
311/*
312 * Function to conditionally start an IO. A read is started immediately,
313 * a write is only started immediately if the flush flag is on or the
314 * amount of data is bigger than RAW3215_MIN_WRITE. If a write is not
315 * done immediately a timer is started with a delay of RAW3215_TIMEOUT.
316 */
317static inline void
318raw3215_try_io(struct raw3215_info *raw)
319{
320 if (!(raw->flags & RAW3215_ACTIVE))
321 return;
322 if (raw->queued_read != NULL)
323 raw3215_start_io(raw);
324 else if (raw->queued_write != NULL) {
325 if ((raw->queued_write->delayable == 0) ||
326 (raw->flags & RAW3215_FLUSHING)) {
327 /* execute write requests bigger than minimum size */
328 raw3215_start_io(raw);
329 if (raw->flags & RAW3215_TIMER_RUNS) {
330 del_timer(&raw->timer);
331 raw->flags &= ~RAW3215_TIMER_RUNS;
332 }
333 } else if (!(raw->flags & RAW3215_TIMER_RUNS)) {
334 /* delay small writes */
335 init_timer(&raw->timer);
336 raw->timer.expires = RAW3215_TIMEOUT + jiffies;
337 raw->timer.data = (unsigned long) raw;
338 raw->timer.function = raw3215_timeout;
339 add_timer(&raw->timer);
340 raw->flags |= RAW3215_TIMER_RUNS;
341 }
342 }
343}
344
345/*
346 * The bottom half handler routine for 3215 devices. It tries to start
347 * the next IO and wakes up processes waiting on the tty.
348 */
349static void
350raw3215_tasklet(void *data)
351{
352 struct raw3215_info *raw;
353 struct tty_struct *tty;
354 unsigned long flags;
355
356 raw = (struct raw3215_info *) data;
357 spin_lock_irqsave(raw->lock, flags);
358 raw3215_mk_write_req(raw);
359 raw3215_try_io(raw);
360 spin_unlock_irqrestore(raw->lock, flags);
361 /* Check for pending message from raw3215_irq */
362 if (raw->message != NULL) {
363 printk(raw->message, raw->msg_dstat, raw->msg_cstat);
364 raw->message = NULL;
365 }
366 tty = raw->tty;
367 if (tty != NULL &&
368 RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) {
369 tty_wakeup(tty);
370 }
371}
372
373/*
374 * Interrupt routine, called from common io layer
375 */
376static void
377raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
378{
379 struct raw3215_info *raw;
380 struct raw3215_req *req;
381 struct tty_struct *tty;
382 int cstat, dstat;
383 int count, slen;
384
385 raw = cdev->dev.driver_data;
386 req = (struct raw3215_req *) intparm;
387 cstat = irb->scsw.cstat;
388 dstat = irb->scsw.dstat;
389 if (cstat != 0) {
390 raw->message = KERN_WARNING
391 "Got nonzero channel status in raw3215_irq "
392 "(dev sts 0x%2x, sch sts 0x%2x)";
393 raw->msg_dstat = dstat;
394 raw->msg_cstat = cstat;
395 tasklet_schedule(&raw->tasklet);
396 }
397 if (dstat & 0x01) { /* we got a unit exception */
398 dstat &= ~0x01; /* we can ignore it */
399 }
400 switch (dstat) {
401 case 0x80:
402 if (cstat != 0)
403 break;
404 /* Attention interrupt, someone hit the enter key */
405 raw3215_mk_read_req(raw);
406 if (MACHINE_IS_P390)
407 memset(raw->inbuf, 0, RAW3215_INBUF_SIZE);
408 tasklet_schedule(&raw->tasklet);
409 break;
410 case 0x08:
411 case 0x0C:
412 /* Channel end interrupt. */
413 if ((raw = req->info) == NULL)
414 return; /* That shouldn't happen ... */
415 if (req->type == RAW3215_READ) {
416 /* store residual count, then wait for device end */
417 req->residual = irb->scsw.count;
418 }
419 if (dstat == 0x08)
420 break;
421 case 0x04:
422 /* Device end interrupt. */
423 if ((raw = req->info) == NULL)
424 return; /* That shouldn't happen ... */
425 if (req->type == RAW3215_READ && raw->tty != NULL) {
426 unsigned int cchar;
427
428 tty = raw->tty;
429 count = 160 - req->residual;
430 if (MACHINE_IS_P390) {
431 slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE);
432 if (count > slen)
433 count = slen;
434 } else
435 if (count >= TTY_FLIPBUF_SIZE - tty->flip.count)
436 count = TTY_FLIPBUF_SIZE - tty->flip.count - 1;
437 EBCASC(raw->inbuf, count);
438 cchar = ctrlchar_handle(raw->inbuf, count, tty);
439 switch (cchar & CTRLCHAR_MASK) {
440 case CTRLCHAR_SYSRQ:
441 break;
442
443 case CTRLCHAR_CTRL:
444 tty->flip.count++;
445 *tty->flip.flag_buf_ptr++ = TTY_NORMAL;
446 *tty->flip.char_buf_ptr++ = cchar;
447 tty_flip_buffer_push(raw->tty);
448 break;
449
450 case CTRLCHAR_NONE:
451 memcpy(tty->flip.char_buf_ptr,
452 raw->inbuf, count);
453 if (count < 2 ||
454 (strncmp(raw->inbuf+count-2, "^n", 2) &&
455 strncmp(raw->inbuf+count-2, "\252n", 2)) ) {
456 /* don't add the auto \n */
457 tty->flip.char_buf_ptr[count] = '\n';
458 memset(tty->flip.flag_buf_ptr,
459 TTY_NORMAL, count + 1);
460 count++;
461 } else
462 count-=2;
463 tty->flip.char_buf_ptr += count;
464 tty->flip.flag_buf_ptr += count;
465 tty->flip.count += count;
466 tty_flip_buffer_push(raw->tty);
467 break;
468 }
469 } else if (req->type == RAW3215_WRITE) {
470 raw->count -= req->len;
471 raw->written -= req->len;
472 }
473 raw->flags &= ~RAW3215_WORKING;
474 raw3215_free_req(req);
475 /* check for empty wait */
476 if (waitqueue_active(&raw->empty_wait) &&
477 raw->queued_write == NULL &&
478 raw->queued_read == NULL) {
479 wake_up_interruptible(&raw->empty_wait);
480 }
481 tasklet_schedule(&raw->tasklet);
482 break;
483 default:
484 /* Strange interrupt, I'll do my best to clean up */
485 if (req != NULL && req->type != RAW3215_FREE) {
486 if (req->type == RAW3215_WRITE) {
487 raw->count -= req->len;
488 raw->written -= req->len;
489 }
490 raw->flags &= ~RAW3215_WORKING;
491 raw3215_free_req(req);
492 }
493 raw->message = KERN_WARNING
494 "Spurious interrupt in in raw3215_irq "
495 "(dev sts 0x%2x, sch sts 0x%2x)";
496 raw->msg_dstat = dstat;
497 raw->msg_cstat = cstat;
498 tasklet_schedule(&raw->tasklet);
499 }
500 return;
501}
502
503/*
504 * Wait until length bytes are available int the output buffer.
505 * Has to be called with the s390irq lock held. Can be called
506 * disabled.
507 */
508static void
509raw3215_make_room(struct raw3215_info *raw, unsigned int length)
510{
511 while (RAW3215_BUFFER_SIZE - raw->count < length) {
512 /* there might be a request pending */
513 raw->flags |= RAW3215_FLUSHING;
514 raw3215_mk_write_req(raw);
515 raw3215_try_io(raw);
516 raw->flags &= ~RAW3215_FLUSHING;
517#ifdef CONFIG_TN3215_CONSOLE
518 wait_cons_dev();
519#endif
520 /* Enough room freed up ? */
521 if (RAW3215_BUFFER_SIZE - raw->count >= length)
522 break;
523 /* there might be another cpu waiting for the lock */
524 spin_unlock(raw->lock);
525 udelay(100);
526 spin_lock(raw->lock);
527 }
528}
529
530/*
531 * String write routine for 3215 devices
532 */
533static void
534raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length)
535{
536 unsigned long flags;
537 int c, count;
538
539 while (length > 0) {
540 spin_lock_irqsave(raw->lock, flags);
541 count = (length > RAW3215_BUFFER_SIZE) ?
542 RAW3215_BUFFER_SIZE : length;
543 length -= count;
544
545 raw3215_make_room(raw, count);
546
547 /* copy string to output buffer and convert it to EBCDIC */
548 while (1) {
549 c = min_t(int, count,
550 min(RAW3215_BUFFER_SIZE - raw->count,
551 RAW3215_BUFFER_SIZE - raw->head));
552 if (c <= 0)
553 break;
554 memcpy(raw->buffer + raw->head, str, c);
555 ASCEBC(raw->buffer + raw->head, c);
556 raw->head = (raw->head + c) & (RAW3215_BUFFER_SIZE - 1);
557 raw->count += c;
558 raw->line_pos += c;
559 str += c;
560 count -= c;
561 }
562 if (!(raw->flags & RAW3215_WORKING)) {
563 raw3215_mk_write_req(raw);
564 /* start or queue request */
565 raw3215_try_io(raw);
566 }
567 spin_unlock_irqrestore(raw->lock, flags);
568 }
569}
570
571/*
572 * Put character routine for 3215 devices
573 */
574static void
575raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
576{
577 unsigned long flags;
578 unsigned int length, i;
579
580 spin_lock_irqsave(raw->lock, flags);
581 if (ch == '\t') {
582 length = TAB_STOP_SIZE - (raw->line_pos%TAB_STOP_SIZE);
583 raw->line_pos += length;
584 ch = ' ';
585 } else if (ch == '\n') {
586 length = 1;
587 raw->line_pos = 0;
588 } else {
589 length = 1;
590 raw->line_pos++;
591 }
592 raw3215_make_room(raw, length);
593
594 for (i = 0; i < length; i++) {
595 raw->buffer[raw->head] = (char) _ascebc[(int) ch];
596 raw->head = (raw->head + 1) & (RAW3215_BUFFER_SIZE - 1);
597 raw->count++;
598 }
599 if (!(raw->flags & RAW3215_WORKING)) {
600 raw3215_mk_write_req(raw);
601 /* start or queue request */
602 raw3215_try_io(raw);
603 }
604 spin_unlock_irqrestore(raw->lock, flags);
605}
606
607/*
608 * Flush routine, it simply sets the flush flag and tries to start
609 * pending IO.
610 */
611static void
612raw3215_flush_buffer(struct raw3215_info *raw)
613{
614 unsigned long flags;
615
616 spin_lock_irqsave(raw->lock, flags);
617 if (raw->count > 0) {
618 raw->flags |= RAW3215_FLUSHING;
619 raw3215_try_io(raw);
620 raw->flags &= ~RAW3215_FLUSHING;
621 }
622 spin_unlock_irqrestore(raw->lock, flags);
623}
624
625/*
626 * Fire up a 3215 device.
627 */
628static int
629raw3215_startup(struct raw3215_info *raw)
630{
631 unsigned long flags;
632
633 if (raw->flags & RAW3215_ACTIVE)
634 return 0;
635 raw->line_pos = 0;
636 raw->flags |= RAW3215_ACTIVE;
637 spin_lock_irqsave(raw->lock, flags);
638 raw3215_try_io(raw);
639 spin_unlock_irqrestore(raw->lock, flags);
640
641 return 0;
642}
643
644/*
645 * Shutdown a 3215 device.
646 */
647static void
648raw3215_shutdown(struct raw3215_info *raw)
649{
650 DECLARE_WAITQUEUE(wait, current);
651 unsigned long flags;
652
653 if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FIXED))
654 return;
655 /* Wait for outstanding requests, then free irq */
656 spin_lock_irqsave(raw->lock, flags);
657 if ((raw->flags & RAW3215_WORKING) ||
658 raw->queued_write != NULL ||
659 raw->queued_read != NULL) {
660 raw->flags |= RAW3215_CLOSING;
661 add_wait_queue(&raw->empty_wait, &wait);
662 set_current_state(TASK_INTERRUPTIBLE);
663 spin_unlock_irqrestore(raw->lock, flags);
664 schedule();
665 spin_lock_irqsave(raw->lock, flags);
666 remove_wait_queue(&raw->empty_wait, &wait);
667 set_current_state(TASK_RUNNING);
668 raw->flags &= ~(RAW3215_ACTIVE | RAW3215_CLOSING);
669 }
670 spin_unlock_irqrestore(raw->lock, flags);
671}
672
673static int
674raw3215_probe (struct ccw_device *cdev)
675{
676 struct raw3215_info *raw;
677 int line;
678
679 raw = kmalloc(sizeof(struct raw3215_info) +
680 RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA);
681 if (raw == NULL)
682 return -ENOMEM;
683
684 spin_lock(&raw3215_device_lock);
685 for (line = 0; line < NR_3215; line++) {
686 if (!raw3215[line]) {
687 raw3215[line] = raw;
688 break;
689 }
690 }
691 spin_unlock(&raw3215_device_lock);
692 if (line == NR_3215) {
693 kfree(raw);
694 return -ENODEV;
695 }
696
697 raw->cdev = cdev;
698 raw->lock = get_ccwdev_lock(cdev);
699 raw->inbuf = (char *) raw + sizeof(struct raw3215_info);
700 memset(raw, 0, sizeof(struct raw3215_info));
701 raw->buffer = (char *) kmalloc(RAW3215_BUFFER_SIZE,
702 GFP_KERNEL|GFP_DMA);
703 if (raw->buffer == NULL) {
704 spin_lock(&raw3215_device_lock);
705 raw3215[line] = 0;
706 spin_unlock(&raw3215_device_lock);
707 kfree(raw);
708 return -ENOMEM;
709 }
710 tasklet_init(&raw->tasklet,
711 (void (*)(unsigned long)) raw3215_tasklet,
712 (unsigned long) raw);
713 init_waitqueue_head(&raw->empty_wait);
714
715 cdev->dev.driver_data = raw;
716 cdev->handler = raw3215_irq;
717
718 return 0;
719}
720
721static void
722raw3215_remove (struct ccw_device *cdev)
723{
724 struct raw3215_info *raw;
725
726 ccw_device_set_offline(cdev);
727 raw = cdev->dev.driver_data;
728 if (raw) {
729 cdev->dev.driver_data = NULL;
730 if (raw->buffer)
731 kfree(raw->buffer);
732 kfree(raw);
733 }
734}
735
736static int
737raw3215_set_online (struct ccw_device *cdev)
738{
739 struct raw3215_info *raw;
740
741 raw = cdev->dev.driver_data;
742 if (!raw)
743 return -ENODEV;
744
745 return raw3215_startup(raw);
746}
747
748static int
749raw3215_set_offline (struct ccw_device *cdev)
750{
751 struct raw3215_info *raw;
752
753 raw = cdev->dev.driver_data;
754 if (!raw)
755 return -ENODEV;
756
757 raw3215_shutdown(raw);
758
759 return 0;
760}
761
762static struct ccw_device_id raw3215_id[] = {
763 { CCW_DEVICE(0x3215, 0) },
764 { /* end of list */ },
765};
766
767static struct ccw_driver raw3215_ccw_driver = {
768 .name = "3215",
769 .owner = THIS_MODULE,
770 .ids = raw3215_id,
771 .probe = &raw3215_probe,
772 .remove = &raw3215_remove,
773 .set_online = &raw3215_set_online,
774 .set_offline = &raw3215_set_offline,
775};
776
777#ifdef CONFIG_TN3215_CONSOLE
778/*
779 * Write a string to the 3215 console
780 */
781static void
782con3215_write(struct console *co, const char *str, unsigned int count)
783{
784 struct raw3215_info *raw;
785 int i;
786
787 if (count <= 0)
788 return;
789 raw = raw3215[0]; /* console 3215 is the first one */
790 while (count > 0) {
791 for (i = 0; i < count; i++)
792 if (str[i] == '\t' || str[i] == '\n')
793 break;
794 raw3215_write(raw, str, i);
795 count -= i;
796 str += i;
797 if (count > 0) {
798 raw3215_putchar(raw, *str);
799 count--;
800 str++;
801 }
802 }
803}
804
805static struct tty_driver *con3215_device(struct console *c, int *index)
806{
807 *index = c->index;
808 return tty3215_driver;
809}
810
811/*
812 * panic() calls console_unblank before the system enters a
813 * disabled, endless loop.
814 */
815static void
816con3215_unblank(void)
817{
818 struct raw3215_info *raw;
819 unsigned long flags;
820
821 raw = raw3215[0]; /* console 3215 is the first one */
822 spin_lock_irqsave(raw->lock, flags);
823 raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
824 spin_unlock_irqrestore(raw->lock, flags);
825}
826
827static int __init
828con3215_consetup(struct console *co, char *options)
829{
830 return 0;
831}
832
833/*
834 * The console structure for the 3215 console
835 */
836static struct console con3215 = {
837 .name = "ttyS",
838 .write = con3215_write,
839 .device = con3215_device,
840 .unblank = con3215_unblank,
841 .setup = con3215_consetup,
842 .flags = CON_PRINTBUFFER,
843};
844
845/*
846 * 3215 console initialization code called from console_init().
847 * NOTE: This is called before kmalloc is available.
848 */
849static int __init
850con3215_init(void)
851{
852 struct ccw_device *cdev;
853 struct raw3215_info *raw;
854 struct raw3215_req *req;
855 int i;
856
857 /* Check if 3215 is to be the console */
858 if (!CONSOLE_IS_3215)
859 return -ENODEV;
860
861 /* Set the console mode for VM */
862 if (MACHINE_IS_VM) {
863 cpcmd("TERM CONMODE 3215", NULL, 0);
864 cpcmd("TERM AUTOCR OFF", NULL, 0);
865 }
866
867 /* allocate 3215 request structures */
868 raw3215_freelist = NULL;
869 spin_lock_init(&raw3215_freelist_lock);
870 for (i = 0; i < NR_3215_REQ; i++) {
871 req = (struct raw3215_req *) alloc_bootmem_low(sizeof(struct raw3215_req));
872 req->next = raw3215_freelist;
873 raw3215_freelist = req;
874 }
875
876 cdev = ccw_device_probe_console();
877 if (!cdev)
878 return -ENODEV;
879
880 raw3215[0] = raw = (struct raw3215_info *)
881 alloc_bootmem_low(sizeof(struct raw3215_info));
882 memset(raw, 0, sizeof(struct raw3215_info));
883 raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
884 raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
885 raw->cdev = cdev;
886 raw->lock = get_ccwdev_lock(cdev);
887 cdev->dev.driver_data = raw;
888 cdev->handler = raw3215_irq;
889
890 raw->flags |= RAW3215_FIXED;
891 tasklet_init(&raw->tasklet,
892 (void (*)(unsigned long)) raw3215_tasklet,
893 (unsigned long) raw);
894 init_waitqueue_head(&raw->empty_wait);
895
896 /* Request the console irq */
897 if (raw3215_startup(raw) != 0) {
898 free_bootmem((unsigned long) raw->inbuf, RAW3215_INBUF_SIZE);
899 free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
900 free_bootmem((unsigned long) raw, sizeof(struct raw3215_info));
901 raw3215[0] = NULL;
902 printk("Couldn't find a 3215 console device\n");
903 return -ENODEV;
904 }
905 register_console(&con3215);
906 return 0;
907}
908console_initcall(con3215_init);
909#endif
910
911/*
912 * tty3215_open
913 *
914 * This routine is called whenever a 3215 tty is opened.
915 */
916static int
917tty3215_open(struct tty_struct *tty, struct file * filp)
918{
919 struct raw3215_info *raw;
920 int retval, line;
921
922 line = tty->index;
923 if ((line < 0) || (line >= NR_3215))
924 return -ENODEV;
925
926 raw = raw3215[line];
927 if (raw == NULL)
928 return -ENODEV;
929
930 tty->driver_data = raw;
931 raw->tty = tty;
932
933 tty->low_latency = 0; /* don't use bottom half for pushing chars */
934 /*
935 * Start up 3215 device
936 */
937 retval = raw3215_startup(raw);
938 if (retval)
939 return retval;
940
941 return 0;
942}
943
944/*
945 * tty3215_close()
946 *
947 * This routine is called when the 3215 tty is closed. We wait
948 * for the remaining request to be completed. Then we clean up.
949 */
950static void
951tty3215_close(struct tty_struct *tty, struct file * filp)
952{
953 struct raw3215_info *raw;
954
955 raw = (struct raw3215_info *) tty->driver_data;
956 if (raw == NULL || tty->count > 1)
957 return;
958 tty->closing = 1;
959 /* Shutdown the terminal */
960 raw3215_shutdown(raw);
961 tty->closing = 0;
962 raw->tty = NULL;
963}
964
965/*
966 * Returns the amount of free space in the output buffer.
967 */
968static int
969tty3215_write_room(struct tty_struct *tty)
970{
971 struct raw3215_info *raw;
972
973 raw = (struct raw3215_info *) tty->driver_data;
974
975 /* Subtract TAB_STOP_SIZE to allow for a tab, 8 <<< 64K */
976 if ((RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE) >= 0)
977 return RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE;
978 else
979 return 0;
980}
981
982/*
983 * String write routine for 3215 ttys
984 */
985static int
986tty3215_write(struct tty_struct * tty,
987 const unsigned char *buf, int count)
988{
989 struct raw3215_info *raw;
990
991 if (!tty)
992 return 0;
993 raw = (struct raw3215_info *) tty->driver_data;
994 raw3215_write(raw, buf, count);
995 return count;
996}
997
998/*
999 * Put character routine for 3215 ttys
1000 */
1001static void
1002tty3215_put_char(struct tty_struct *tty, unsigned char ch)
1003{
1004 struct raw3215_info *raw;
1005
1006 if (!tty)
1007 return;
1008 raw = (struct raw3215_info *) tty->driver_data;
1009 raw3215_putchar(raw, ch);
1010}
1011
1012static void
1013tty3215_flush_chars(struct tty_struct *tty)
1014{
1015}
1016
1017/*
1018 * Returns the number of characters in the output buffer
1019 */
1020static int
1021tty3215_chars_in_buffer(struct tty_struct *tty)
1022{
1023 struct raw3215_info *raw;
1024
1025 raw = (struct raw3215_info *) tty->driver_data;
1026 return raw->count;
1027}
1028
1029static void
1030tty3215_flush_buffer(struct tty_struct *tty)
1031{
1032 struct raw3215_info *raw;
1033
1034 raw = (struct raw3215_info *) tty->driver_data;
1035 raw3215_flush_buffer(raw);
1036 tty_wakeup(tty);
1037}
1038
1039/*
1040 * Currently we don't have any io controls for 3215 ttys
1041 */
1042static int
1043tty3215_ioctl(struct tty_struct *tty, struct file * file,
1044 unsigned int cmd, unsigned long arg)
1045{
1046 if (tty->flags & (1 << TTY_IO_ERROR))
1047 return -EIO;
1048
1049 switch (cmd) {
1050 default:
1051 return -ENOIOCTLCMD;
1052 }
1053 return 0;
1054}
1055
1056/*
1057 * Disable reading from a 3215 tty
1058 */
1059static void
1060tty3215_throttle(struct tty_struct * tty)
1061{
1062 struct raw3215_info *raw;
1063
1064 raw = (struct raw3215_info *) tty->driver_data;
1065 raw->flags |= RAW3215_THROTTLED;
1066}
1067
1068/*
1069 * Enable reading from a 3215 tty
1070 */
1071static void
1072tty3215_unthrottle(struct tty_struct * tty)
1073{
1074 struct raw3215_info *raw;
1075 unsigned long flags;
1076
1077 raw = (struct raw3215_info *) tty->driver_data;
1078 if (raw->flags & RAW3215_THROTTLED) {
1079 spin_lock_irqsave(raw->lock, flags);
1080 raw->flags &= ~RAW3215_THROTTLED;
1081 raw3215_try_io(raw);
1082 spin_unlock_irqrestore(raw->lock, flags);
1083 }
1084}
1085
1086/*
1087 * Disable writing to a 3215 tty
1088 */
1089static void
1090tty3215_stop(struct tty_struct *tty)
1091{
1092 struct raw3215_info *raw;
1093
1094 raw = (struct raw3215_info *) tty->driver_data;
1095 raw->flags |= RAW3215_STOPPED;
1096}
1097
1098/*
1099 * Enable writing to a 3215 tty
1100 */
1101static void
1102tty3215_start(struct tty_struct *tty)
1103{
1104 struct raw3215_info *raw;
1105 unsigned long flags;
1106
1107 raw = (struct raw3215_info *) tty->driver_data;
1108 if (raw->flags & RAW3215_STOPPED) {
1109 spin_lock_irqsave(raw->lock, flags);
1110 raw->flags &= ~RAW3215_STOPPED;
1111 raw3215_try_io(raw);
1112 spin_unlock_irqrestore(raw->lock, flags);
1113 }
1114}
1115
/*
 * tty callbacks for the 3215 line-mode terminal; registered with the
 * tty core in tty3215_init via tty_set_operations.
 */
static struct tty_operations tty3215_ops = {
	.open = tty3215_open,
	.close = tty3215_close,
	.write = tty3215_write,
	.put_char = tty3215_put_char,
	.flush_chars = tty3215_flush_chars,
	.write_room = tty3215_write_room,
	.chars_in_buffer = tty3215_chars_in_buffer,
	.flush_buffer = tty3215_flush_buffer,
	.ioctl = tty3215_ioctl,
	.throttle = tty3215_throttle,
	.unthrottle = tty3215_unthrottle,
	.stop = tty3215_stop,
	.start = tty3215_start,
};
1131
1132/*
1133 * 3215 tty registration code called from tty_init().
1134 * Most kernel services (incl. kmalloc) are available at this poimt.
1135 */
1136int __init
1137tty3215_init(void)
1138{
1139 struct tty_driver *driver;
1140 int ret;
1141
1142 if (!CONSOLE_IS_3215)
1143 return 0;
1144
1145 driver = alloc_tty_driver(NR_3215);
1146 if (!driver)
1147 return -ENOMEM;
1148
1149 ret = ccw_driver_register(&raw3215_ccw_driver);
1150 if (ret) {
1151 put_tty_driver(driver);
1152 return ret;
1153 }
1154 /*
1155 * Initialize the tty_driver structure
1156 * Entries in tty3215_driver that are NOT initialized:
1157 * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
1158 */
1159
1160 driver->owner = THIS_MODULE;
1161 driver->driver_name = "tty3215";
1162 driver->name = "ttyS";
1163 driver->major = TTY_MAJOR;
1164 driver->minor_start = 64;
1165 driver->type = TTY_DRIVER_TYPE_SYSTEM;
1166 driver->subtype = SYSTEM_TYPE_TTY;
1167 driver->init_termios = tty_std_termios;
1168 driver->init_termios.c_iflag = IGNBRK | IGNPAR;
1169 driver->init_termios.c_oflag = ONLCR | XTABS;
1170 driver->init_termios.c_lflag = ISIG;
1171 driver->flags = TTY_DRIVER_REAL_RAW;
1172 tty_set_operations(driver, &tty3215_ops);
1173 ret = tty_register_driver(driver);
1174 if (ret) {
1175 printk("Couldn't register tty3215 driver\n");
1176 put_tty_driver(driver);
1177 return ret;
1178 }
1179 tty3215_driver = driver;
1180 return 0;
1181}
1182
/*
 * Module exit: tear everything down in reverse order of tty3215_init.
 */
static void __exit
tty3215_exit(void)
{
	tty_unregister_driver(tty3215_driver);
	put_tty_driver(tty3215_driver);
	ccw_driver_unregister(&raw3215_ccw_driver);
}

module_init(tty3215_init);
module_exit(tty3215_exit);
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
new file mode 100644
index 000000000000..d52fb57a6b19
--- /dev/null
+++ b/drivers/s390/char/con3270.c
@@ -0,0 +1,638 @@
1/*
2 * drivers/s390/char/con3270.c
3 * IBM/3270 Driver - console view.
4 *
5 * Author(s):
6 * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
7 * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
8 * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
9 */
10
11#include <linux/config.h>
12#include <linux/bootmem.h>
13#include <linux/console.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/list.h>
17#include <linux/types.h>
18
19#include <asm/ccwdev.h>
20#include <asm/cio.h>
21#include <asm/cpcmd.h>
22#include <asm/ebcdic.h>
23
24#include "raw3270.h"
25#include "ctrlchar.h"
26
#define CON3270_OUTPUT_BUFFER_SIZE 1024		/* size of the write request. */
#define CON3270_STRING_PAGES 4			/* bootmem pages for strings. */

static struct raw3270_fn con3270_fn;

/*
 * Main 3270 console view data structure.
 */
struct con3270 {
	struct raw3270_view view;
	spinlock_t lock;
	struct list_head freemem;	/* list of free memory for strings. */

	/* Output stuff. */
	struct list_head lines;		/* list of lines. */
	struct list_head update;	/* list of lines to update. */
	int line_nr;			/* line number for next update. */
	int nr_lines;			/* # lines in list. */
	int nr_up;			/* # lines up in history. */
	unsigned long update_flags;	/* Update indication bits. */
	struct string *cline;		/* current output line. */
	struct string *status;		/* last line of display. */
	struct raw3270_request *write;	/* single write request. */
	struct timer_list timer;

	/* Input stuff. */
	struct string *input;		/* input string for read request. */
	struct raw3270_request *read;	/* single read request. */
	struct raw3270_request *kreset;	/* single keyboard reset request. */
	struct tasklet_struct readlet;	/* tasklet to issue read request. */
};

/* The one and only console view; set up by con3270_init. */
static struct con3270 *condev;

/* con3270->update_flags. See con3270_update for details. */
#define CON_UPDATE_ERASE	1	/* Use EWRITEA instead of WRITE. */
#define CON_UPDATE_LIST		2	/* Update lines in con3270->update. */
#define CON_UPDATE_STATUS	4	/* Update status line. */
#define CON_UPDATE_ALL		7	/* All of the above. */

static void con3270_update(struct con3270 *);
68
69/*
70 * Setup timeout for a device. On timeout trigger an update.
71 */
72void
73con3270_set_timer(struct con3270 *cp, int expires)
74{
75 if (expires == 0) {
76 if (timer_pending(&cp->timer))
77 del_timer(&cp->timer);
78 return;
79 }
80 if (timer_pending(&cp->timer) &&
81 mod_timer(&cp->timer, jiffies + expires))
82 return;
83 cp->timer.function = (void (*)(unsigned long)) con3270_update;
84 cp->timer.data = (unsigned long) cp;
85 cp->timer.expires = jiffies + expires;
86 add_timer(&cp->timer);
87}
88
89/*
90 * The status line is the last line of the screen. It shows the string
91 * "console view" in the lower left corner and "Running"/"More..."/"Holding"
92 * in the lower right corner of the screen.
93 */
94static void
95con3270_update_status(struct con3270 *cp)
96{
97 char *str;
98
99 str = (cp->nr_up != 0) ? "History" : "Running";
100 memcpy(cp->status->string + 24, str, 7);
101 codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
102 cp->update_flags |= CON_UPDATE_STATUS;
103}
104
/*
 * Build the permanent status line from a blueprint: "console view" in
 * the lower left corner and a state word ("Running") in the lower
 * right corner. The state word is rewritten by con3270_update_status.
 */
static void
con3270_create_status(struct con3270 *cp)
{
	static const unsigned char blueprint[] =
		{ TO_SBA, 0, 0, TO_SF,TF_LOG,TO_SA,TAT_COLOR, TAC_GREEN,
		  'c','o','n','s','o','l','e',' ','v','i','e','w',
		  TO_RA,0,0,0,'R','u','n','n','i','n','g',TO_SF,TF_LOG };

	cp->status = alloc_string(&cp->freemem, sizeof(blueprint));
	/* Copy blueprint to status line */
	memcpy(cp->status->string, blueprint, sizeof(blueprint));
	/* Set TO_RA addresses: offset 1 = start of last row, offset 21
	 * = 8 bytes before the end of the screen (the state word). */
	raw3270_buffer_address(cp->view.dev, cp->status->string + 1,
			       cp->view.cols * (cp->view.rows - 1));
	raw3270_buffer_address(cp->view.dev, cp->status->string + 21,
			       cp->view.cols * cp->view.rows - 8);
	/* Convert strings to ebcdic. */
	codepage_convert(cp->view.ascebc, cp->status->string + 8, 12);
	codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
}
125
126/*
127 * Set output offsets to 3270 datastream fragment of a console string.
128 */
129static void
130con3270_update_string(struct con3270 *cp, struct string *s, int nr)
131{
132 if (s->len >= cp->view.cols - 5)
133 return;
134 raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
135 cp->view.cols * (nr + 1));
136}
137
138/*
139 * Rebuild update list to print all lines.
140 */
141static void
142con3270_rebuild_update(struct con3270 *cp)
143{
144 struct string *s, *n;
145 int nr;
146
147 /*
148 * Throw away update list and create a new one,
149 * containing all lines that will fit on the screen.
150 */
151 list_for_each_entry_safe(s, n, &cp->update, update)
152 list_del_init(&s->update);
153 nr = cp->view.rows - 2 + cp->nr_up;
154 list_for_each_entry_reverse(s, &cp->lines, list) {
155 if (nr < cp->view.rows - 1)
156 list_add(&s->update, &cp->update);
157 if (--nr < 0)
158 break;
159 }
160 cp->line_nr = 0;
161 cp->update_flags |= CON_UPDATE_LIST;
162}
163
164/*
165 * Alloc string for size bytes. Free strings from history if necessary.
166 */
167static struct string *
168con3270_alloc_string(struct con3270 *cp, size_t size)
169{
170 struct string *s, *n;
171
172 s = alloc_string(&cp->freemem, size);
173 if (s)
174 return s;
175 list_for_each_entry_safe(s, n, &cp->lines, list) {
176 list_del(&s->list);
177 if (!list_empty(&s->update))
178 list_del(&s->update);
179 cp->nr_lines--;
180 if (free_string(&cp->freemem, s) >= size)
181 break;
182 }
183 s = alloc_string(&cp->freemem, size);
184 BUG_ON(!s);
185 if (cp->nr_up != 0 && cp->nr_up + cp->view.rows > cp->nr_lines) {
186 cp->nr_up = cp->nr_lines - cp->view.rows + 1;
187 con3270_rebuild_update(cp);
188 con3270_update_status(cp);
189 }
190 return s;
191}
192
193/*
194 * Write completion callback.
195 */
196static void
197con3270_write_callback(struct raw3270_request *rq, void *data)
198{
199 raw3270_request_reset(rq);
200 xchg(&((struct con3270 *) rq->view)->write, rq);
201}
202
203/*
204 * Update console display.
205 */
206static void
207con3270_update(struct con3270 *cp)
208{
209 struct raw3270_request *wrq;
210 char wcc, prolog[6];
211 unsigned long flags;
212 unsigned long updated;
213 struct string *s, *n;
214 int rc;
215
216 wrq = xchg(&cp->write, 0);
217 if (!wrq) {
218 con3270_set_timer(cp, 1);
219 return;
220 }
221
222 spin_lock_irqsave(&cp->view.lock, flags);
223 updated = 0;
224 if (cp->update_flags & CON_UPDATE_ERASE) {
225 /* Use erase write alternate to initialize display. */
226 raw3270_request_set_cmd(wrq, TC_EWRITEA);
227 updated |= CON_UPDATE_ERASE;
228 } else
229 raw3270_request_set_cmd(wrq, TC_WRITE);
230
231 wcc = TW_NONE;
232 raw3270_request_add_data(wrq, &wcc, 1);
233
234 /*
235 * Update status line.
236 */
237 if (cp->update_flags & CON_UPDATE_STATUS)
238 if (raw3270_request_add_data(wrq, cp->status->string,
239 cp->status->len) == 0)
240 updated |= CON_UPDATE_STATUS;
241
242 if (cp->update_flags & CON_UPDATE_LIST) {
243 prolog[0] = TO_SBA;
244 prolog[3] = TO_SA;
245 prolog[4] = TAT_COLOR;
246 prolog[5] = TAC_TURQ;
247 raw3270_buffer_address(cp->view.dev, prolog + 1,
248 cp->view.cols * cp->line_nr);
249 raw3270_request_add_data(wrq, prolog, 6);
250 /* Write strings in the update list to the screen. */
251 list_for_each_entry_safe(s, n, &cp->update, update) {
252 if (s != cp->cline)
253 con3270_update_string(cp, s, cp->line_nr);
254 if (raw3270_request_add_data(wrq, s->string,
255 s->len) != 0)
256 break;
257 list_del_init(&s->update);
258 if (s != cp->cline)
259 cp->line_nr++;
260 }
261 if (list_empty(&cp->update))
262 updated |= CON_UPDATE_LIST;
263 }
264 wrq->callback = con3270_write_callback;
265 rc = raw3270_start(&cp->view, wrq);
266 if (rc == 0) {
267 cp->update_flags &= ~updated;
268 if (cp->update_flags)
269 con3270_set_timer(cp, 1);
270 } else {
271 raw3270_request_reset(wrq);
272 xchg(&cp->write, wrq);
273 }
274 spin_unlock_irqrestore(&cp->view.lock, flags);
275}
276
277/*
278 * Read tasklet.
279 */
280static void
281con3270_read_tasklet(struct raw3270_request *rrq)
282{
283 static char kreset_data = TW_KR;
284 struct con3270 *cp;
285 unsigned long flags;
286 int nr_up, deactivate;
287
288 cp = (struct con3270 *) rrq->view;
289 spin_lock_irqsave(&cp->view.lock, flags);
290 nr_up = cp->nr_up;
291 deactivate = 0;
292 /* Check aid byte. */
293 switch (cp->input->string[0]) {
294 case 0x7d: /* enter: jump to bottom. */
295 nr_up = 0;
296 break;
297 case 0xf3: /* PF3: deactivate the console view. */
298 deactivate = 1;
299 break;
300 case 0x6d: /* clear: start from scratch. */
301 con3270_rebuild_update(cp);
302 cp->update_flags = CON_UPDATE_ALL;
303 con3270_set_timer(cp, 1);
304 break;
305 case 0xf7: /* PF7: do a page up in the console log. */
306 nr_up += cp->view.rows - 2;
307 if (nr_up + cp->view.rows - 1 > cp->nr_lines) {
308 nr_up = cp->nr_lines - cp->view.rows + 1;
309 if (nr_up < 0)
310 nr_up = 0;
311 }
312 break;
313 case 0xf8: /* PF8: do a page down in the console log. */
314 nr_up -= cp->view.rows - 2;
315 if (nr_up < 0)
316 nr_up = 0;
317 break;
318 }
319 if (nr_up != cp->nr_up) {
320 cp->nr_up = nr_up;
321 con3270_rebuild_update(cp);
322 con3270_update_status(cp);
323 con3270_set_timer(cp, 1);
324 }
325 spin_unlock_irqrestore(&cp->view.lock, flags);
326
327 /* Start keyboard reset command. */
328 raw3270_request_reset(cp->kreset);
329 raw3270_request_set_cmd(cp->kreset, TC_WRITE);
330 raw3270_request_add_data(cp->kreset, &kreset_data, 1);
331 raw3270_start(&cp->view, cp->kreset);
332
333 if (deactivate)
334 raw3270_deactivate_view(&cp->view);
335
336 raw3270_request_reset(rrq);
337 xchg(&cp->read, rrq);
338 raw3270_put_view(&cp->view);
339}
340
341/*
342 * Read request completion callback.
343 */
344static void
345con3270_read_callback(struct raw3270_request *rq, void *data)
346{
347 raw3270_get_view(rq->view);
348 /* Schedule tasklet to pass input to tty. */
349 tasklet_schedule(&((struct con3270 *) rq->view)->readlet);
350}
351
352/*
353 * Issue a read request. Called only from interrupt function.
354 */
355static void
356con3270_issue_read(struct con3270 *cp)
357{
358 struct raw3270_request *rrq;
359 int rc;
360
361 rrq = xchg(&cp->read, 0);
362 if (!rrq)
363 /* Read already scheduled. */
364 return;
365 rrq->callback = con3270_read_callback;
366 rrq->callback_data = cp;
367 raw3270_request_set_cmd(rrq, TC_READMOD);
368 raw3270_request_set_data(rrq, cp->input->string, cp->input->len);
369 /* Issue the read modified request. */
370 rc = raw3270_start_irq(&cp->view, rrq);
371 if (rc)
372 raw3270_request_reset(rrq);
373}
374
375/*
376 * Switch to the console view.
377 */
378static int
379con3270_activate(struct raw3270_view *view)
380{
381 unsigned long flags;
382 struct con3270 *cp;
383
384 cp = (struct con3270 *) view;
385 spin_lock_irqsave(&cp->view.lock, flags);
386 cp->nr_up = 0;
387 con3270_rebuild_update(cp);
388 con3270_update_status(cp);
389 cp->update_flags = CON_UPDATE_ALL;
390 con3270_set_timer(cp, 1);
391 spin_unlock_irqrestore(&cp->view.lock, flags);
392 return 0;
393}
394
/*
 * Leave the console view: stop the update timer.
 */
static void
con3270_deactivate(struct raw3270_view *view)
{
	unsigned long flags;
	struct con3270 *cp;

	cp = (struct con3270 *) view;
	spin_lock_irqsave(&cp->view.lock, flags);
	del_timer(&cp->timer);
	spin_unlock_irqrestore(&cp->view.lock, flags);
}
406
/*
 * Interrupt handler for the console view: trigger a read on an
 * attention interrupt and record the result of a finished request.
 */
static int
con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
{
	/* Handle ATTN. Schedule tasklet to read aid. */
	if (irb->scsw.dstat & DEV_STAT_ATTENTION)
		con3270_issue_read(cp);

	if (rq) {
		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
			rq->rc = -EIO;
		else
			/* Normal end. Copy residual count. */
			rq->rescnt = irb->scsw.count;
	}
	return RAW3270_IO_DONE;
}
423
/* Console view to a 3270 device. The intv cast is needed because
 * con3270_irq takes the derived con3270 pointer (view is its first
 * member) instead of the raw3270_view pointer. */
static struct raw3270_fn con3270_fn = {
	.activate = con3270_activate,
	.deactivate = con3270_deactivate,
	.intv = (void *) con3270_irq
};
430
/*
 * Append the current output line to the line list if it is not
 * already on it, and rebuild the update list.
 */
static inline void
con3270_cline_add(struct con3270 *cp)
{
	if (!list_empty(&cp->cline->list))
		/* Already added. */
		return;
	list_add_tail(&cp->cline->list, &cp->lines);
	cp->nr_lines++;
	con3270_rebuild_update(cp);
}
441
/*
 * Add one character to the current output line (converted to ebcdic,
 * control characters replaced by blanks) and queue the line for an
 * update if it isn't queued already.
 */
static inline void
con3270_cline_insert(struct con3270 *cp, unsigned char c)
{
	cp->cline->string[cp->cline->len++] =
		cp->view.ascebc[(c < ' ') ? ' ' : c];
	if (list_empty(&cp->cline->update)) {
		list_add_tail(&cp->cline->update, &cp->update);
		cp->update_flags |= CON_UPDATE_LIST;
	}
}
452
/*
 * Terminate the current output line: copy it into a right-sized pool
 * string, take over cline's place in the line/update lists, and reset
 * cline for the next line.
 */
static inline void
con3270_cline_end(struct con3270 *cp)
{
	struct string *s;
	unsigned int size;

	/* Copy cline. Short lines get 4 extra bytes for a trailing
	 * TO_RA order; full-width lines are padded to screen width. */
	size = (cp->cline->len < cp->view.cols - 5) ?
		cp->cline->len + 4 : cp->view.cols;
	s = con3270_alloc_string(cp, size);
	memcpy(s->string, cp->cline->string, cp->cline->len);
	if (s->len < cp->view.cols - 5) {
		/* TO_RA order; its address bytes (len-3..len-1) are
		 * patched later by con3270_update_string. */
		s->string[s->len - 4] = TO_RA;
		s->string[s->len - 1] = 0;
	} else {
		/* Pad to the full screen width with ebcdic blanks. */
		while (--size > cp->cline->len)
			s->string[size] = cp->view.ascebc[' '];
	}
	/* Replace cline with allocated line s and reset cline. */
	list_add(&s->list, &cp->cline->list);
	list_del_init(&cp->cline->list);
	if (!list_empty(&cp->cline->update)) {
		list_add(&s->update, &cp->cline->update);
		list_del_init(&cp->cline->update);
	}
	cp->cline->len = 0;
}
480
481/*
482 * Write a string to the 3270 console
483 */
484static void
485con3270_write(struct console *co, const char *str, unsigned int count)
486{
487 struct con3270 *cp;
488 unsigned long flags;
489 unsigned char c;
490
491 cp = condev;
492 if (cp->view.dev)
493 raw3270_activate_view(&cp->view);
494 spin_lock_irqsave(&cp->view.lock, flags);
495 while (count-- > 0) {
496 c = *str++;
497 if (cp->cline->len == 0)
498 con3270_cline_add(cp);
499 if (c != '\n')
500 con3270_cline_insert(cp, c);
501 if (c == '\n' || cp->cline->len >= cp->view.cols)
502 con3270_cline_end(cp);
503 }
504 /* Setup timer to output current console buffer after 1/10 second */
505 if (cp->view.dev && !timer_pending(&cp->timer))
506 con3270_set_timer(cp, HZ/10);
507 spin_unlock_irqrestore(&cp->view.lock,flags);
508}
509
/* Provided by the tty3270 driver. */
extern struct tty_driver *tty3270_driver;

/*
 * Return the tty driver/index that corresponds to this console.
 */
static struct tty_driver *
con3270_device(struct console *c, int *index)
{
	*index = c->index;
	return tty3270_driver;
}
518
519/*
520 * Wait for end of write request.
521 */
522static void
523con3270_wait_write(struct con3270 *cp)
524{
525 while (!cp->write) {
526 raw3270_wait_cons_dev(cp->view.dev);
527 barrier();
528 }
529}
530
531/*
532 * panic() calls console_unblank before the system enters a
533 * disabled, endless loop.
534 */
535static void
536con3270_unblank(void)
537{
538 struct con3270 *cp;
539 unsigned long flags;
540
541 cp = condev;
542 if (!cp->view.dev)
543 return;
544 spin_lock_irqsave(&cp->view.lock, flags);
545 con3270_wait_write(cp);
546 cp->nr_up = 0;
547 con3270_rebuild_update(cp);
548 con3270_update_status(cp);
549 while (cp->update_flags != 0) {
550 spin_unlock_irqrestore(&cp->view.lock, flags);
551 con3270_update(cp);
552 spin_lock_irqsave(&cp->view.lock, flags);
553 con3270_wait_write(cp);
554 }
555 spin_unlock_irqrestore(&cp->view.lock, flags);
556}
557
/*
 * Console setup call-back; no command line options are evaluated.
 */
static int __init
con3270_consetup(struct console *co, char *options)
{
	return 0;
}
563
564/*
565 * The console structure for the 3270 console
566 */
567static struct console con3270 = {
568 .name = "tty3270",
569 .write = con3270_write,
570 .device = con3270_device,
571 .unblank = con3270_unblank,
572 .setup = con3270_consetup,
573 .flags = CON_PRINTBUFFER,
574};
575
576/*
577 * 3270 console initialization code called from console_init().
578 * NOTE: This is called before kmalloc is available.
579 */
580static int __init
581con3270_init(void)
582{
583 struct ccw_device *cdev;
584 struct raw3270 *rp;
585 void *cbuf;
586 int i;
587
588 /* Check if 3270 is to be the console */
589 if (!CONSOLE_IS_3270)
590 return -ENODEV;
591
592 /* Set the console mode for VM */
593 if (MACHINE_IS_VM) {
594 cpcmd("TERM CONMODE 3270", 0, 0);
595 cpcmd("TERM AUTOCR OFF", 0, 0);
596 }
597
598 cdev = ccw_device_probe_console();
599 if (!cdev)
600 return -ENODEV;
601 rp = raw3270_setup_console(cdev);
602 if (IS_ERR(rp))
603 return PTR_ERR(rp);
604
605 condev = (struct con3270 *) alloc_bootmem_low(sizeof(struct con3270));
606 memset(condev, 0, sizeof(struct con3270));
607 condev->view.dev = rp;
608
609 condev->read = raw3270_request_alloc_bootmem(0);
610 condev->read->callback = con3270_read_callback;
611 condev->read->callback_data = condev;
612 condev->write =
613 raw3270_request_alloc_bootmem(CON3270_OUTPUT_BUFFER_SIZE);
614 condev->kreset = raw3270_request_alloc_bootmem(1);
615
616 INIT_LIST_HEAD(&condev->lines);
617 INIT_LIST_HEAD(&condev->update);
618 init_timer(&condev->timer);
619 tasklet_init(&condev->readlet,
620 (void (*)(unsigned long)) con3270_read_tasklet,
621 (unsigned long) condev->read);
622
623 raw3270_add_view(&condev->view, &con3270_fn, 0);
624
625 INIT_LIST_HEAD(&condev->freemem);
626 for (i = 0; i < CON3270_STRING_PAGES; i++) {
627 cbuf = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
628 add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
629 }
630 condev->cline = alloc_string(&condev->freemem, condev->view.cols);
631 condev->cline->len = 0;
632 con3270_create_status(condev);
633 condev->input = alloc_string(&condev->freemem, 80);
634 register_console(&con3270);
635 return 0;
636}
637
638console_initcall(con3270_init);
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
new file mode 100644
index 000000000000..be463242cf0f
--- /dev/null
+++ b/drivers/s390/char/ctrlchar.c
@@ -0,0 +1,75 @@
1/*
2 * drivers/s390/char/ctrlchar.c
3 * Unified handling of special chars.
4 *
5 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
7 *
8 */
9
10#include <linux/config.h>
11#include <linux/stddef.h>
12#include <asm/errno.h>
13#include <linux/sysrq.h>
14#include <linux/ctype.h>
15
16#include "ctrlchar.h"
17
#ifdef CONFIG_MAGIC_SYSRQ
/* Key captured by ctrlchar_handle, consumed by the deferred work. */
static int ctrlchar_sysrq_key;

/*
 * Deferred sysrq handler; runs in process context via schedule_work.
 */
static void
ctrlchar_handle_sysrq(void *tty)
{
	handle_sysrq(ctrlchar_sysrq_key, NULL, (struct tty_struct *) tty);
}

static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq, 0);
#endif
29
30
31/**
32 * Check for special chars at start of input.
33 *
34 * @param buf Console input buffer.
35 * @param len Length of valid data in buffer.
36 * @param tty The tty struct for this console.
37 * @return CTRLCHAR_NONE, if nothing matched,
38 * CTRLCHAR_SYSRQ, if sysrq was encountered
39 * otherwise char to be inserted logically or'ed
40 * with CTRLCHAR_CTRL
41 */
42unsigned int
43ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
44{
45 if ((len < 2) || (len > 3))
46 return CTRLCHAR_NONE;
47
48 /* hat is 0xb1 in codepage 037 (US etc.) and thus */
49 /* converted to 0x5e in ascii ('^') */
50 if ((buf[0] != '^') && (buf[0] != '\252'))
51 return CTRLCHAR_NONE;
52
53#ifdef CONFIG_MAGIC_SYSRQ
54 /* racy */
55 if (len == 3 && buf[1] == '-') {
56 ctrlchar_sysrq_key = buf[2];
57 ctrlchar_work.data = tty;
58 schedule_work(&ctrlchar_work);
59 return CTRLCHAR_SYSRQ;
60 }
61#endif
62
63 if (len != 2)
64 return CTRLCHAR_NONE;
65
66 switch (tolower(buf[1])) {
67 case 'c':
68 return INTR_CHAR(tty) | CTRLCHAR_CTRL;
69 case 'd':
70 return EOF_CHAR(tty) | CTRLCHAR_CTRL;
71 case 'z':
72 return SUSP_CHAR(tty) | CTRLCHAR_CTRL;
73 }
74 return CTRLCHAR_NONE;
75}
diff --git a/drivers/s390/char/ctrlchar.h b/drivers/s390/char/ctrlchar.h
new file mode 100644
index 000000000000..935ffa0ea7c6
--- /dev/null
+++ b/drivers/s390/char/ctrlchar.h
@@ -0,0 +1,20 @@
1/*
2 * drivers/s390/char/ctrlchar.c
3 * Unified handling of special chars.
4 *
5 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
7 *
8 */
9
10#include <linux/tty.h>
11
12extern unsigned int
13ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty);
14
15
16#define CTRLCHAR_NONE (1 << 8)
17#define CTRLCHAR_CTRL (2 << 8)
18#define CTRLCHAR_SYSRQ (3 << 8)
19
20#define CTRLCHAR_MASK (~0xffu)
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
new file mode 100644
index 000000000000..ca15adb140d1
--- /dev/null
+++ b/drivers/s390/char/defkeymap.c
@@ -0,0 +1,156 @@
1
2/* Do not edit this file! It was automatically generated by */
3/* loadkeys --mktable defkeymap.map > defkeymap.c */
4
5#include <linux/types.h>
6#include <linux/keyboard.h>
7#include <linux/kd.h>
8
/* Unshifted translation table, indexed by 3270 keycode (generated). */
u_short plain_map[NR_KEYS] = {
	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,	0xf000,
	0xf020,	0xf000,	0xf0e2,	0xf0e4,	0xf0e0,	0xf0e1,	0xf0e3,	0xf0e5,
	0xf0e7,	0xf0f1,	0xf0a2,	0xf02e,	0xf03c,	0xf028,	0xf02b,	0xf07c,
	0xf026,	0xf0e9,	0xf0e2,	0xf0eb,	0xf0e8,	0xf0ed,	0xf0ee,	0xf0ef,
	0xf0ec,	0xf0df,	0xf021,	0xf024,	0xf02a,	0xf029,	0xf03b,	0xf0ac,
	0xf02d,	0xf02f,	0xf0c2,	0xf0c4,	0xf0c0,	0xf0c1,	0xf0c3,	0xf0c5,
	0xf0c7,	0xf0d1,	0xf0a6,	0xf02c,	0xf025,	0xf05f,	0xf03e,	0xf03f,
	0xf0f8,	0xf0c9,	0xf0ca,	0xf0cb,	0xf0c8,	0xf0cd,	0xf0ce,	0xf0cf,
	0xf0cc,	0xf060,	0xf03a,	0xf023,	0xf040,	0xf027,	0xf03d,	0xf022,
};
27
/* Shift-modifier translation table (generated). */
static u_short shift_map[NR_KEYS] = {
	0xf0d8,	0xf061,	0xf062,	0xf063,	0xf064,	0xf065,	0xf066,	0xf067,
	0xf068,	0xf069,	0xf0ab,	0xf0bb,	0xf0f0,	0xf0fd,	0xf0fe,	0xf0b1,
	0xf0b0,	0xf06a,	0xf06b,	0xf06c,	0xf06d,	0xf06e,	0xf06f,	0xf070,
	0xf071,	0xf072,	0xf000,	0xf000,	0xf0e6,	0xf0b8,	0xf0c6,	0xf0a4,
	0xf0b5,	0xf07e,	0xf073,	0xf074,	0xf075,	0xf076,	0xf077,	0xf078,
	0xf079,	0xf07a,	0xf0a1,	0xf0bf,	0xf0d0,	0xf0dd,	0xf0de,	0xf0ae,
	0xf402,	0xf0a3,	0xf0a5,	0xf0b7,	0xf0a9,	0xf0a7,	0xf0b6,	0xf0bc,
	0xf0bd,	0xf0be,	0xf05b,	0xf05d,	0xf000,	0xf0a8,	0xf0b4,	0xf0d7,
	0xf07b,	0xf041,	0xf042,	0xf043,	0xf044,	0xf045,	0xf046,	0xf047,
	0xf048,	0xf049,	0xf000,	0xf0f4,	0xf0f6,	0xf0f2,	0xf0f3,	0xf0f5,
	0xf07d,	0xf04a,	0xf04b,	0xf04c,	0xf04d,	0xf04e,	0xf04f,	0xf050,
	0xf051,	0xf052,	0xf0b9,	0xf0fb,	0xf0fc,	0xf0f9,	0xf0fa,	0xf0ff,
	0xf05c,	0xf0f7,	0xf053,	0xf054,	0xf055,	0xf056,	0xf057,	0xf058,
	0xf059,	0xf05a,	0xf0b2,	0xf0d4,	0xf0d6,	0xf0d2,	0xf0d3,	0xf0d5,
	0xf030,	0xf031,	0xf032,	0xf033,	0xf034,	0xf035,	0xf036,	0xf037,
	0xf038,	0xf039,	0xf0b3,	0xf0db,	0xf0dc,	0xf0d9,	0xf0da,	0xf000,
};
46
/* Ctrl-modifier translation table; mostly unbound (0xf200) (generated). */
static u_short ctrl_map[NR_KEYS] = {
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf11f,	0xf120,	0xf121,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf01a,	0xf003,	0xf212,	0xf004,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf109,	0xf10a,	0xf206,	0xf00a,	0xf200,	0xf200,
};
65
/* Shift+Ctrl translation table; mostly unbound (0xf200) (generated). */
static u_short shift_ctrl_map[NR_KEYS] = {
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf10c,	0xf10d,	0xf10e,	0xf10f,	0xf110,	0xf111,	0xf112,
	0xf113,	0xf11e,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
	0xf200,	0xf100,	0xf101,	0xf211,	0xf103,	0xf104,	0xf105,	0xf20b,
	0xf20a,	0xf108,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,	0xf200,
};
84
/* Keymap pointers indexed by modifier combination
 * (0 = plain, 1 = shift, 4 = ctrl, 5 = shift+ctrl). */
ushort *key_maps[MAX_NR_KEYMAPS] = {
	plain_map, shift_map, 0, 0,
	ctrl_map, shift_ctrl_map, 0
};

/* Number of non-NULL keymaps above. */
unsigned int keymap_count = 4;
91
92
93/*
94 * Philosophy: most people do not define more strings, but they who do
95 * often want quite a lot of string space. So, we statically allocate
96 * the default and allocate dynamically in chunks of 512 bytes.
97 */
98
/* NUL-terminated function-key escape sequences, indexed via func_table. */
char func_buf[] = {
	'\033', '[', '[', 'A', 0,
	'\033', '[', '[', 'B', 0,
	'\033', '[', '[', 'C', 0,
	'\033', '[', '[', 'D', 0,
	'\033', '[', '[', 'E', 0,
	'\033', '[', '1', '7', '~', 0,
	'\033', '[', '1', '8', '~', 0,
	'\033', '[', '1', '9', '~', 0,
	'\033', '[', '2', '0', '~', 0,
	'\033', '[', '2', '1', '~', 0,
	'\033', '[', '2', '3', '~', 0,
	'\033', '[', '2', '4', '~', 0,
	'\033', '[', '2', '5', '~', 0,
	'\033', '[', '2', '6', '~', 0,
	'\033', '[', '2', '8', '~', 0,
	'\033', '[', '2', '9', '~', 0,
	'\033', '[', '3', '1', '~', 0,
	'\033', '[', '3', '2', '~', 0,
	'\033', '[', '3', '3', '~', 0,
	'\033', '[', '3', '4', '~', 0,
};
121
122
/* Function-string buffer bookkeeping; presumably consumed by the
 * keyboard code — confirm against keyboard.c. */
char *funcbufptr = func_buf;
int funcbufsize = sizeof(func_buf);
int funcbufleft = 0;          /* space left */

/* Start offsets of the individual NUL-terminated strings in func_buf. */
char *func_table[MAX_NR_FUNC] = {
	func_buf + 0,
	func_buf + 5,
	func_buf + 10,
	func_buf + 15,
	func_buf + 20,
	func_buf + 25,
	func_buf + 31,
	func_buf + 37,
	func_buf + 43,
	func_buf + 49,
	func_buf + 55,
	func_buf + 61,
	func_buf + 67,
	func_buf + 73,
	func_buf + 79,
	func_buf + 85,
	func_buf + 91,
	func_buf + 97,
	func_buf + 103,
	func_buf + 109,
	0,
};
150
/* Dead-key compositions: '^' + letter maps to a control character. */
struct kbdiacr accent_table[MAX_DIACR] = {
	{'^', 'c', '\003'}, {'^', 'd', '\004'},
	{'^', 'z', '\032'}, {'^', '\012', '\000'},
};

/* Number of valid entries in accent_table. */
unsigned int accent_table_size = 4;
diff --git a/drivers/s390/char/defkeymap.map b/drivers/s390/char/defkeymap.map
new file mode 100644
index 000000000000..353b3f268824
--- /dev/null
+++ b/drivers/s390/char/defkeymap.map
@@ -0,0 +1,191 @@
1# Default keymap for 3270 (ebcdic codepage 037).
2keymaps 0-1,4-5
3
4keycode 0 = nul Oslash
5keycode 1 = nul a
6keycode 2 = nul b
7keycode 3 = nul c
8keycode 4 = nul d
9keycode 5 = nul e
10keycode 6 = nul f
11keycode 7 = nul g
12keycode 8 = nul h
13keycode 9 = nul i
14keycode 10 = nul guillemotleft
15keycode 11 = nul guillemotright
16keycode 12 = nul eth
17keycode 13 = nul yacute
18keycode 14 = nul thorn
19keycode 15 = nul plusminus
20keycode 16 = nul degree
21keycode 17 = nul j
22keycode 18 = nul k
23keycode 19 = nul l
24keycode 20 = nul m
25keycode 21 = nul n
26keycode 22 = nul o
27keycode 23 = nul p
28keycode 24 = nul q
29keycode 25 = nul r
30keycode 26 = nul nul
31keycode 27 = nul nul
32keycode 28 = nul ae
33keycode 29 = nul cedilla
34keycode 30 = nul AE
35keycode 31 = nul currency
36keycode 32 = nul mu
37keycode 33 = nul tilde
38keycode 34 = nul s
39keycode 35 = nul t
40keycode 36 = nul u
41keycode 37 = nul v
42keycode 38 = nul w
43keycode 39 = nul x
44keycode 40 = nul y
45keycode 41 = nul z
46keycode 42 = nul exclamdown
47keycode 43 = nul questiondown
48keycode 44 = nul ETH
49keycode 45 = nul Yacute
50keycode 46 = nul THORN
51keycode 47 = nul registered
52keycode 48 = nul dead_circumflex
53keycode 49 = nul sterling
54keycode 50 = nul yen
55keycode 51 = nul periodcentered
56keycode 52 = nul copyright
57keycode 53 = nul section
58keycode 54 = nul paragraph
59keycode 55 = nul onequarter
60keycode 56 = nul onehalf
61keycode 57 = nul threequarters
62keycode 58 = nul bracketleft
63keycode 59 = nul bracketright
64keycode 60 = nul nul
65keycode 61 = nul diaeresis
66keycode 62 = nul acute
67keycode 63 = nul multiply
68keycode 64 = space braceleft
69keycode 65 = nul A
70keycode 66 = acircumflex B
71keycode 67 = adiaeresis C
72keycode 68 = agrave D
73keycode 69 = aacute E
74keycode 70 = atilde F
75keycode 71 = aring G
76keycode 72 = ccedilla H
77keycode 73 = ntilde I
78keycode 74 = cent nul
79keycode 75 = period ocircumflex
80keycode 76 = less odiaeresis
81keycode 77 = parenleft ograve
82keycode 78 = plus oacute
83keycode 79 = bar otilde
84keycode 80 = ampersand braceright
85keycode 81 = eacute J
keycode 82 = ecircumflex K
87keycode 83 = ediaeresis L
88keycode 84 = egrave M
89keycode 85 = iacute N
90keycode 86 = icircumflex O
91keycode 87 = idiaeresis P
92keycode 88 = igrave Q
93keycode 89 = ssharp R
94keycode 90 = exclam onesuperior
95keycode 91 = dollar ucircumflex
96keycode 92 = asterisk udiaeresis
97keycode 93 = parenright ugrave
98keycode 94 = semicolon uacute
99keycode 95 = notsign ydiaeresis
100keycode 96 = minus backslash
101keycode 97 = slash division
102keycode 98 = Acircumflex S
103keycode 99 = Adiaeresis T
104keycode 100 = Agrave U
105keycode 101 = Aacute V
106keycode 102 = Atilde W
107keycode 103 = Aring X
108keycode 104 = Ccedilla Y
109keycode 105 = Ntilde Z
110keycode 106 = brokenbar twosuperior
111keycode 107 = comma Ocircumflex
112keycode 108 = percent Odiaeresis
113keycode 109 = underscore Ograve
114keycode 110 = greater Oacute
115keycode 111 = question Otilde
116keycode 112 = oslash zero
117keycode 113 = Eacute one
118keycode 114 = Ecircumflex two
119keycode 115 = Ediaeresis three
120keycode 116 = Egrave four
121keycode 117 = Iacute five
122keycode 118 = Icircumflex six
123keycode 119 = Idiaeresis seven
124keycode 120 = Igrave eight
125keycode 121 = grave nine
126keycode 122 = colon threesuperior
127keycode 123 = numbersign Ucircumflex
128keycode 124 = at Udiaeresis
129keycode 125 = apostrophe Ugrave
130keycode 126 = equal Uacute
131keycode 127 = quotedbl nul
132
133# AID keys
134control keycode 74 = F22
135control keycode 75 = F23
136control keycode 76 = F24
137control keycode 107 = Control_z # PA3
138control keycode 108 = Control_c # PA1
139control keycode 109 = KeyboardSignal # Clear
140control keycode 110 = Control_d # PA2
141control keycode 122 = F10
142control keycode 123 = F11 # F11
143control keycode 124 = Last_Console # F12
144control keycode 125 = Linefeed
145shift control keycode 65 = F13
146shift control keycode 66 = F14
147shift control keycode 67 = F15
148shift control keycode 68 = F16
149shift control keycode 69 = F17
150shift control keycode 70 = F18
151shift control keycode 71 = F19
152shift control keycode 72 = F20
153shift control keycode 73 = F21
154shift control keycode 113 = F1
155shift control keycode 114 = F2
156shift control keycode 115 = Incr_Console
157shift control keycode 116 = F4
158shift control keycode 117 = F5
159shift control keycode 118 = F6
160shift control keycode 119 = Scroll_Backward
161shift control keycode 120 = Scroll_Forward
162shift control keycode 121 = F9
163
164string F1 = "\033[[A"
165string F2 = "\033[[B"
166string F3 = "\033[[C"
167string F4 = "\033[[D"
168string F5 = "\033[[E"
169string F6 = "\033[17~"
170string F7 = "\033[18~"
171string F8 = "\033[19~"
172string F9 = "\033[20~"
173string F10 = "\033[21~"
174string F11 = "\033[23~"
175string F12 = "\033[24~"
176string F13 = "\033[25~"
177string F14 = "\033[26~"
178string F15 = "\033[28~"
179string F16 = "\033[29~"
180string F17 = "\033[31~"
181string F18 = "\033[32~"
182string F19 = "\033[33~"
183string F20 = "\033[34~"
184# string F21 ??
185# string F22 ??
186# string F23 ??
187# string F24 ??
188compose '^' 'c' to Control_c
189compose '^' 'd' to Control_d
190compose '^' 'z' to Control_z
191compose '^' '\012' to nul
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
new file mode 100644
index 000000000000..60afcdcf91c2
--- /dev/null
+++ b/drivers/s390/char/fs3270.c
@@ -0,0 +1,373 @@
1/*
2 * drivers/s390/char/fs3270.c
3 * IBM/3270 Driver - fullscreen driver.
4 *
5 * Author(s):
6 * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
7 * Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com>
8 * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
9 */
10
11#include <linux/config.h>
12#include <linux/bootmem.h>
13#include <linux/console.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/list.h>
17#include <linux/types.h>
18
19#include <asm/ccwdev.h>
20#include <asm/cio.h>
21#include <asm/cpcmd.h>
22#include <asm/ebcdic.h>
23#include <asm/idals.h>
24
25#include "raw3270.h"
26#include "ctrlchar.h"
27
28struct raw3270_fn fs3270_fn;
29
30struct fs3270 {
31 struct raw3270_view view;
32 pid_t fs_pid; /* Pid of controlling program. */
33 int read_command; /* ccw command to use for reads. */
34 int write_command; /* ccw command to use for writes. */
35 int attention; /* Got attention. */
36 struct raw3270_request *clear; /* single clear request. */
37 wait_queue_head_t attn_wait; /* Attention wait queue. */
38};
39
/*
 * Request completion callback: wake the wait queue passed as callback
 * data (set up by fs3270_do_io).
 */
static void
fs3270_wake_up(struct raw3270_request *rq, void *data)
{
	wake_up((wait_queue_head_t *) data);
}
45
/*
 * Start a 3270 request and wait synchronously for it to finish.
 * Returns a negative errno if the request could not be started,
 * otherwise the request's own completion code (rq->rc).
 */
static int
fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
{
	wait_queue_head_t wq;
	int rc;

	/* On-stack wait queue; fs3270_wake_up signals it on completion. */
	init_waitqueue_head(&wq);
	rq->callback = fs3270_wake_up;
	rq->callback_data = &wq;
	rc = raw3270_start(view, rq);
	if (rc)
		return rc;
	/* Started successfully. Now wait for completion. */
	wait_event(wq, raw3270_request_final(rq));
	return rq->rc;
}
62
/*
 * Completion callback for the one-shot clear request: reset the
 * request structure instead of freeing it, so fs3270_activate can
 * reuse it on the next activation.
 */
static void
fs3270_reset_callback(struct raw3270_request *rq, void *data)
{
	raw3270_request_reset(rq);
}
68
69/*
70 * Switch to the fullscreen view.
71 */
72static int
73fs3270_activate(struct raw3270_view *view)
74{
75 struct fs3270 *fp;
76
77 fp = (struct fs3270 *) view;
78 raw3270_request_set_cmd(fp->clear, TC_EWRITEA);
79 fp->clear->callback = fs3270_reset_callback;
80 return raw3270_start(view, fp->clear);
81}
82
83/*
84 * Shutdown fullscreen view.
85 */
86static void
87fs3270_deactivate(struct raw3270_view *view)
88{
89 // FIXME: is this a good idea? The user program using fullscreen 3270
90 // will die just because a console message appeared. On the other
91 // hand the fullscreen device is unoperational now.
92 struct fs3270 *fp;
93
94 fp = (struct fs3270 *) view;
95 if (fp->fs_pid != 0)
96 kill_proc(fp->fs_pid, SIGHUP, 1);
97 fp->fs_pid = 0;
98}
99
/*
 * Interrupt handler for the fullscreen view.
 * An attention interrupt means the operator hit an AID key: flag it
 * and wake readers blocked in fs3270_read. For a completed request,
 * record -EIO on unit check, otherwise the residual byte count.
 */
static int
fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
{
	/* Handle ATTN. Set indication and wake waiters for attention. */
	if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
		fp->attention = 1;
		wake_up(&fp->attn_wait);
	}

	if (rq) {
		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
			rq->rc = -EIO;
		else
			/* Normal end. Copy residual count. */
			rq->rescnt = irb->scsw.count;
	}
	return RAW3270_IO_DONE;
}
118
119/*
120 * Process reads from fullscreen 3270.
121 */
122static ssize_t
123fs3270_read(struct file *filp, char *data, size_t count, loff_t *off)
124{
125 struct fs3270 *fp;
126 struct raw3270_request *rq;
127 struct idal_buffer *ib;
128 int rc;
129
130 if (count == 0 || count > 65535)
131 return -EINVAL;
132 fp = filp->private_data;
133 if (!fp)
134 return -ENODEV;
135 ib = idal_buffer_alloc(count, 0);
136 if (!ib)
137 return -ENOMEM;
138 rq = raw3270_request_alloc(0);
139 if (!IS_ERR(rq)) {
140 if (fp->read_command == 0 && fp->write_command != 0)
141 fp->read_command = 6;
142 raw3270_request_set_cmd(rq, fp->read_command ? : 2);
143 raw3270_request_set_idal(rq, ib);
144 wait_event(fp->attn_wait, fp->attention);
145 rc = fs3270_do_io(&fp->view, rq);
146 if (rc == 0 && idal_buffer_to_user(ib, data, count))
147 rc = -EFAULT;
148 raw3270_request_free(rq);
149 } else
150 rc = PTR_ERR(rq);
151 idal_buffer_free(ib);
152 return rc;
153}
154
155/*
156 * Process writes to fullscreen 3270.
157 */
158static ssize_t
159fs3270_write(struct file *filp, const char *data, size_t count, loff_t *off)
160{
161 struct fs3270 *fp;
162 struct raw3270_request *rq;
163 struct idal_buffer *ib;
164 int write_command;
165 int rc;
166
167 fp = filp->private_data;
168 if (!fp)
169 return -ENODEV;
170 ib = idal_buffer_alloc(count, 0);
171 if (!ib)
172 return -ENOMEM;
173 rq = raw3270_request_alloc(0);
174 if (!IS_ERR(rq)) {
175 if (idal_buffer_from_user(ib, data, count) == 0) {
176 write_command = fp->write_command ? : 1;
177 if (write_command == 5)
178 write_command = 13;
179 raw3270_request_set_cmd(rq, write_command);
180 raw3270_request_set_idal(rq, ib);
181 rc = fs3270_do_io(&fp->view, rq);
182 } else
183 rc = -EFAULT;
184 raw3270_request_free(rq);
185 } else
186 rc = PTR_ERR(rq);
187 idal_buffer_free(ib);
188 return rc;
189}
190
191/*
192 * process ioctl commands for the tube driver
193 */
194static int
195fs3270_ioctl(struct inode *inode, struct file *filp,
196 unsigned int cmd, unsigned long arg)
197{
198 struct fs3270 *fp;
199 struct raw3270_iocb iocb;
200 int rc;
201
202 fp = filp->private_data;
203 if (!fp)
204 return -ENODEV;
205 rc = 0;
206 switch (cmd) {
207 case TUBICMD:
208 fp->read_command = arg;
209 break;
210 case TUBOCMD:
211 fp->write_command = arg;
212 break;
213 case TUBGETI:
214 rc = put_user(fp->read_command, (char *) arg);
215 break;
216 case TUBGETO:
217 rc = put_user(fp->write_command,(char *) arg);
218 break;
219 case TUBGETMOD:
220 iocb.model = fp->view.model;
221 iocb.line_cnt = fp->view.rows;
222 iocb.col_cnt = fp->view.cols;
223 iocb.pf_cnt = 24;
224 iocb.re_cnt = 20;
225 iocb.map = 0;
226 if (copy_to_user((char *) arg, &iocb,
227 sizeof(struct raw3270_iocb)))
228 rc = -EFAULT;
229 break;
230 }
231 return rc;
232}
233
234/*
235 * Allocate tty3270 structure.
236 */
237static struct fs3270 *
238fs3270_alloc_view(void)
239{
240 struct fs3270 *fp;
241
242 fp = (struct fs3270 *) kmalloc(sizeof(struct fs3270),GFP_KERNEL);
243 if (!fp)
244 return ERR_PTR(-ENOMEM);
245 memset(fp, 0, sizeof(struct fs3270));
246 fp->clear = raw3270_request_alloc(0);
247 if (!IS_ERR(fp->clear)) {
248 kfree(fp);
249 return ERR_PTR(-ENOMEM);
250 }
251 return fp;
252}
253
254/*
255 * Free tty3270 structure.
256 */
257static void
258fs3270_free_view(struct raw3270_view *view)
259{
260 raw3270_request_free(((struct fs3270 *) view)->clear);
261 kfree(view);
262}
263
264/*
265 * Unlink fs3270 data structure from filp.
266 */
267static void
268fs3270_release(struct raw3270_view *view)
269{
270}
271
/* Callback table of the fullscreen view, handed to raw3270_add_view. */
struct raw3270_fn fs3270_fn = {
	.activate = fs3270_activate,
	.deactivate = fs3270_deactivate,
	.intv = (void *) fs3270_irq,
	.release = fs3270_release,
	.free = fs3270_free_view
};
280
281/*
282 * This routine is called whenever a 3270 fullscreen device is opened.
283 */
284static int
285fs3270_open(struct inode *inode, struct file *filp)
286{
287 struct fs3270 *fp;
288 int minor, rc;
289
290 if (imajor(filp->f_dentry->d_inode) != IBM_FS3270_MAJOR)
291 return -ENODEV;
292 minor = iminor(filp->f_dentry->d_inode);
293 /* Check if some other program is already using fullscreen mode. */
294 fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
295 if (!IS_ERR(fp)) {
296 raw3270_put_view(&fp->view);
297 return -EBUSY;
298 }
299 /* Allocate fullscreen view structure. */
300 fp = fs3270_alloc_view();
301 if (IS_ERR(fp))
302 return PTR_ERR(fp);
303
304 init_waitqueue_head(&fp->attn_wait);
305 fp->fs_pid = current->pid;
306 rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
307 if (rc) {
308 fs3270_free_view(&fp->view);
309 return rc;
310 }
311
312 rc = raw3270_activate_view(&fp->view);
313 if (rc) {
314 raw3270_del_view(&fp->view);
315 return rc;
316 }
317 filp->private_data = fp;
318 return 0;
319}
320
321/*
322 * This routine is called when the 3270 tty is closed. We wait
323 * for the remaining request to be completed. Then we clean up.
324 */
325static int
326fs3270_close(struct inode *inode, struct file *filp)
327{
328 struct fs3270 *fp;
329
330 fp = filp->private_data;
331 filp->private_data = 0;
332 if (fp)
333 raw3270_del_view(&fp->view);
334 return 0;
335}
336
/* File operations of the fullscreen 3270 character device. */
static struct file_operations fs3270_fops = {
	.owner	 = THIS_MODULE,		/* owner */
	.read	 = fs3270_read,		/* read */
	.write	 = fs3270_write,	/* write */
	.ioctl	 = fs3270_ioctl,	/* ioctl */
	.open	 = fs3270_open,		/* open */
	.release = fs3270_close,	/* release */
};
345
346/*
347 * 3270 fullscreen driver initialization.
348 */
349static int __init
350fs3270_init(void)
351{
352 int rc;
353
354 rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops);
355 if (rc) {
356 printk(KERN_ERR "fs3270 can't get major number %d: errno %d\n",
357 IBM_FS3270_MAJOR, rc);
358 return rc;
359 }
360 return 0;
361}
362
/* Module unload: give back the fullscreen character device major. */
static void __exit
fs3270_exit(void)
{
	unregister_chrdev(IBM_FS3270_MAJOR, "fs3270");
}
368
369MODULE_LICENSE("GPL");
370MODULE_ALIAS_CHARDEV_MAJOR(IBM_FS3270_MAJOR);
371
372module_init(fs3270_init);
373module_exit(fs3270_exit);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
new file mode 100644
index 000000000000..fd43d99b45a3
--- /dev/null
+++ b/drivers/s390/char/keyboard.c
@@ -0,0 +1,519 @@
1/*
2 * drivers/s390/char/keyboard.c
3 * ebcdic keycode functions for s390 console drivers
4 *
5 * S390 version
6 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
8 */
9
10#include <linux/config.h>
11#include <linux/module.h>
12#include <linux/sched.h>
13#include <linux/sysrq.h>
14
15#include <linux/kbd_kern.h>
16#include <linux/kbd_diacr.h>
17#include <asm/uaccess.h>
18
19#include "keyboard.h"
20
21/*
22 * Handler Tables.
23 */
24#define K_HANDLERS\
25 k_self, k_fn, k_spec, k_ignore,\
26 k_dead, k_ignore, k_ignore, k_ignore,\
27 k_ignore, k_ignore, k_ignore, k_ignore,\
28 k_ignore, k_ignore, k_ignore, k_ignore
29
30typedef void (k_handler_fn)(struct kbd_data *, unsigned char);
31static k_handler_fn K_HANDLERS;
32static k_handler_fn *k_handler[16] = { K_HANDLERS };
33
34/* maximum values each key_handler can handle */
35static const int kbd_max_vals[] = {
36 255, ARRAY_SIZE(func_table) - 1, NR_FN_HANDLER - 1, 0,
37 NR_DEAD - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0
38};
39static const int KBD_NR_TYPES = ARRAY_SIZE(kbd_max_vals);
40
41static unsigned char ret_diacr[NR_DEAD] = {
42 '`', '\'', '^', '~', '"', ','
43};
44
45/*
46 * Alloc/free of kbd_data structures.
47 */
48struct kbd_data *
49kbd_alloc(void) {
50 struct kbd_data *kbd;
51 int i, len;
52
53 kbd = kmalloc(sizeof(struct kbd_data), GFP_KERNEL);
54 if (!kbd)
55 goto out;
56 memset(kbd, 0, sizeof(struct kbd_data));
57 kbd->key_maps = kmalloc(sizeof(key_maps), GFP_KERNEL);
58 if (!key_maps)
59 goto out_kbd;
60 memset(kbd->key_maps, 0, sizeof(key_maps));
61 for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
62 if (key_maps[i]) {
63 kbd->key_maps[i] =
64 kmalloc(sizeof(u_short)*NR_KEYS, GFP_KERNEL);
65 if (!kbd->key_maps[i])
66 goto out_maps;
67 memcpy(kbd->key_maps[i], key_maps[i],
68 sizeof(u_short)*NR_KEYS);
69 }
70 }
71 kbd->func_table = kmalloc(sizeof(func_table), GFP_KERNEL);
72 if (!kbd->func_table)
73 goto out_maps;
74 memset(kbd->func_table, 0, sizeof(func_table));
75 for (i = 0; i < ARRAY_SIZE(func_table); i++) {
76 if (func_table[i]) {
77 len = strlen(func_table[i]) + 1;
78 kbd->func_table[i] = kmalloc(len, GFP_KERNEL);
79 if (!kbd->func_table[i])
80 goto out_func;
81 memcpy(kbd->func_table[i], func_table[i], len);
82 }
83 }
84 kbd->fn_handler =
85 kmalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
86 if (!kbd->fn_handler)
87 goto out_func;
88 memset(kbd->fn_handler, 0, sizeof(fn_handler_fn *) * NR_FN_HANDLER);
89 kbd->accent_table =
90 kmalloc(sizeof(struct kbdiacr)*MAX_DIACR, GFP_KERNEL);
91 if (!kbd->accent_table)
92 goto out_fn_handler;
93 memcpy(kbd->accent_table, accent_table,
94 sizeof(struct kbdiacr)*MAX_DIACR);
95 kbd->accent_table_size = accent_table_size;
96 return kbd;
97
98out_fn_handler:
99 kfree(kbd->fn_handler);
100out_func:
101 for (i = 0; i < ARRAY_SIZE(func_table); i++)
102 if (kbd->func_table[i])
103 kfree(kbd->func_table[i]);
104 kfree(kbd->func_table);
105out_maps:
106 for (i = 0; i < ARRAY_SIZE(key_maps); i++)
107 if (kbd->key_maps[i])
108 kfree(kbd->key_maps[i]);
109 kfree(kbd->key_maps);
110out_kbd:
111 kfree(kbd);
112out:
113 return 0;
114}
115
116void
117kbd_free(struct kbd_data *kbd)
118{
119 int i;
120
121 kfree(kbd->accent_table);
122 kfree(kbd->fn_handler);
123 for (i = 0; i < ARRAY_SIZE(func_table); i++)
124 if (kbd->func_table[i])
125 kfree(kbd->func_table[i]);
126 kfree(kbd->func_table);
127 for (i = 0; i < ARRAY_SIZE(key_maps); i++)
128 if (kbd->key_maps[i])
129 kfree(kbd->key_maps[i]);
130 kfree(kbd->key_maps);
131 kfree(kbd);
132}
133
134/*
135 * Generate ascii -> ebcdic translation table from kbd_data.
136 */
137void
138kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
139{
140 unsigned short *keymap, keysym;
141 int i, j, k;
142
143 memset(ascebc, 0x40, 256);
144 for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
145 keymap = kbd->key_maps[i];
146 if (!keymap)
147 continue;
148 for (j = 0; j < NR_KEYS; j++) {
149 k = ((i & 1) << 7) + j;
150 keysym = keymap[j];
151 if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
152 KTYP(keysym) == (KT_LETTER | 0xf0))
153 ascebc[KVAL(keysym)] = k;
154 else if (KTYP(keysym) == (KT_DEAD | 0xf0))
155 ascebc[ret_diacr[KVAL(keysym)]] = k;
156 }
157 }
158}
159
160/*
161 * Generate ebcdic -> ascii translation table from kbd_data.
162 */
163void
164kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
165{
166 unsigned short *keymap, keysym;
167 int i, j, k;
168
169 memset(ebcasc, ' ', 256);
170 for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
171 keymap = kbd->key_maps[i];
172 if (!keymap)
173 continue;
174 for (j = 0; j < NR_KEYS; j++) {
175 keysym = keymap[j];
176 k = ((i & 1) << 7) + j;
177 if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
178 KTYP(keysym) == (KT_LETTER | 0xf0))
179 ebcasc[k] = KVAL(keysym);
180 else if (KTYP(keysym) == (KT_DEAD | 0xf0))
181 ebcasc[k] = ret_diacr[KVAL(keysym)];
182 }
183 }
184}
185
186/*
187 * We have a combining character DIACR here, followed by the character CH.
188 * If the combination occurs in the table, return the corresponding value.
189 * Otherwise, if CH is a space or equals DIACR, return DIACR.
190 * Otherwise, conclude that DIACR was not combining after all,
191 * queue it and return CH.
192 */
193static unsigned char
194handle_diacr(struct kbd_data *kbd, unsigned char ch)
195{
196 int i, d;
197
198 d = kbd->diacr;
199 kbd->diacr = 0;
200
201 for (i = 0; i < kbd->accent_table_size; i++) {
202 if (kbd->accent_table[i].diacr == d &&
203 kbd->accent_table[i].base == ch)
204 return kbd->accent_table[i].result;
205 }
206
207 if (ch == ' ' || ch == d)
208 return d;
209
210 kbd_put_queue(kbd->tty, d);
211 return ch;
212}
213
214/*
215 * Handle dead key.
216 */
217static void
218k_dead(struct kbd_data *kbd, unsigned char value)
219{
220 value = ret_diacr[value];
221 kbd->diacr = (kbd->diacr ? handle_diacr(kbd, value) : value);
222}
223
224/*
225 * Normal character handler.
226 */
227static void
228k_self(struct kbd_data *kbd, unsigned char value)
229{
230 if (kbd->diacr)
231 value = handle_diacr(kbd, value);
232 kbd_put_queue(kbd->tty, value);
233}
234
235/*
236 * Special key handlers
237 */
238static void
239k_ignore(struct kbd_data *kbd, unsigned char value)
240{
241}
242
243/*
244 * Function key handler.
245 */
246static void
247k_fn(struct kbd_data *kbd, unsigned char value)
248{
249 if (kbd->func_table[value])
250 kbd_puts_queue(kbd->tty, kbd->func_table[value]);
251}
252
/* KT_SPEC handler: dispatch to the per-tty special-function handler
 * registered in fn_handler, if any. */
static void
k_spec(struct kbd_data *kbd, unsigned char value)
{
	if (value >= NR_FN_HANDLER)
		return;
	if (kbd->fn_handler[value])
		kbd->fn_handler[value](kbd);
}
261
262/*
263 * Put utf8 character to tty flip buffer.
264 * UTF-8 is defined for words of up to 31 bits,
265 * but we need only 16 bits here
266 */
267static void
268to_utf8(struct tty_struct *tty, ushort c)
269{
270 if (c < 0x80)
271 /* 0******* */
272 kbd_put_queue(tty, c);
273 else if (c < 0x800) {
274 /* 110***** 10****** */
275 kbd_put_queue(tty, 0xc0 | (c >> 6));
276 kbd_put_queue(tty, 0x80 | (c & 0x3f));
277 } else {
278 /* 1110**** 10****** 10****** */
279 kbd_put_queue(tty, 0xe0 | (c >> 12));
280 kbd_put_queue(tty, 0x80 | ((c >> 6) & 0x3f));
281 kbd_put_queue(tty, 0x80 | (c & 0x3f));
282 }
283}
284
285/*
286 * Process keycode.
287 */
288void
289kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
290{
291 unsigned short keysym;
292 unsigned char type, value;
293
294 if (!kbd || !kbd->tty)
295 return;
296
297 if (keycode >= 384)
298 keysym = kbd->key_maps[5][keycode - 384];
299 else if (keycode >= 256)
300 keysym = kbd->key_maps[4][keycode - 256];
301 else if (keycode >= 128)
302 keysym = kbd->key_maps[1][keycode - 128];
303 else
304 keysym = kbd->key_maps[0][keycode];
305
306 type = KTYP(keysym);
307 if (type >= 0xf0) {
308 type -= 0xf0;
309 if (type == KT_LETTER)
310 type = KT_LATIN;
311 value = KVAL(keysym);
312#ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */
313 if (kbd->sysrq) {
314 if (kbd->sysrq == K(KT_LATIN, '-')) {
315 kbd->sysrq = 0;
316 handle_sysrq(value, 0, kbd->tty);
317 return;
318 }
319 if (value == '-') {
320 kbd->sysrq = K(KT_LATIN, '-');
321 return;
322 }
323 /* Incomplete sysrq sequence. */
324 (*k_handler[KTYP(kbd->sysrq)])(kbd, KVAL(kbd->sysrq));
325 kbd->sysrq = 0;
326 } else if ((type == KT_LATIN && value == '^') ||
327 (type == KT_DEAD && ret_diacr[value] == '^')) {
328 kbd->sysrq = K(type, value);
329 return;
330 }
331#endif
332 (*k_handler[type])(kbd, value);
333 } else
334 to_utf8(kbd->tty, keysym);
335}
336
337/*
338 * Ioctl stuff.
339 */
/* KDGKBENT/KDSKBENT: read or modify one entry of the per-tty keymaps.
 * KDSKBENT with (index 0, K_NOSUCHMAP) deallocates a whole keymap. */
static int
do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
	      int cmd, int perm)
{
	struct kbentry tmp;
	ushort *key_map, val, ov;

	if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
		return -EFAULT;
	/* Range checks are compiled out when the index type (u_char)
	 * cannot exceed the table size anyway. */
#if NR_KEYS < 256
	if (tmp.kb_index >= NR_KEYS)
		return -EINVAL;
#endif
#if MAX_NR_KEYMAPS < 256
	if (tmp.kb_table >= MAX_NR_KEYMAPS)
		return -EINVAL;
#endif

	switch (cmd) {
	case KDGKBENT:
		key_map = kbd->key_maps[tmp.kb_table];
		if (key_map) {
			val = U(key_map[tmp.kb_index]);
			if (KTYP(val) >= KBD_NR_TYPES)
				val = K_HOLE;
		} else
			val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP);
		return put_user(val, &user_kbe->kb_value);
	case KDSKBENT:
		if (!perm)
			return -EPERM;
		if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) {
			/* disallocate map */
			key_map = kbd->key_maps[tmp.kb_table];
			if (key_map) {
				kbd->key_maps[tmp.kb_table] = 0;
				kfree(key_map);
			}
			break;
		}

		if (KTYP(tmp.kb_value) >= KBD_NR_TYPES)
			return -EINVAL;
		if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)])
			return -EINVAL;

		/* Allocate the keymap on first write to it. */
		if (!(key_map = kbd->key_maps[tmp.kb_table])) {
			int j;

			key_map = (ushort *) kmalloc(sizeof(plain_map),
						     GFP_KERNEL);
			if (!key_map)
				return -ENOMEM;
			kbd->key_maps[tmp.kb_table] = key_map;
			for (j = 0; j < NR_KEYS; j++)
				key_map[j] = U(K_HOLE);
		}
		ov = U(key_map[tmp.kb_index]);
		if (tmp.kb_value == ov)
			break;	/* nothing to do */
		/*
		 * Attention Key.
		 */
		if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) &&
		    !capable(CAP_SYS_ADMIN))
			return -EPERM;
		key_map[tmp.kb_index] = U(tmp.kb_value);
		break;
	}
	return 0;
}
411
412static int
413do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
414 int cmd, int perm)
415{
416 unsigned char kb_func;
417 char *p;
418 int len;
419
420 /* Get u_kbs->kb_func. */
421 if (get_user(kb_func, &u_kbs->kb_func))
422 return -EFAULT;
423#if MAX_NR_FUNC < 256
424 if (kb_func >= MAX_NR_FUNC)
425 return -EINVAL;
426#endif
427
428 switch (cmd) {
429 case KDGKBSENT:
430 p = kbd->func_table[kb_func];
431 if (p) {
432 len = strlen(p);
433 if (len >= sizeof(u_kbs->kb_string))
434 len = sizeof(u_kbs->kb_string) - 1;
435 if (copy_to_user(u_kbs->kb_string, p, len))
436 return -EFAULT;
437 } else
438 len = 0;
439 if (put_user('\0', u_kbs->kb_string + len))
440 return -EFAULT;
441 break;
442 case KDSKBSENT:
443 if (!perm)
444 return -EPERM;
445 len = strnlen_user(u_kbs->kb_string,
446 sizeof(u_kbs->kb_string) - 1);
447 p = kmalloc(len, GFP_KERNEL);
448 if (!p)
449 return -ENOMEM;
450 if (copy_from_user(p, u_kbs->kb_string, len)) {
451 kfree(p);
452 return -EFAULT;
453 }
454 p[len] = 0;
455 if (kbd->func_table[kb_func])
456 kfree(kbd->func_table[kb_func]);
457 kbd->func_table[kb_func] = p;
458 break;
459 }
460 return 0;
461}
462
/* Keyboard-related ioctls for the 3270 tty drivers: keymap entries,
 * function-key strings and the accent (dead-key composition) table.
 * Returns -ENOIOCTLCMD for everything it does not handle. */
int
kbd_ioctl(struct kbd_data *kbd, struct file *file,
	  unsigned int cmd, unsigned long arg)
{
	struct kbdiacrs __user *a;
	void __user *argp;
	int ct, perm;

	argp = (void __user *)arg;

	/*
	 * To have permissions to do most of the vt ioctls, we either have
	 * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
	 */
	perm = current->signal->tty == kbd->tty || capable(CAP_SYS_TTY_CONFIG);
	switch (cmd) {
	case KDGKBTYPE:
		return put_user(KB_101, (char __user *)argp);
	case KDGKBENT:
	case KDSKBENT:
		return do_kdsk_ioctl(kbd, argp, cmd, perm);
	case KDGKBSENT:
	case KDSKBSENT:
		return do_kdgkb_ioctl(kbd, argp, cmd, perm);
	case KDGKBDIACR:
		a = argp;

		if (put_user(kbd->accent_table_size, &a->kb_cnt))
			return -EFAULT;
		ct = kbd->accent_table_size;
		if (copy_to_user(a->kbdiacr, kbd->accent_table,
				 ct * sizeof(struct kbdiacr)))
			return -EFAULT;
		return 0;
	case KDSKBDIACR:
		a = argp;
		if (!perm)
			return -EPERM;
		if (get_user(ct, &a->kb_cnt))
			return -EFAULT;
		/* NOTE(review): ct is signed; a negative kb_cnt passes
		 * this check and is stored below -- confirm the failing
		 * copy_from_user makes this harmless. */
		if (ct >= MAX_DIACR)
			return -EINVAL;
		kbd->accent_table_size = ct;
		if (copy_from_user(kbd->accent_table, a->kbdiacr,
				   ct * sizeof(struct kbdiacr)))
			return -EFAULT;
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
514
515EXPORT_SYMBOL(kbd_ioctl);
516EXPORT_SYMBOL(kbd_ascebc);
517EXPORT_SYMBOL(kbd_free);
518EXPORT_SYMBOL(kbd_alloc);
519EXPORT_SYMBOL(kbd_keycode);
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
new file mode 100644
index 000000000000..3b4da5a9cf79
--- /dev/null
+++ b/drivers/s390/char/keyboard.h
@@ -0,0 +1,57 @@
1/*
2 * drivers/s390/char/keyboard.h
3 * ebcdic keycode functions for s390 console drivers
4 *
5 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 */
8
9#include <linux/tty.h>
10#include <linux/tty_flip.h>
11#include <linux/keyboard.h>
12
/* Number of slots in the KT_SPEC (special function) handler table. */
#define NR_FN_HANDLER 20

struct kbd_data;

/* Handler type invoked for KT_SPEC keysyms (see k_spec). */
typedef void (fn_handler_fn)(struct kbd_data *);

/*
 * FIXME: explain key_maps tricks.
 */

/* Per-tty keyboard state; the tables are private deep copies created
 * by kbd_alloc so each tty can be remapped independently via ioctl. */
struct kbd_data {
	struct tty_struct *tty;		/* tty the characters go to */
	unsigned short **key_maps;	/* private copy of the keymaps */
	char **func_table;		/* function key strings */
	fn_handler_fn **fn_handler;	/* KT_SPEC handlers */
	struct kbdiacr *accent_table;	/* dead-key composition table */
	unsigned int accent_table_size;	/* used entries in accent_table */
	unsigned char diacr;		/* pending dead-key accent, 0 if none */
	unsigned short sysrq;		/* sysrq escape sequence state */
};

struct kbd_data *kbd_alloc(void);
void kbd_free(struct kbd_data *);
void kbd_ascebc(struct kbd_data *, unsigned char *);

void kbd_keycode(struct kbd_data *, unsigned int);
int kbd_ioctl(struct kbd_data *, struct file *, unsigned int, unsigned long);
40
41/*
42 * Helper Functions.
43 */
44extern inline void
45kbd_put_queue(struct tty_struct *tty, int ch)
46{
47 tty_insert_flip_char(tty, ch, 0);
48 tty_schedule_flip(tty);
49}
50
/* Push a NUL-terminated string into the tty flip buffer and schedule
 * delivery to the line discipline. */
extern inline void
kbd_puts_queue(struct tty_struct *tty, char *cp)
{
	while (*cp)
		tty_insert_flip_char(tty, *cp++, 0);
	tty_schedule_flip(tty);
}
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
new file mode 100644
index 000000000000..5fd3ad867386
--- /dev/null
+++ b/drivers/s390/char/monreader.c
@@ -0,0 +1,662 @@
1/*
2 * drivers/s390/char/monreader.c
3 *
4 * Character device driver for reading z/VM *MONITOR service records.
5 *
6 * Copyright (C) 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH.
7 *
8 * Author: Gerald Schaefer <geraldsc@de.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/init.h>
14#include <linux/errno.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/miscdevice.h>
18#include <linux/ctype.h>
19#include <linux/spinlock.h>
20#include <linux/interrupt.h>
21#include <asm/uaccess.h>
22#include <asm/ebcdic.h>
23#include <asm/extmem.h>
24#include <linux/poll.h>
25#include "../net/iucv.h"
26
27
28//#define MON_DEBUG /* Debug messages on/off */
29
30#define MON_NAME "monreader"
31
32#define P_INFO(x...) printk(KERN_INFO MON_NAME " info: " x)
33#define P_ERROR(x...) printk(KERN_ERR MON_NAME " error: " x)
34#define P_WARNING(x...) printk(KERN_WARNING MON_NAME " warning: " x)
35
36#ifdef MON_DEBUG
37#define P_DEBUG(x...) printk(KERN_DEBUG MON_NAME " debug: " x)
38#else
39#define P_DEBUG(x...) do {} while (0)
40#endif
41
42#define MON_COLLECT_SAMPLE 0x80
43#define MON_COLLECT_EVENT 0x40
44#define MON_SERVICE "*MONITOR"
45#define MON_IN_USE 0x01
46#define MON_MSGLIM 255
47
48static char mon_dcss_name[9] = "MONDCSS\0";
49
/* State of one *MONITOR message slot. */
struct mon_msg {
	u32 pos;		/* current read position; 0 = read not started */
	u32 mca_offset;		/* byte offset of current MCA within message */
	iucv_MessagePending local_eib;	/* copy of the IUCV interrupt data */
	char msglim_reached;	/* set when this slot filled at the msg limit */
	char replied_msglim;	/* reply already sent for an overflow slot */
};
57
/* Per-open driver state (only one opener is allowed, see mon_open). */
struct mon_private {
	u16 pathid;			/* IUCV path to *MONITOR */
	iucv_handle_t iucv_handle;	/* handle from iucv_register_program */
	struct mon_msg *msg_array[MON_MSGLIM];	/* ring of message slots */
	unsigned int write_index;	/* next slot filled by interrupt side */
	unsigned int read_index;	/* next slot consumed by mon_read */
	atomic_t msglim_count;		/* pending (unreplied) messages */
	atomic_t read_ready;		/* complete messages ready to read */
	atomic_t iucv_connected;	/* connection confirmed by *MONITOR */
	atomic_t iucv_severed;		/* path severed; device unusable */
};
69
70static unsigned long mon_in_use = 0;
71
72static unsigned long mon_dcss_start;
73static unsigned long mon_dcss_end;
74
75static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue);
76static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue);
77
78static u8 iucv_host[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
79
80static u8 user_data_connect[16] = {
81 /* Version code, must be 0x01 for shared mode */
82 0x01,
83 /* what to collect */
84 MON_COLLECT_SAMPLE | MON_COLLECT_EVENT,
85 /* DCSS name in EBCDIC, 8 bytes padded with blanks */
86 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
87 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
88};
89
90static u8 user_data_sever[16] = {
91 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
92 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
93};
94
95
96/******************************************************************************
97 * helper functions *
98 *****************************************************************************/
/*
 * Create the 8 byte EBCDIC DCSS segment name from an ASCII name:
 * uppercase the characters, pad with blanks and convert to EBCDIC.
 *
 * @ascii_name:  NUL-terminated ASCII name, at most 8 significant chars
 * @ebcdic_name: receives exactly 8 bytes (not NUL-terminated)
 *
 * Fix: removed a stray ';' (empty statement) after the first loop body.
 */
static inline void
dcss_mkname(char *ascii_name, char *ebcdic_name)
{
	int i;

	for (i = 0; i < 8; i++) {
		if (ascii_name[i] == '\0')
			break;
		ebcdic_name[i] = toupper(ascii_name[i]);
	}
	/* pad the remainder with ASCII blanks before conversion */
	for (; i < 8; i++)
		ebcdic_name[i] = ' ';
	ASCEBC(ebcdic_name, 8);
}
117
118/*
119 * print appropriate error message for segment_load()/segment_type()
120 * return code
121 */
122static void
123mon_segment_warn(int rc, char* seg_name)
124{
125 switch (rc) {
126 case -ENOENT:
127 P_WARNING("cannot load/query segment %s, does not exist\n",
128 seg_name);
129 break;
130 case -ENOSYS:
131 P_WARNING("cannot load/query segment %s, not running on VM\n",
132 seg_name);
133 break;
134 case -EIO:
135 P_WARNING("cannot load/query segment %s, hardware error\n",
136 seg_name);
137 break;
138 case -ENOTSUPP:
139 P_WARNING("cannot load/query segment %s, is a multi-part "
140 "segment\n", seg_name);
141 break;
142 case -ENOSPC:
143 P_WARNING("cannot load/query segment %s, overlaps with "
144 "storage\n", seg_name);
145 break;
146 case -EBUSY:
147 P_WARNING("cannot load/query segment %s, overlaps with "
148 "already loaded dcss\n", seg_name);
149 break;
150 case -EPERM:
151 P_WARNING("cannot load/query segment %s, already loaded in "
152 "incompatible mode\n", seg_name);
153 break;
154 case -ENOMEM:
155 P_WARNING("cannot load/query segment %s, out of memory\n",
156 seg_name);
157 break;
158 case -ERANGE:
159 P_WARNING("cannot load/query segment %s, exceeds kernel "
160 "mapping range\n", seg_name);
161 break;
162 default:
163 P_WARNING("cannot load/query segment %s, return value %i\n",
164 seg_name, rc);
165 break;
166 }
167}
168
/* Start address of the message control area(s), delivered in the IUCV
 * parameter-list message data. */
static inline unsigned long
mon_mca_start(struct mon_msg *monmsg)
{
	return monmsg->local_eib.ln1msg1.iprmmsg1_u32;
}
174
/* Address of the last byte of the message control area(s). */
static inline unsigned long
mon_mca_end(struct mon_msg *monmsg)
{
	return monmsg->local_eib.ln1msg2.ipbfln1f;
}
180
/* Byte at offset 'index' within the current 12-byte MCA. */
static inline u8
mon_mca_type(struct mon_msg *monmsg, u8 index)
{
	return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index);
}
186
/* Total size of all MCAs in bytes (inclusive range, hence +1). */
static inline u32
mon_mca_size(struct mon_msg *monmsg)
{
	return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1;
}
192
/* Start address of the monitor records, stored at MCA offset +4. */
static inline u32
mon_rec_start(struct mon_msg *monmsg)
{
	return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4));
}
198
/* End address (last byte) of the monitor records, at MCA offset +8. */
static inline u32
mon_rec_end(struct mon_msg *monmsg)
{
	return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
}
204
/*
 * Sanity-check the current MCA: record and MCA ranges must be ordered,
 * lie inside the loaded DCSS, the MCA size must be a multiple of 12 and
 * the type bytes must be plausible.  Returns 0 if valid, -EINVAL if the
 * MCA should be ignored.
 */
static inline int
mon_check_mca(struct mon_msg *monmsg)
{
	if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
	    (mon_rec_start(monmsg) < mon_dcss_start) ||
	    (mon_rec_end(monmsg) > mon_dcss_end) ||
	    (mon_mca_type(monmsg, 0) == 0) ||
	    (mon_mca_size(monmsg) % 12 != 0) ||
	    (mon_mca_end(monmsg) <= mon_mca_start(monmsg)) ||
	    (mon_mca_end(monmsg) > mon_dcss_end) ||
	    (mon_mca_start(monmsg) < mon_dcss_start) ||
	    ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
	{
		P_DEBUG("READ, IGNORED INVALID MCA\n\n");
		return -EINVAL;
	}
	return 0;
}
223
/*
 * Acknowledge a fully-consumed message to the *MONITOR service and
 * advance the read ring.  For a slot that was filled while the message
 * limit was reached, only mark it replied; mon_next_message() resets it
 * and reports -EOVERFLOW to the reader.
 * Returns 0 on success, -EIO if the IUCV reply failed.
 */
static inline int
mon_send_reply(struct mon_msg *monmsg, struct mon_private *monpriv)
{
	/* NOTE(review): prmmsg is passed uninitialized; presumably the
	 * reply data is ignored by the *MONITOR service -- confirm. */
	u8 prmmsg[8];
	int rc;

	P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = "
		"0x%08X\n\n",
		monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid,
		monmsg->local_eib.iptrgcls);
	rc = iucv_reply_prmmsg(monmsg->local_eib.ippathid,
			       monmsg->local_eib.ipmsgid,
			       monmsg->local_eib.iptrgcls,
			       0, prmmsg);
	atomic_dec(&monpriv->msglim_count);
	if (likely(!monmsg->msglim_reached)) {
		monmsg->pos = 0;
		monmsg->mca_offset = 0;
		monpriv->read_index = (monpriv->read_index + 1) %
				      MON_MSGLIM;
		atomic_dec(&monpriv->read_ready);
	} else
		monmsg->replied_msglim = 1;
	if (rc) {
		P_ERROR("read, IUCV reply failed with rc = %i\n\n", rc);
		return -EIO;
	}
	return 0;
}
253
254static inline struct mon_private *
255mon_alloc_mem(void)
256{
257 int i,j;
258 struct mon_private *monpriv;
259
260 monpriv = kmalloc(sizeof(struct mon_private), GFP_KERNEL);
261 if (!monpriv) {
262 P_ERROR("no memory for monpriv\n");
263 return NULL;
264 }
265 memset(monpriv, 0, sizeof(struct mon_private));
266 for (i = 0; i < MON_MSGLIM; i++) {
267 monpriv->msg_array[i] = kmalloc(sizeof(struct mon_msg),
268 GFP_KERNEL);
269 if (!monpriv->msg_array[i]) {
270 P_ERROR("open, no memory for msg_array\n");
271 for (j = 0; j < i; j++)
272 kfree(monpriv->msg_array[j]);
273 return NULL;
274 }
275 memset(monpriv->msg_array[i], 0, sizeof(struct mon_msg));
276 }
277 return monpriv;
278}
279
/*
 * Dump the state of the message about to be read (ring indices, IUCV
 * ids, MCA/record ranges).  Compiles to nothing unless MON_DEBUG is
 * defined.
 */
static inline void
mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv)
{
#ifdef MON_DEBUG
	u8 msg_type[2], mca_type;
	unsigned long records_len;

	records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1;

	memcpy(msg_type, &monmsg->local_eib.iptrgcls, 2);
	EBCASC(msg_type, 2);
	mca_type = mon_mca_type(monmsg, 0);
	EBCASC(&mca_type, 1);

	P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n",
		monpriv->read_index, monpriv->write_index);
	P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n",
		monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid,
		monmsg->local_eib.iptrgcls);
	P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n",
		msg_type[0], msg_type[1], mca_type ? mca_type : 'X',
		mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2));
	P_DEBUG("read, MCA: start = 0x%lX, end = 0x%lX\n",
		mon_mca_start(monmsg), mon_mca_end(monmsg));
	P_DEBUG("read, REC: start = 0x%X, end = 0x%X, len = %lu\n\n",
		mon_rec_start(monmsg), mon_rec_end(monmsg), records_len);
	if (mon_mca_size(monmsg) > 12)
		P_DEBUG("READ, MORE THAN ONE MCA\n\n");
#endif
}
310
/*
 * Advance to the next 12-byte MCA within the current message, if any,
 * and reset the read position.  No-op if the current MCA is the last.
 */
static inline void
mon_next_mca(struct mon_msg *monmsg)
{
	if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
		return;
	P_DEBUG("READ, NEXT MCA\n\n");
	monmsg->mca_offset += 12;
	monmsg->pos = 0;
}
320
/*
 * Fetch the next complete message from the read ring.
 * Returns NULL if nothing is ready, ERR_PTR(-EOVERFLOW) once for a slot
 * that was already replied because the message limit had been reached
 * (the slot is reset and skipped), otherwise the message to read.
 */
static inline struct mon_msg *
mon_next_message(struct mon_private *monpriv)
{
	struct mon_msg *monmsg;

	if (!atomic_read(&monpriv->read_ready))
		return NULL;
	monmsg = monpriv->msg_array[monpriv->read_index];
	if (unlikely(monmsg->replied_msglim)) {
		monmsg->replied_msglim = 0;
		monmsg->msglim_reached = 0;
		monmsg->pos = 0;
		monmsg->mca_offset = 0;
		P_WARNING("read, message limit reached\n");
		monpriv->read_index = (monpriv->read_index + 1) %
				      MON_MSGLIM;
		atomic_dec(&monpriv->read_ready);
		return ERR_PTR(-EOVERFLOW);
	}
	return monmsg;
}
342
343
344/******************************************************************************
345 * IUCV handler *
346 *****************************************************************************/
/*
 * IUCV callback: the connection to *MONITOR has been accepted.
 * Marks the path connected and wakes the opener blocked in mon_open().
 */
static void
mon_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data)
{
	struct mon_private *monpriv = (struct mon_private *) pgm_data;

	P_DEBUG("IUCV connection completed\n");
	P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = "
		"0x%02X, Sample = 0x%02X\n",
		eib->ipuser[0], eib->ipuser[1], eib->ipuser[2]);
	atomic_set(&monpriv->iucv_connected, 1);
	wake_up(&mon_conn_wait_queue);
}
359
/*
 * IUCV callback: the path was severed by the other side.  Marks the
 * device unusable and wakes both the opener and any blocked reader so
 * they can return an error.
 */
static void
mon_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data)
{
	struct mon_private *monpriv = (struct mon_private *) pgm_data;

	P_ERROR("IUCV connection severed with rc = 0x%X\n",
		(u8) eib->ipuser[0]);
	atomic_set(&monpriv->iucv_severed, 1);
	wake_up(&mon_conn_wait_queue);
	wake_up_interruptible(&mon_read_wait_queue);
}
371
/*
 * IUCV callback: a new *MONITOR message arrived.  Stores the interrupt
 * data in the next write slot, flags the slot if the message limit is
 * now reached, advances the write ring and wakes any blocked reader.
 */
static void
mon_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data)
{
	struct mon_private *monpriv = (struct mon_private *) pgm_data;

	P_DEBUG("IUCV message pending\n");
	memcpy(&monpriv->msg_array[monpriv->write_index]->local_eib, eib,
	       sizeof(iucv_MessagePending));
	if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
		P_WARNING("IUCV message pending, message limit (%i) reached\n",
			  MON_MSGLIM);
		monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
	}
	monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
	atomic_inc(&monpriv->read_ready);
	wake_up_interruptible(&mon_read_wait_queue);
}
389
/* Callback table handed to iucv_register_program() in mon_open(). */
static iucv_interrupt_ops_t mon_iucvops = {
	.ConnectionComplete = mon_iucv_ConnectionComplete,
	.ConnectionSevered  = mon_iucv_ConnectionSevered,
	.MessagePending     = mon_iucv_MessagePending,
};
395
396/******************************************************************************
397 * file operations *
398 *****************************************************************************/
399static int
400mon_open(struct inode *inode, struct file *filp)
401{
402 int rc, i;
403 struct mon_private *monpriv;
404
405 /*
406 * only one user allowed
407 */
408 if (test_and_set_bit(MON_IN_USE, &mon_in_use))
409 return -EBUSY;
410
411 monpriv = mon_alloc_mem();
412 if (!monpriv)
413 return -ENOMEM;
414
415 /*
416 * Register with IUCV and connect to *MONITOR service
417 */
418 monpriv->iucv_handle = iucv_register_program("my_monreader ",
419 MON_SERVICE,
420 NULL,
421 &mon_iucvops,
422 monpriv);
423 if (!monpriv->iucv_handle) {
424 P_ERROR("failed to register with iucv driver\n");
425 rc = -EIO;
426 goto out_error;
427 }
428 P_INFO("open, registered with IUCV\n");
429
430 rc = iucv_connect(&monpriv->pathid, MON_MSGLIM, user_data_connect,
431 MON_SERVICE, iucv_host, IPRMDATA, NULL, NULL,
432 monpriv->iucv_handle, NULL);
433 if (rc) {
434 P_ERROR("iucv connection to *MONITOR failed with "
435 "IPUSER SEVER code = %i\n", rc);
436 rc = -EIO;
437 goto out_unregister;
438 }
439 /*
440 * Wait for connection confirmation
441 */
442 wait_event(mon_conn_wait_queue,
443 atomic_read(&monpriv->iucv_connected) ||
444 atomic_read(&monpriv->iucv_severed));
445 if (atomic_read(&monpriv->iucv_severed)) {
446 atomic_set(&monpriv->iucv_severed, 0);
447 atomic_set(&monpriv->iucv_connected, 0);
448 rc = -EIO;
449 goto out_unregister;
450 }
451 P_INFO("open, established connection to *MONITOR service\n\n");
452 filp->private_data = monpriv;
453 return nonseekable_open(inode, filp);
454
455out_unregister:
456 iucv_unregister_program(monpriv->iucv_handle);
457out_error:
458 for (i = 0; i < MON_MSGLIM; i++)
459 kfree(monpriv->msg_array[i]);
460 kfree(monpriv);
461 clear_bit(MON_IN_USE, &mon_in_use);
462 return rc;
463}
464
/*
 * Device close: sever the IUCV path, unregister from IUCV, reset all
 * ring state, free the private data and release the single-user lock.
 * Always returns 0; IUCV errors are only logged.
 */
static int
mon_close(struct inode *inode, struct file *filp)
{
	int rc, i;
	struct mon_private *monpriv = filp->private_data;

	/*
	 * Close IUCV connection and unregister
	 */
	rc = iucv_sever(monpriv->pathid, user_data_sever);
	if (rc)
		P_ERROR("close, iucv_sever failed with rc = %i\n", rc);
	else
		P_INFO("close, terminated connection to *MONITOR service\n");

	rc = iucv_unregister_program(monpriv->iucv_handle);
	if (rc)
		P_ERROR("close, iucv_unregister failed with rc = %i\n", rc);
	else
		P_INFO("close, unregistered with IUCV\n");

	atomic_set(&monpriv->iucv_severed, 0);
	atomic_set(&monpriv->iucv_connected, 0);
	atomic_set(&monpriv->read_ready, 0);
	atomic_set(&monpriv->msglim_count, 0);
	monpriv->write_index  = 0;
	monpriv->read_index   = 0;

	for (i = 0; i < MON_MSGLIM; i++)
		kfree(monpriv->msg_array[i]);
	kfree(monpriv);
	clear_bit(MON_IN_USE, &mon_in_use);
	return 0;
}
499
/*
 * Read monitor data.  Each message is delivered as its 12-byte monitor
 * control element (MCE) followed by the monitor records it describes;
 * monmsg->pos tracks the resume point across partial reads.  Once a
 * message is fully consumed (or its MCA is invalid) it is acknowledged
 * via mon_send_reply().  Blocks unless O_NONBLOCK is set.
 */
static ssize_t
mon_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
{
	struct mon_private *monpriv = filp->private_data;
	struct mon_msg *monmsg;
	int ret;
	u32 mce_start;

	monmsg = mon_next_message(monpriv);
	if (IS_ERR(monmsg))
		return PTR_ERR(monmsg);

	if (!monmsg) {
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		ret = wait_event_interruptible(mon_read_wait_queue,
					atomic_read(&monpriv->read_ready) ||
					atomic_read(&monpriv->iucv_severed));
		if (ret)
			return ret;
		if (unlikely(atomic_read(&monpriv->iucv_severed)))
			return -EIO;
		/* NOTE(review): takes the slot directly, bypassing the
		 * replied_msglim handling in mon_next_message -- confirm
		 * this is intentional for the just-woken case. */
		monmsg = monpriv->msg_array[monpriv->read_index];
	}

	/* pos == 0 means this message has not been started yet */
	if (!monmsg->pos) {
		monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
		mon_read_debug(monmsg, monpriv);
	}
	if (mon_check_mca(monmsg))
		goto reply;

	/* read monitor control element (12 bytes) first */
	mce_start = mon_mca_start(monmsg) + monmsg->mca_offset;
	if ((monmsg->pos >= mce_start) && (monmsg->pos < mce_start + 12)) {
		count = min(count, (size_t) mce_start + 12 - monmsg->pos);
		ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
				   count);
		if (ret)
			return -EFAULT;
		monmsg->pos += count;
		if (monmsg->pos == mce_start + 12)
			monmsg->pos = mon_rec_start(monmsg);
		goto out_copy;
	}

	/* read records */
	if (monmsg->pos <= mon_rec_end(monmsg)) {
		count = min(count, (size_t) mon_rec_end(monmsg) - monmsg->pos
				   + 1);
		ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
				   count);
		if (ret)
			return -EFAULT;
		monmsg->pos += count;
		if (monmsg->pos > mon_rec_end(monmsg))
			mon_next_mca(monmsg);
		goto out_copy;
	}
reply:
	/* message fully consumed (or invalid): acknowledge to *MONITOR */
	ret = mon_send_reply(monmsg, monpriv);
	return ret;

out_copy:
	*ppos += count;
	return count;
}
567
568static unsigned int
569mon_poll(struct file *filp, struct poll_table_struct *p)
570{
571 struct mon_private *monpriv = filp->private_data;
572
573 poll_wait(filp, &mon_read_wait_queue, p);
574 if (unlikely(atomic_read(&monpriv->iucv_severed)))
575 return POLLERR;
576 if (atomic_read(&monpriv->read_ready))
577 return POLLIN | POLLRDNORM;
578 return 0;
579}
580
/* File operations for the monreader misc device. */
static struct file_operations mon_fops = {
	.owner   = THIS_MODULE,
	.open    = &mon_open,
	.release = &mon_close,
	.read    = &mon_read,
	.poll    = &mon_poll,
};
588
/* Misc character device "/dev/monreader" with a dynamic minor. */
static struct miscdevice mon_dev = {
	.name       = "monreader",
	.devfs_name = "monreader",
	.fops       = &mon_fops,
	.minor      = MISC_DYNAMIC_MINOR,
};
595
596/******************************************************************************
597 * module init/exit *
598 *****************************************************************************/
/*
 * Module init: verify we run under z/VM, check that the configured
 * DCSS segment exists and is of type SC, load it shared, build the
 * EBCDIC segment name for the IUCV connect data and register the misc
 * device.  On misc_register failure the segment is unloaded again.
 */
static int __init
mon_init(void)
{
	int rc;

	if (!MACHINE_IS_VM) {
		P_ERROR("not running under z/VM, driver not loaded\n");
		return -ENODEV;
	}

	rc = segment_type(mon_dcss_name);
	if (rc < 0) {
		mon_segment_warn(rc, mon_dcss_name);
		return rc;
	}
	if (rc != SEG_TYPE_SC) {
		P_ERROR("segment %s has unsupported type, should be SC\n",
			mon_dcss_name);
		return -EINVAL;
	}

	rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
			  &mon_dcss_start, &mon_dcss_end);
	if (rc < 0) {
		mon_segment_warn(rc, mon_dcss_name);
		return -EINVAL;
	}
	dcss_mkname(mon_dcss_name, &user_data_connect[8]);

	rc = misc_register(&mon_dev);
	if (rc < 0 ) {
		P_ERROR("misc_register failed, rc = %i\n", rc);
		goto out;
	}
	P_INFO("Loaded segment %s from %p to %p, size = %lu Byte\n",
		mon_dcss_name, (void *) mon_dcss_start, (void *) mon_dcss_end,
		mon_dcss_end - mon_dcss_start + 1);
	return 0;

out:
	segment_unload(mon_dcss_name);
	return rc;
}
642
/* Module exit: unload the DCSS segment and deregister the device. */
static void __exit
mon_exit(void)
{
	segment_unload(mon_dcss_name);
	WARN_ON(misc_deregister(&mon_dev) != 0);
	return;
}
650
651
652module_init(mon_init);
653module_exit(mon_exit);
654
655module_param_string(mondcss, mon_dcss_name, 9, 0444);
656MODULE_PARM_DESC(mondcss, "Name of DCSS segment to be used for *MONITOR "
657 "service, max. 8 chars. Default is MONDCSS");
658
659MODULE_AUTHOR("Gerald Schaefer <geraldsc@de.ibm.com>");
660MODULE_DESCRIPTION("Character device driver for reading z/VM "
661 "monitor service records.");
662MODULE_LICENSE("GPL");
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
new file mode 100644
index 000000000000..8e16a9716686
--- /dev/null
+++ b/drivers/s390/char/raw3270.c
@@ -0,0 +1,1335 @@
1/*
2 * drivers/s390/char/raw3270.c
3 * IBM/3270 Driver - core functions.
4 *
5 * Author(s):
6 * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
7 * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
8 * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
9 */
10
11#include <linux/config.h>
12#include <linux/bootmem.h>
13#include <linux/module.h>
14#include <linux/err.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/list.h>
18#include <linux/slab.h>
19#include <linux/types.h>
20#include <linux/wait.h>
21
22#include <asm/ccwdev.h>
23#include <asm/cio.h>
24#include <asm/ebcdic.h>
25
26#include "raw3270.h"
27
/* The main 3270 data structure, one per attached 3270 device. */
struct raw3270 {
	struct list_head list;		/* entry in raw3270_devices */
	struct ccw_device *cdev;	/* underlying channel device */
	int minor;			/* minor number assigned to device */

	short model, rows, cols;	/* terminal model and screen size */
	unsigned long flags;		/* RAW3270_FLAGS_* bits below */

	struct list_head req_queue;	/* Request queue. */
	struct list_head view_list;	/* List of available views. */
	struct raw3270_view *view;	/* Active view. */

	struct timer_list timer;	/* Device timer. */

	unsigned char *ascebc;		/* ascii -> ebcdic table */
};
45
46/* raw3270->flags */
47#define RAW3270_FLAGS_14BITADDR 0 /* 14-bit buffer addresses */
48#define RAW3270_FLAGS_BUSY 1 /* Device busy, leave it alone */
49#define RAW3270_FLAGS_ATTN 2 /* Device sent an ATTN interrupt */
50#define RAW3270_FLAGS_READY 4 /* Device is useable by views */
51#define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */
52
53/* Semaphore to protect global data of raw3270 (devices, views, etc). */
54static DECLARE_MUTEX(raw3270_sem);
55
56/* List of 3270 devices. */
57static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices);
58
59/*
60 * Flag to indicate if the driver has been registered. Some operations
61 * like waiting for the end of i/o need to be done differently as long
62 * as the kernel is still starting up (console support).
63 */
64static int raw3270_registered;
65
66/* Module parameters */
67static int tubxcorrect = 0;
68module_param(tubxcorrect, bool, 0);
69
70/*
71 * Wait queue for device init/delete, view delete.
72 */
73DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
74
75/*
76 * Encode array for 12 bit 3270 addresses.
77 */
78unsigned char raw3270_ebcgraf[64] = {
79 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
80 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
81 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
82 0xd8, 0xd9, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
83 0x60, 0x61, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
84 0xe8, 0xe9, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
85 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
86 0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f
87};
88
/*
 * Encode a buffer address into the 2-byte on-the-wire form: plain
 * 14-bit binary for devices with 14-bit addressing, otherwise the
 * classic 12-bit encoding via the raw3270_ebcgraf table.
 */
void
raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr)
{
	if (test_bit(RAW3270_FLAGS_14BITADDR, &rp->flags)) {
		cp[0] = (addr >> 8) & 0x3f;
		cp[1] = addr & 0xff;
	} else {
		cp[0] = raw3270_ebcgraf[(addr >> 6) & 0x3f];
		cp[1] = raw3270_ebcgraf[addr & 0x3f];
	}
}
100
/*
 * Allocate a new 3270 ccw request with an output buffer of 'size'
 * bytes (size 0 means no buffer).  Memory is GFP_DMA because the ccw
 * data address must be below 2GB.
 * Returns the request or ERR_PTR(-ENOMEM).
 */
struct raw3270_request *
raw3270_request_alloc(size_t size)
{
	struct raw3270_request *rq;

	/* Allocate request structure */
	rq = kmalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	memset(rq, 0, sizeof(struct raw3270_request));

	/* alloc output buffer. */
	if (size > 0) {
		rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
		if (!rq->buffer) {
			kfree(rq);
			return ERR_PTR(-ENOMEM);
		}
	}
	rq->size = size;
	INIT_LIST_HEAD(&rq->list);

	/*
	 * Setup ccw.
	 */
	rq->ccw.cda = __pa(rq->buffer);
	rq->ccw.flags = CCW_FLAG_SLI;

	return rq;
}
134
135#ifdef CONFIG_TN3270_CONSOLE
136/*
137 * Allocate a new 3270 ccw request from bootmem. Only works very
138 * early in the boot process. Only con3270.c should be using this.
139 */
140struct raw3270_request *
141raw3270_request_alloc_bootmem(size_t size)
142{
143 struct raw3270_request *rq;
144
145 rq = alloc_bootmem_low(sizeof(struct raw3270));
146 if (!rq)
147 return ERR_PTR(-ENOMEM);
148 memset(rq, 0, sizeof(struct raw3270_request));
149
150 /* alloc output buffer. */
151 if (size > 0) {
152 rq->buffer = alloc_bootmem_low(size);
153 if (!rq->buffer) {
154 free_bootmem((unsigned long) rq,
155 sizeof(struct raw3270));
156 return ERR_PTR(-ENOMEM);
157 }
158 }
159 rq->size = size;
160 INIT_LIST_HEAD(&rq->list);
161
162 /*
163 * Setup ccw.
164 */
165 rq->ccw.cda = __pa(rq->buffer);
166 rq->ccw.flags = CCW_FLAG_SLI;
167
168 return rq;
169}
170#endif
171
172/*
173 * Free 3270 ccw request
174 */
175void
176raw3270_request_free (struct raw3270_request *rq)
177{
178 if (rq->buffer)
179 kfree(rq->buffer);
180 kfree(rq);
181}
182
/*
 * Reset request to initial state so it can be reused: rebuild the ccw
 * to point at the (retained) output buffer and clear the result
 * fields.  The request must not be queued.
 */
void
raw3270_request_reset(struct raw3270_request *rq)
{
	BUG_ON(!list_empty(&rq->list));
	rq->ccw.cmd_code = 0;
	rq->ccw.count = 0;
	rq->ccw.cda = __pa(rq->buffer);
	rq->ccw.flags = CCW_FLAG_SLI;
	rq->rescnt = 0;
	rq->rc = 0;
}
197
/*
 * Set command code to ccw of a request.
 */
void
raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd)
{
	rq->ccw.cmd_code = cmd;
}
206
/*
 * Append a data fragment to the request's output buffer, growing the
 * ccw count.  Returns 0 on success or -E2BIG if the fragment does not
 * fit in the remaining buffer space.
 */
int
raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size)
{
	if (size + rq->ccw.count > rq->size)
		return -E2BIG;
	memcpy(rq->buffer + rq->ccw.count, data, size);
	rq->ccw.count += size;
	return 0;
}
219
/*
 * Set address/length pair to ccw of a request, replacing any buffer
 * set up earlier.  The caller keeps ownership of 'data'.
 */
void
raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
{
	rq->ccw.cda = __pa(data);
	rq->ccw.count = size;
}
229
/*
 * Set idal buffer to ccw of a request and flag the ccw for indirect
 * data addressing.
 */
void
raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
{
	rq->ccw.cda = __pa(ib->data);
	rq->ccw.count = ib->size;
	rq->ccw.flags |= CCW_FLAG_IDA;
}
240
/*
 * Stop a running ccw: try halt twice, then clear, up to 5 attempts.
 * Returns 0 if the request already completed or the termination was
 * accepted, otherwise the last ccw_device error.  Caller must hold the
 * ccw device lock.
 */
static int
raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq)
{
	int retries;
	int rc;

	if (raw3270_request_final(rq))
		return 0;
	/* Check if interrupt has already been processed */
	for (retries = 0; retries < 5; retries++) {
		if (retries < 2)
			rc = ccw_device_halt(rp->cdev, (long) rq);
		else
			rc = ccw_device_clear(rp->cdev, (long) rq);
		if (rc == 0)
			break;	/* termination successful */
	}
	return rc;
}
263
/* Locked wrapper around raw3270_halt_io_nolock(). */
static int
raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
	rc = raw3270_halt_io_nolock(rp, rq);
	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
	return rc;
}
275
/*
 * Add the request to the request queue, try to start it if the
 * 3270 device is idle. Return without waiting for end of i/o.
 * Takes a view reference that raw3270_irq() drops on completion.
 * Caller must hold the ccw device lock.
 */
static int
__raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
		struct raw3270_request *rq)
{
	rq->view = view;
	raw3270_get_view(view);
	if (list_empty(&rp->req_queue) &&
	    !test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
		/* No other requests are on the queue. Start this one. */
		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
					  (unsigned long) rq, 0, 0);
		if (rq->rc) {
			raw3270_put_view(view);
			return rq->rc;
		}
	}
	list_add_tail(&rq->list, &rp->req_queue);
	return 0;
}
299
/*
 * Start a request on behalf of a view.  Fails with -EACCES if the view
 * is not the active one and with -ENODEV if the device is not ready.
 */
int
raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
{
	unsigned long flags;
	struct raw3270 *rp;
	int rc;

	spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
	rp = view->dev;
	if (!rp || rp->view != view)
		rc = -EACCES;
	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
		rc = -ENODEV;
	else
		rc = __raw3270_start(rp, view, rq);
	spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
	return rc;
}
318
/*
 * Queue a request from interrupt context.  The request is only added
 * to the queue; raw3270_irq() will start it once the channel is free.
 */
int
raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)
{
	struct raw3270 *rp;

	rp = view->dev;
	rq->view = view;
	raw3270_get_view(view);
	list_add_tail(&rq->list, &rp->req_queue);
	return 0;
}
330
/*
 * 3270 interrupt routine, called from the ccw_device layer.
 * Classifies the interrupt (halted, busy/operator-intervention, view
 * specific), then either completes the current request (callback +
 * view put) or retries/stops it, and finally tries to start the next
 * queued request.
 */
static void
raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct raw3270 *rp;
	struct raw3270_view *view;
	struct raw3270_request *rq;
	int rc;

	rp = (struct raw3270 *) cdev->dev.driver_data;
	if (!rp)
		return;
	rq = (struct raw3270_request *) intparm;
	view = rq ? rq->view : rp->view;

	if (IS_ERR(irb))
		rc = RAW3270_IO_RETRY;
	else if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) {
		/* NOTE(review): rq is dereferenced without a NULL check
		 * here; presumably a halt is only ever issued with a
		 * request as intparm -- confirm. */
		rq->rc = -EIO;
		rc = RAW3270_IO_DONE;
	} else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
				       DEV_STAT_UNIT_EXCEP)) {
		/* Handle CE-DE-UE and subsequent UDE */
		set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
		rc = RAW3270_IO_BUSY;
	} else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
		/* Wait for UDE if busy flag is set. */
		if (irb->scsw.dstat & DEV_STAT_DEV_END) {
			clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
			/* Got it, now retry. */
			rc = RAW3270_IO_RETRY;
		} else
			rc = RAW3270_IO_BUSY;
	} else if (view)
		rc = view->fn->intv(view, rq, irb);
	else
		rc = RAW3270_IO_DONE;

	switch (rc) {
	case RAW3270_IO_DONE:
		break;
	case RAW3270_IO_BUSY:
		/*
		 * Intervention required by the operator. We have to wait
		 * for unsolicited device end.
		 */
		return;
	case RAW3270_IO_RETRY:
		if (!rq)
			break;
		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
					  (unsigned long) rq, 0, 0);
		if (rq->rc == 0)
			return;	/* Successfully restarted. */
		break;
	case RAW3270_IO_STOP:
		if (!rq)
			break;
		raw3270_halt_io_nolock(rp, rq);
		rq->rc = -EIO;
		break;
	default:
		BUG();
	}
	if (rq) {
		BUG_ON(list_empty(&rq->list));
		/* The request completed, remove from queue and do callback. */
		list_del_init(&rq->list);
		if (rq->callback)
			rq->callback(rq, rq->callback_data);
		/* Do put_device for get_device in raw3270_start. */
		raw3270_put_view(view);
	}
	/*
	 * Try to start each request on request queue until one is
	 * started successful.
	 */
	while (!list_empty(&rp->req_queue)) {
		rq = list_entry(rp->req_queue.next,struct raw3270_request,list);
		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
					  (unsigned long) rq, 0, 0);
		if (rq->rc == 0)
			break;
		/* Start failed. Remove request and do callback. */
		list_del_init(&rq->list);
		if (rq->callback)
			rq->callback(rq, rq->callback_data);
		/* Do put_device for get_device in raw3270_start. */
		raw3270_put_view(view);
	}
}
424
425/*
426 * Size sensing.
427 */
428
struct raw3270_ua {	/* Query Reply structure for Usable Area */
	struct {	/* Usable Area Query Reply Base */
		short l;	/* Length of this structured field */
		char sfid;	/* 0x81 if Query Reply */
		char qcode;	/* 0x81 if Usable Area */
		char flags0;
		char flags1;
		short w;	/* Width of usable area */
		short h;	/* Height of usable area */
		char units;	/* 0x00:in; 0x01:mm */
		int xr;
		int yr;
		char aw;
		char ah;
		short buffsz;	/* Character buffer size, bytes */
		char xmin;
		char ymin;
		char xmax;
		char ymax;
	} __attribute__ ((packed)) uab;
	struct {	/* Alternate Usable Area Self-Defining Parameter */
		char l;		/* Length of this Self-Defining Parm */
		char sdpid;	/* 0x02 if Alternate Usable Area */
		char res;
		char auaid;	/* 0x01 is Id for the A U A */
		short wauai;	/* Width of AUAi */
		short hauai;	/* Height of AUAi */
		char auaunits;	/* 0x00:in, 0x01:mm */
		int auaxr;
		int auayr;
		char awauai;
		char ahauai;
	} __attribute__ ((packed)) aua;
} __attribute__ ((packed));
463
464static unsigned char raw3270_init_data[256];
465static struct raw3270_request raw3270_init_request;
466static struct diag210 raw3270_init_diag210;
467static DECLARE_MUTEX(raw3270_init_sem);
468
/*
 * Interrupt handler for the init view (size sensing / reset).
 * Maps unit checks to -EOPNOTSUPP/-EIO, flags operator intervention as
 * busy, records the residual count on success and wakes waiters on an
 * attention interrupt.
 */
static int
raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
		 struct irb *irb)
{
	/*
	 * Unit-Check Processing:
	 * Expect Command Reject or Intervention Required.
	 */
	if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
		/* Request finished abnormally. */
		if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
			set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
			return RAW3270_IO_BUSY;
		}
	}
	if (rq) {
		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
			if (irb->ecw[0] & SNS0_CMD_REJECT)
				rq->rc = -EOPNOTSUPP;
			else
				rq->rc = -EIO;
		} else
			/* Request finished normally. Copy residual count. */
			rq->rescnt = irb->scsw.count;
	}
	if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
		set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
		wake_up(&raw3270_wait_queue);
	}
	return RAW3270_IO_DONE;
}
500
/* View operations for init-time requests: only the interrupt callback
 * is provided. */
static struct raw3270_fn raw3270_init_fn = {
	.intv = raw3270_init_irq
};

/* Pseudo view used while sizing/resetting a device; its dev pointer is
 * set temporarily under raw3270_init_sem. */
static struct raw3270_view raw3270_init_view = {
	.fn = &raw3270_init_fn
};
508
/*
 * Completion callback used by raw3270_start_init: rq->callback_data
 * holds the wait queue head the starter sleeps on; wake it up when
 * the request reaches final state.
 */
static void
raw3270_wake_init(struct raw3270_request *rq, void *data)
{
	wake_up((wait_queue_head_t *) data);
}
520
/*
 * Special wait function that can cope with console initialization.
 * Starts the request and sleeps until it is final; returns the start
 * return code, -ERESTARTSYS on signal, or the request's rc.
 */
static int
raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
		   struct raw3270_request *rq)
{
	unsigned long flags;
	wait_queue_head_t wq;
	int rc;

#ifdef CONFIG_TN3270_CONSOLE
	if (raw3270_registered == 0) {
		/* Early console init: the normal interrupt/wait-queue
		 * machinery is not available yet, so poll for the
		 * completion via wait_cons_dev() with the device lock
		 * held. */
		spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
		rq->callback = 0;
		rc = __raw3270_start(rp, view, rq);
		if (rc == 0)
			while (!raw3270_request_final(rq)) {
				wait_cons_dev();
				barrier();
			}
		spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
		return rq->rc;
	}
#endif
	/* Normal path: arrange for raw3270_wake_init to wake us when
	 * the request is final. */
	init_waitqueue_head(&wq);
	rq->callback = raw3270_wake_init;
	rq->callback_data = &wq;
	spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
	rc = __raw3270_start(rp, view, rq);
	spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
	if (rc)
		return rc;
	/* Now wait for the completion. */
	rc = wait_event_interruptible(wq, raw3270_request_final(rq));
	if (rc == -ERESTARTSYS) {	/* Interrupted by a signal. */
		raw3270_halt_io(view->dev, rq);
		/* Now wait (non-interruptibly) for the halt to complete. */
		wait_event(wq, raw3270_request_final(rq));
		return -ERESTARTSYS;
	}
	return rq->rc;
}
564
565static int
566__raw3270_size_device_vm(struct raw3270 *rp)
567{
568 int rc, model;
569
570 raw3270_init_diag210.vrdcdvno =
571 _ccw_device_get_device_number(rp->cdev);
572 raw3270_init_diag210.vrdclen = sizeof(struct diag210);
573 rc = diag210(&raw3270_init_diag210);
574 if (rc)
575 return rc;
576 model = raw3270_init_diag210.vrdccrmd;
577 switch (model) {
578 case 2:
579 rp->model = model;
580 rp->rows = 24;
581 rp->cols = 80;
582 break;
583 case 3:
584 rp->model = model;
585 rp->rows = 32;
586 rp->cols = 80;
587 break;
588 case 4:
589 rp->model = model;
590 rp->rows = 43;
591 rp->cols = 80;
592 break;
593 case 5:
594 rp->model = model;
595 rp->rows = 27;
596 rp->cols = 132;
597 break;
598 default:
599 printk(KERN_WARNING "vrdccrmd is 0x%.8x\n", model);
600 rc = -EOPNOTSUPP;
601 break;
602 }
603 return rc;
604}
605
606static int
607__raw3270_size_device(struct raw3270 *rp)
608{
609 static const unsigned char wbuf[] =
610 { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
611 struct raw3270_ua *uap;
612 unsigned short count;
613 int rc;
614
615 /*
616 * To determine the size of the 3270 device we need to do:
617 * 1) send a 'read partition' data stream to the device
618 * 2) wait for the attn interrupt that preceeds the query reply
619 * 3) do a read modified to get the query reply
620 * To make things worse we have to cope with intervention
621 * required (3270 device switched to 'stand-by') and command
622 * rejects (old devices that can't do 'read partition').
623 */
624 memset(&raw3270_init_request, 0, sizeof(raw3270_init_request));
625 memset(raw3270_init_data, 0, sizeof(raw3270_init_data));
626 /* Store 'read partition' data stream to raw3270_init_data */
627 memcpy(raw3270_init_data, wbuf, sizeof(wbuf));
628 INIT_LIST_HEAD(&raw3270_init_request.list);
629 raw3270_init_request.ccw.cmd_code = TC_WRITESF;
630 raw3270_init_request.ccw.flags = CCW_FLAG_SLI;
631 raw3270_init_request.ccw.count = sizeof(wbuf);
632 raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data);
633
634 rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request);
635 if (rc) {
636 /* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */
637 if (rc == -EOPNOTSUPP && MACHINE_IS_VM)
638 return __raw3270_size_device_vm(rp);
639 return rc;
640 }
641
642 /* Wait for attention interrupt. */
643#ifdef CONFIG_TN3270_CONSOLE
644 if (raw3270_registered == 0) {
645 unsigned long flags;
646
647 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
648 while (!test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags))
649 wait_cons_dev();
650 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
651 } else
652#endif
653 rc = wait_event_interruptible(raw3270_wait_queue,
654 test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags));
655 if (rc)
656 return rc;
657
658 /*
659 * The device accepted the 'read partition' command. Now
660 * set up a read ccw and issue it.
661 */
662 raw3270_init_request.ccw.cmd_code = TC_READMOD;
663 raw3270_init_request.ccw.flags = CCW_FLAG_SLI;
664 raw3270_init_request.ccw.count = sizeof(raw3270_init_data);
665 raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data);
666 rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request);
667 if (rc)
668 return rc;
669 /* Got a Query Reply */
670 count = sizeof(raw3270_init_data) - raw3270_init_request.rescnt;
671 uap = (struct raw3270_ua *) (raw3270_init_data + 1);
672 /* Paranoia check. */
673 if (raw3270_init_data[0] != 0x88 || uap->uab.qcode != 0x81)
674 return -EOPNOTSUPP;
675 /* Copy rows/columns of default Usable Area */
676 rp->rows = uap->uab.h;
677 rp->cols = uap->uab.w;
678 /* Check for 14 bit addressing */
679 if ((uap->uab.flags0 & 0x0d) == 0x01)
680 set_bit(RAW3270_FLAGS_14BITADDR, &rp->flags);
681 /* Check for Alternate Usable Area */
682 if (uap->uab.l == sizeof(struct raw3270_ua) &&
683 uap->aua.sdpid == 0x02) {
684 rp->rows = uap->aua.hauai;
685 rp->cols = uap->aua.wauai;
686 }
687 return 0;
688}
689
690static int
691raw3270_size_device(struct raw3270 *rp)
692{
693 int rc;
694
695 down(&raw3270_init_sem);
696 rp->view = &raw3270_init_view;
697 raw3270_init_view.dev = rp;
698 rc = __raw3270_size_device(rp);
699 raw3270_init_view.dev = 0;
700 rp->view = 0;
701 up(&raw3270_init_sem);
702 if (rc == 0) { /* Found something. */
703 /* Try to find a model. */
704 rp->model = 0;
705 if (rp->rows == 24 && rp->cols == 80)
706 rp->model = 2;
707 if (rp->rows == 32 && rp->cols == 80)
708 rp->model = 3;
709 if (rp->rows == 43 && rp->cols == 80)
710 rp->model = 4;
711 if (rp->rows == 27 && rp->cols == 132)
712 rp->model = 5;
713 }
714 return rc;
715}
716
/*
 * Reset the 3270 device: issue an 'erase write alternate' carrying a
 * single keyboard-restore (TW_KR) control character. Serialized by
 * raw3270_init_sem since it uses the shared init request/data buffers.
 */
static int
raw3270_reset_device(struct raw3270 *rp)
{
	int rc;

	down(&raw3270_init_sem);
	memset(&raw3270_init_request, 0, sizeof(raw3270_init_request));
	memset(raw3270_init_data, 0, sizeof(raw3270_init_data));
	/* Store reset data stream to raw3270_init_data/raw3270_init_request */
	raw3270_init_data[0] = TW_KR;
	INIT_LIST_HEAD(&raw3270_init_request.list);
	raw3270_init_request.ccw.cmd_code = TC_EWRITEA;
	raw3270_init_request.ccw.flags = CCW_FLAG_SLI;
	raw3270_init_request.ccw.count = 1;
	raw3270_init_request.ccw.cda = (__u32) __pa(raw3270_init_data);
	rp->view = &raw3270_init_view;
	raw3270_init_view.dev = rp;
	rc = raw3270_start_init(rp, &raw3270_init_view, &raw3270_init_request);
	raw3270_init_view.dev = 0;
	rp->view = 0;
	up(&raw3270_init_sem);
	return rc;
}
740
/*
 * Setup new 3270 device: initialize the raw3270 structure, assign the
 * smallest unused minor number, and hook up the interrupt handler.
 * Returns 0 on success, -EUSERS if all minor numbers are taken.
 */
static int
raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
{
	struct list_head *l;
	struct raw3270 *tmp;
	int minor;

	memset(rp, 0, sizeof(struct raw3270));
	/* Copy ascii -> ebcdic translation table. */
	memcpy(ascebc, _ascebc, 256);
	if (tubxcorrect) {
		/* correct brackets and circumflex */
		ascebc['['] = 0xad;
		ascebc[']'] = 0xbd;
		ascebc['^'] = 0xb0;
	}
	rp->ascebc = ascebc;

	/* Set defaults. */
	rp->rows = 24;
	rp->cols = 80;

	INIT_LIST_HEAD(&rp->req_queue);
	INIT_LIST_HEAD(&rp->view_list);

	/*
	 * Add device to list and find the smallest unused minor
	 * number for it.
	 */
	down(&raw3270_sem);
	/* Keep the list sorted. */
	minor = 0;
	rp->minor = -1;
	list_for_each(l, &raw3270_devices) {
		tmp = list_entry(l, struct raw3270, list);
		if (tmp->minor > minor) {
			/* Gap in the sorted minors: take it and insert
			 * before the current element. */
			rp->minor = minor;
			__list_add(&rp->list, l->prev, l);
			break;
		}
		minor++;
	}
	if (rp->minor == -1 && minor < RAW3270_MAXDEVS) {
		/* No gap found: append with the next free minor. */
		rp->minor = minor;
		list_add_tail(&rp->list, &raw3270_devices);
	}
	up(&raw3270_sem);
	/* No free minor number? Then give up. */
	if (rp->minor == -1)
		return -EUSERS;
	rp->cdev = cdev;
	cdev->dev.driver_data = rp;
	cdev->handler = raw3270_irq;
	return 0;
}
799
800#ifdef CONFIG_TN3270_CONSOLE
/*
 * Setup 3270 device configured as console. Runs early in boot, hence
 * the bootmem allocations instead of kmalloc.
 */
struct raw3270 *
raw3270_setup_console(struct ccw_device *cdev)
{
	struct raw3270 *rp;
	char *ascebc;
	int rc;

	rp = (struct raw3270 *) alloc_bootmem(sizeof(struct raw3270));
	ascebc = (char *) alloc_bootmem(256);
	rc = raw3270_setup_device(cdev, rp, ascebc);
	if (rc)
		return ERR_PTR(rc);
	set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
	/* Reset, determine the screen geometry, then reset again to
	 * leave the device in a clean state. */
	rc = raw3270_reset_device(rp);
	if (rc)
		return ERR_PTR(rc);
	rc = raw3270_size_device(rp);
	if (rc)
		return ERR_PTR(rc);
	rc = raw3270_reset_device(rp);
	if (rc)
		return ERR_PTR(rc);
	set_bit(RAW3270_FLAGS_READY, &rp->flags);
	return rp;
}
829
/* Poll the console device once, with its lock held. */
void
raw3270_wait_cons_dev(struct raw3270 *rp)
{
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
	wait_cons_dev();
	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
}
839
840#endif
841
/*
 * Create a 3270 device structure. Returns the new raw3270 or an
 * ERR_PTR on allocation/setup failure.
 */
static struct raw3270 *
raw3270_create_device(struct ccw_device *cdev)
{
	struct raw3270 *rp;
	char *ascebc;
	int rc;

	rp = kmalloc(sizeof(struct raw3270), GFP_KERNEL);
	if (!rp)
		return ERR_PTR(-ENOMEM);
	ascebc = kmalloc(256, GFP_KERNEL);
	if (!ascebc) {
		kfree(rp);
		return ERR_PTR(-ENOMEM);
	}
	rc = raw3270_setup_device(cdev, rp, ascebc);
	if (rc) {
		/* rp->ascebc was assigned by raw3270_setup_device before
		 * its only failure point, so freeing it here is safe. */
		kfree(rp->ascebc);
		kfree(rp);
		rp = ERR_PTR(rc);
	}
	/* Get reference to ccw_device structure. */
	/* NOTE(review): the reference is taken even when setup failed
	 * and an ERR_PTR is returned - verify callers drop it in that
	 * case. */
	get_device(&cdev->dev);
	return rp;
}
870
/*
 * Activate a view. On failure the previously active view is restored;
 * if that fails too, any other view on the device is tried. Returns
 * the activate callback's result, or -ENODEV if the device is gone or
 * not ready.
 */
int
raw3270_activate_view(struct raw3270_view *view)
{
	struct raw3270 *rp;
	struct raw3270_view *oldview, *nv;
	unsigned long flags;
	int rc;

	rp = view->dev;
	if (!rp)
		return -ENODEV;
	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
	if (rp->view == view)
		/* Already active - nothing to do. */
		rc = 0;
	else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags))
		rc = -ENODEV;
	else {
		/* Deactivate the currently active view, if any. */
		oldview = 0;
		if (rp->view) {
			oldview = rp->view;
			oldview->fn->deactivate(oldview);
		}
		rp->view = view;
		rc = view->fn->activate(view);
		if (rc) {
			/* Didn't work. Try to reactivate the old view. */
			rp->view = oldview;
			if (!oldview || oldview->fn->activate(oldview) != 0) {
				/* Didn't work as well. Try any other view. */
				list_for_each_entry(nv, &rp->view_list, list)
					if (nv != view && nv != oldview) {
						rp->view = nv;
						if (nv->fn->activate(nv) == 0)
							break;
						rp->view = 0;
					}
			}
		}
	}
	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
	return rc;
}
916
/*
 * Deactivate current view. The view is moved to the end of the view
 * list and the first view that activates successfully becomes the new
 * active view.
 */
void
raw3270_deactivate_view(struct raw3270_view *view)
{
	unsigned long flags;
	struct raw3270 *rp;

	rp = view->dev;
	if (!rp)
		return;
	spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
	if (rp->view == view) {
		view->fn->deactivate(view);
		rp->view = 0;
		/* Move deactivated view to end of list. */
		list_del_init(&view->list);
		list_add_tail(&view->list, &rp->view_list);
		/* Try to activate another view. */
		if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
			list_for_each_entry(view, &rp->view_list, list)
				if (view->fn->activate(view) == 0) {
					rp->view = view;
					break;
				}
		}
	}
	spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
}
947
/*
 * Add view to device with minor "minor". Initializes the view from the
 * device's geometry and appends it to the device's view list. Returns
 * 0 on success, -ENODEV if no ready device with that minor exists.
 */
int
raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
{
	unsigned long flags;
	struct raw3270 *rp;
	int rc;

	down(&raw3270_sem);
	rc = -ENODEV;
	list_for_each_entry(rp, &raw3270_devices, list) {
		if (rp->minor != minor)
			continue;
		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
		if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
			/* Initial count of 2 - presumably one reference
			 * for the caller and one for the device; see
			 * raw3270_del_view / raw3270_put_view. TODO confirm */
			atomic_set(&view->ref_count, 2);
			view->dev = rp;
			view->fn = fn;
			view->model = rp->model;
			view->rows = rp->rows;
			view->cols = rp->cols;
			view->ascebc = rp->ascebc;
			spin_lock_init(&view->lock);
			list_add_tail(&view->list, &rp->view_list);
			rc = 0;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
		break;
	}
	up(&raw3270_sem);
	return rc;
}
982
/*
 * Find specific view of device with minor "minor". On success a
 * reference to the view is taken (caller must raw3270_put_view).
 * Returns ERR_PTR(-ENODEV) if no ready device with that minor exists,
 * ERR_PTR(-ENOENT) if the device has no view with the given fn.
 */
struct raw3270_view *
raw3270_find_view(struct raw3270_fn *fn, int minor)
{
	struct raw3270 *rp;
	struct raw3270_view *view, *tmp;
	unsigned long flags;

	down(&raw3270_sem);
	view = ERR_PTR(-ENODEV);
	list_for_each_entry(rp, &raw3270_devices, list) {
		if (rp->minor != minor)
			continue;
		spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
		if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
			view = ERR_PTR(-ENOENT);
			list_for_each_entry(tmp, &rp->view_list, list) {
				if (tmp->fn == fn) {
					raw3270_get_view(tmp);
					view = tmp;
					break;
				}
			}
		}
		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
		break;
	}
	up(&raw3270_sem);
	return view;
}
1015
1016/*
1017 * Remove view from device and free view structure via call to view->fn->free.
1018 */
1019void
1020raw3270_del_view(struct raw3270_view *view)
1021{
1022 unsigned long flags;
1023 struct raw3270 *rp;
1024 struct raw3270_view *nv;
1025
1026 rp = view->dev;
1027 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
1028 if (rp->view == view) {
1029 view->fn->deactivate(view);
1030 rp->view = 0;
1031 }
1032 list_del_init(&view->list);
1033 if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags)) {
1034 /* Try to activate another view. */
1035 list_for_each_entry(nv, &rp->view_list, list) {
1036 if (nv->fn->activate(view) == 0) {
1037 rp->view = nv;
1038 break;
1039 }
1040 }
1041 }
1042 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
1043 /* Wait for reference counter to drop to zero. */
1044 atomic_dec(&view->ref_count);
1045 wait_event(raw3270_wait_queue, atomic_read(&view->ref_count) == 0);
1046 if (view->fn->free)
1047 view->fn->free(view);
1048}
1049
/*
 * Remove a 3270 device structure: unlink it from the device list,
 * detach it from the ccw_device (dropping the reference taken in
 * raw3270_create_device) and free its memory.
 */
static void
raw3270_delete_device(struct raw3270 *rp)
{
	struct ccw_device *cdev;

	/* Remove from device chain. */
	down(&raw3270_sem);
	list_del_init(&rp->list);
	up(&raw3270_sem);

	/* Disconnect from ccw_device. */
	cdev = rp->cdev;
	rp->cdev = 0;
	cdev->dev.driver_data = 0;
	cdev->handler = 0;

	/* Put ccw_device structure. */
	put_device(&cdev->dev);

	/* Now free raw3270 structure. */
	kfree(rp->ascebc);
	kfree(rp);
}
1076
/* ccw probe callback: nothing to do until the device is set online. */
static int
raw3270_probe (struct ccw_device *cdev)
{
	return 0;
}
1082
/*
 * Additional attributes for a 3270 device
 */

/* sysfs 'model': the 3270 model number (2-5, or 0 if unknown). */
static ssize_t
raw3270_model_show(struct device *dev, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%i\n",
			((struct raw3270 *) dev->driver_data)->model);
}
static DEVICE_ATTR(model, 0444, raw3270_model_show, 0);

/* sysfs 'rows': number of screen rows. */
static ssize_t
raw3270_rows_show(struct device *dev, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%i\n",
			((struct raw3270 *) dev->driver_data)->rows);
}
static DEVICE_ATTR(rows, 0444, raw3270_rows_show, 0);

/* sysfs 'columns': number of screen columns. */
static ssize_t
raw3270_columns_show(struct device *dev, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%i\n",
			((struct raw3270 *) dev->driver_data)->cols);
}
static DEVICE_ATTR(columns, 0444, raw3270_columns_show, 0);

static struct attribute * raw3270_attrs[] = {
	&dev_attr_model.attr,
	&dev_attr_rows.attr,
	&dev_attr_columns.attr,
	NULL,
};

static struct attribute_group raw3270_attr_group = {
	.attrs = raw3270_attrs,
};
1120
/* Create the model/rows/columns sysfs attributes for a device. */
static void
raw3270_create_attributes(struct raw3270 *rp)
{
	//FIXME: check return code
	sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
}
1127
/*
 * Notifier for device addition/removal. The callback gets the device's
 * minor number and 1 (added) or 0 (removed); see raw3270_set_online
 * and raw3270_remove. The list is protected by raw3270_sem.
 */
struct raw3270_notifier {
	struct list_head list;
	void (*notifier)(int, int);
};

static struct list_head raw3270_notifier = LIST_HEAD_INIT(raw3270_notifier);
1137
/*
 * Register a device add/remove callback. The callback is immediately
 * invoked (with "added") for every device already known; a ccw_device
 * reference is taken for each. Returns -ENOMEM on allocation failure.
 */
int raw3270_register_notifier(void (*notifier)(int, int))
{
	struct raw3270_notifier *np;
	struct raw3270 *rp;

	np = kmalloc(sizeof(struct raw3270_notifier), GFP_KERNEL);
	if (!np)
		return -ENOMEM;
	np->notifier = notifier;
	down(&raw3270_sem);
	list_add_tail(&np->list, &raw3270_notifier);
	/* Replay existing devices to the new listener. */
	list_for_each_entry(rp, &raw3270_devices, list) {
		get_device(&rp->cdev->dev);
		notifier(rp->minor, 1);
	}
	up(&raw3270_sem);
	return 0;
}
1156
/* Remove a previously registered device add/remove callback. */
void raw3270_unregister_notifier(void (*notifier)(int, int))
{
	struct raw3270_notifier *np;

	down(&raw3270_sem);
	list_for_each_entry(np, &raw3270_notifier, list)
		if (np->notifier == notifier) {
			list_del(&np->list);
			kfree(np);
			break;
		}
	up(&raw3270_sem);
}
1170
/*
 * Set 3270 device online: create the device structure, reset and size
 * it, create sysfs attributes, mark it ready and notify listeners.
 */
static int
raw3270_set_online (struct ccw_device *cdev)
{
	struct raw3270 *rp;
	struct raw3270_notifier *np;
	int rc;

	rp = raw3270_create_device(cdev);
	if (IS_ERR(rp))
		return PTR_ERR(rp);
	rc = raw3270_reset_device(rp);
	if (rc)
		/* NOTE(review): rp (and the ccw_device reference taken
		 * in raw3270_create_device) is not released on this and
		 * the following error paths - verify intentional. */
		return rc;
	rc = raw3270_size_device(rp);
	if (rc)
		return rc;
	rc = raw3270_reset_device(rp);
	if (rc)
		return rc;
	raw3270_create_attributes(rp);
	set_bit(RAW3270_FLAGS_READY, &rp->flags);
	down(&raw3270_sem);
	list_for_each_entry(np, &raw3270_notifier, list)
		np->notifier(rp->minor, 1);
	up(&raw3270_sem);
	return 0;
}
1201
/*
 * Remove 3270 device structure: tear down all views, notify listeners
 * of the removal, reset the device and free everything.
 */
static void
raw3270_remove (struct ccw_device *cdev)
{
	unsigned long flags;
	struct raw3270 *rp;
	struct raw3270_view *v;
	struct raw3270_notifier *np;

	rp = cdev->dev.driver_data;
	clear_bit(RAW3270_FLAGS_READY, &rp->flags);

	sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group);

	/* Deactivate current view and remove all views. */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	if (rp->view) {
		rp->view->fn->deactivate(rp->view);
		rp->view = 0;
	}
	while (!list_empty(&rp->view_list)) {
		v = list_entry(rp->view_list.next, struct raw3270_view, list);
		if (v->fn->release)
			v->fn->release(v);
		/* raw3270_del_view sleeps (waits for the view's
		 * reference count), so drop the lock around it. */
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		raw3270_del_view(v);
		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	down(&raw3270_sem);
	list_for_each_entry(np, &raw3270_notifier, list)
		np->notifier(rp->minor, 0);
	up(&raw3270_sem);

	/* Reset 3270 device. */
	raw3270_reset_device(rp);
	/* And finally remove it. */
	raw3270_delete_device(rp);
}
1244
/*
 * Set 3270 device offline. The console device may not be taken
 * offline (-EBUSY).
 */
static int
raw3270_set_offline (struct ccw_device *cdev)
{
	struct raw3270 *rp;

	rp = cdev->dev.driver_data;
	if (test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags))
		return -EBUSY;
	raw3270_remove(cdev);
	return 0;
}
1259
/* CCW ids of the 3270-family devices handled by this driver. */
static struct ccw_device_id raw3270_id[] = {
	{ CCW_DEVICE(0x3270, 0) },
	{ CCW_DEVICE(0x3271, 0) },
	{ CCW_DEVICE(0x3272, 0) },
	{ CCW_DEVICE(0x3273, 0) },
	{ CCW_DEVICE(0x3274, 0) },
	{ CCW_DEVICE(0x3275, 0) },
	{ CCW_DEVICE(0x3276, 0) },
	{ CCW_DEVICE(0x3277, 0) },
	{ CCW_DEVICE(0x3278, 0) },
	{ CCW_DEVICE(0x3279, 0) },
	{ CCW_DEVICE(0x3174, 0) },
	{ /* end of list */ },
};
1274
/* CCW driver glue: probe/remove and online/offline callbacks. */
static struct ccw_driver raw3270_ccw_driver = {
	.name		= "3270",
	.owner		= THIS_MODULE,
	.ids		= raw3270_id,
	.probe		= &raw3270_probe,
	.remove		= &raw3270_remove,
	.set_online	= &raw3270_set_online,
	.set_offline	= &raw3270_set_offline,
};
1284
/*
 * Module init: register the ccw driver and create sysfs attributes for
 * any device set up earlier (i.e. the console).
 */
static int
raw3270_init(void)
{
	struct raw3270 *rp;
	int rc;

	if (raw3270_registered)
		return 0;
	raw3270_registered = 1;
	/* NOTE(review): raw3270_registered stays set even if the
	 * registration below fails. */
	rc = ccw_driver_register(&raw3270_ccw_driver);
	if (rc == 0) {
		/* Create attributes for early (= console) device. */
		down(&raw3270_sem);
		list_for_each_entry(rp, &raw3270_devices, list) {
			get_device(&rp->cdev->dev);
			raw3270_create_attributes(rp);
		}
		up(&raw3270_sem);
	}
	return rc;
}
1306
/* Module exit: unregister the ccw driver. */
static void
raw3270_exit(void)
{
	ccw_driver_unregister(&raw3270_ccw_driver);
}
1312
1313MODULE_LICENSE("GPL");
1314
1315module_init(raw3270_init);
1316module_exit(raw3270_exit);
1317
1318EXPORT_SYMBOL(raw3270_request_alloc);
1319EXPORT_SYMBOL(raw3270_request_free);
1320EXPORT_SYMBOL(raw3270_request_reset);
1321EXPORT_SYMBOL(raw3270_request_set_cmd);
1322EXPORT_SYMBOL(raw3270_request_add_data);
1323EXPORT_SYMBOL(raw3270_request_set_data);
1324EXPORT_SYMBOL(raw3270_request_set_idal);
1325EXPORT_SYMBOL(raw3270_buffer_address);
1326EXPORT_SYMBOL(raw3270_add_view);
1327EXPORT_SYMBOL(raw3270_del_view);
1328EXPORT_SYMBOL(raw3270_find_view);
1329EXPORT_SYMBOL(raw3270_activate_view);
1330EXPORT_SYMBOL(raw3270_deactivate_view);
1331EXPORT_SYMBOL(raw3270_start);
1332EXPORT_SYMBOL(raw3270_start_irq);
1333EXPORT_SYMBOL(raw3270_register_notifier);
1334EXPORT_SYMBOL(raw3270_unregister_notifier);
1335EXPORT_SYMBOL(raw3270_wait_queue);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
new file mode 100644
index 000000000000..ed5d4eb9f623
--- /dev/null
+++ b/drivers/s390/char/raw3270.h
@@ -0,0 +1,274 @@
1/*
2 * drivers/s390/char/raw3270.h
3 * IBM/3270 Driver
4 *
5 * Author(s):
6 * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
7 * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
8 * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
9 */
10
11#include <asm/idals.h>
12#include <asm/ioctl.h>
13
14/* ioctls for fullscreen 3270 */
15#define TUBICMD _IO('3', 3) /* set ccw command for fs reads. */
16#define TUBOCMD _IO('3', 4) /* set ccw command for fs writes. */
17#define TUBGETI _IO('3', 7) /* get ccw command for fs reads. */
18#define TUBGETO _IO('3', 8) /* get ccw command for fs writes. */
19#define TUBSETMOD _IO('3',12) /* FIXME: what does it do ?*/
20#define TUBGETMOD _IO('3',13) /* FIXME: what does it do ?*/
21
22/* Local Channel Commands */
23#define TC_WRITE 0x01 /* Write */
24#define TC_EWRITE 0x05 /* Erase write */
25#define TC_READMOD 0x06 /* Read modified */
26#define TC_EWRITEA 0x0d /* Erase write alternate */
27#define TC_WRITESF 0x11 /* Write structured field */
28
29/* Buffer Control Orders */
30#define TO_SF 0x1d /* Start field */
31#define TO_SBA 0x11 /* Set buffer address */
32#define TO_IC 0x13 /* Insert cursor */
33#define TO_PT 0x05 /* Program tab */
34#define TO_RA 0x3c /* Repeat to address */
35#define TO_SFE 0x29 /* Start field extended */
36#define TO_EUA 0x12 /* Erase unprotected to address */
37#define TO_MF 0x2c /* Modify field */
38#define TO_SA 0x28 /* Set attribute */
39
40/* Field Attribute Bytes */
41#define TF_INPUT 0x40 /* Visible input */
42#define TF_INPUTN 0x4c /* Invisible input */
43#define TF_INMDT 0xc1 /* Visible, Set-MDT */
44#define TF_LOG 0x60
45
46/* Character Attribute Bytes */
47#define TAT_RESET 0x00
48#define TAT_FIELD 0xc0
49#define TAT_EXTHI 0x41
50#define TAT_COLOR 0x42
51#define TAT_CHARS 0x43
52#define TAT_TRANS 0x46
53
54/* Extended-Highlighting Bytes */
55#define TAX_RESET 0x00
56#define TAX_BLINK 0xf1
57#define TAX_REVER 0xf2
58#define TAX_UNDER 0xf4
59
60/* Reset value */
61#define TAR_RESET 0x00
62
63/* Color values */
64#define TAC_RESET 0x00
65#define TAC_BLUE 0xf1
66#define TAC_RED 0xf2
67#define TAC_PINK 0xf3
68#define TAC_GREEN 0xf4
69#define TAC_TURQ 0xf5
70#define TAC_YELLOW 0xf6
71#define TAC_WHITE 0xf7
72#define TAC_DEFAULT 0x00
73
74/* Write Control Characters */
75#define TW_NONE 0x40 /* No particular action */
76#define TW_KR 0xc2 /* Keyboard restore */
77#define TW_PLUSALARM 0x04 /* Add this bit for alarm */
78
79#define RAW3270_MAXDEVS 256
80
/* For TUBGETMOD and TUBSETMOD. Should include. */
struct raw3270_iocb {
	short model;	/* 3270 model number */
	short line_cnt;	/* number of lines (rows) */
	short col_cnt;	/* number of columns */
	short pf_cnt;
	short re_cnt;
	short map;
};
90
91struct raw3270;
92struct raw3270_view;
93
/* 3270 CCW request. A request is queued on a device's request list
 * while in flight; an empty list head means it is final (see
 * raw3270_request_final). */
struct raw3270_request {
	struct list_head list;		/* list head for request queueing. */
	struct raw3270_view *view;	/* view of this request */
	struct ccw1 ccw;		/* single ccw. */
	void *buffer;			/* output buffer. */
	size_t size;			/* size of output buffer. */
	int rescnt;			/* residual count from devstat. */
	int rc;				/* return code for this request. */

	/* Callback for delivering final status. */
	void (*callback)(struct raw3270_request *, void *);
	void *callback_data;
};
108
109struct raw3270_request *raw3270_request_alloc(size_t size);
110struct raw3270_request *raw3270_request_alloc_bootmem(size_t size);
111void raw3270_request_free(struct raw3270_request *);
112void raw3270_request_reset(struct raw3270_request *);
113void raw3270_request_set_cmd(struct raw3270_request *, u8 cmd);
114int raw3270_request_add_data(struct raw3270_request *, void *, size_t);
115void raw3270_request_set_data(struct raw3270_request *, void *, size_t);
116void raw3270_request_set_idal(struct raw3270_request *, struct idal_buffer *);
117
/* A request is final once it is no longer queued on any request list. */
static inline int
raw3270_request_final(struct raw3270_request *rq)
{
	return list_empty(&rq->list);
}
123
124void raw3270_buffer_address(struct raw3270 *, char *, unsigned short);
125
126/* Return value of *intv (see raw3270_fn below) can be one of the following: */
127#define RAW3270_IO_DONE 0 /* request finished */
128#define RAW3270_IO_BUSY 1 /* request still active */
129#define RAW3270_IO_RETRY 2 /* retry current request */
130#define RAW3270_IO_STOP 3 /* kill current request */
131
/*
 * Functions of a 3270 view.
 * activate/deactivate switch the screen to/from this view; intv is the
 * interrupt callback (returns a RAW3270_IO_* value); release is called
 * on device removal before del_view; free destroys the view structure.
 */
struct raw3270_fn {
	int  (*activate)(struct raw3270_view *);
	void (*deactivate)(struct raw3270_view *);
	int  (*intv)(struct raw3270_view *,
		     struct raw3270_request *, struct irb *);
	void (*release)(struct raw3270_view *);
	void (*free)(struct raw3270_view *);
};
143
/*
 * View structure chaining. The raw3270_view structure is meant to
 * be embedded at the start of the real view data structure, e.g.:
 *	struct example {
 *		struct raw3270_view view;
 *		...
 *	};
 */
struct raw3270_view {
	struct list_head list;
	spinlock_t lock;
	atomic_t ref_count;
	struct raw3270 *dev;
	struct raw3270_fn *fn;
	unsigned int model;
	unsigned int rows, cols;	/* # of rows & columns of the view */
	unsigned char *ascebc;		/* ascii -> ebcdic table */
};
162
163int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
164int raw3270_activate_view(struct raw3270_view *);
165void raw3270_del_view(struct raw3270_view *);
166void raw3270_deactivate_view(struct raw3270_view *);
167struct raw3270_view *raw3270_find_view(struct raw3270_fn *, int);
168int raw3270_start(struct raw3270_view *, struct raw3270_request *);
169int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *);
170
/* Reference count inliner for view structures. */
static inline void
raw3270_get_view(struct raw3270_view *view)
{
	atomic_inc(&view->ref_count);
}
177
178extern wait_queue_head_t raw3270_wait_queue;
179
/* Drop a view reference; the last put wakes raw3270_del_view, which
 * waits on raw3270_wait_queue for the count to reach zero. */
static inline void
raw3270_put_view(struct raw3270_view *view)
{
	if (atomic_dec_return(&view->ref_count) == 0)
		wake_up(&raw3270_wait_queue);
}
186
187struct raw3270 *raw3270_setup_console(struct ccw_device *cdev);
188void raw3270_wait_cons_dev(struct raw3270 *);
189
190/* Notifier for device addition/removal */
191int raw3270_register_notifier(void (*notifier)(int, int));
192void raw3270_unregister_notifier(void (*notifier)(int, int));
193
/*
 * Little memory allocator for string objects.
 * Each string carries its header; "size" is the 8-byte-aligned payload
 * capacity, "len" the requested length, and the payload follows the
 * header ("string[0]" trailing array).
 */
struct string
{
	struct list_head list;		/* chaining on free/used lists */
	struct list_head update;	/* secondary chaining for users */
	unsigned long size;		/* payload capacity in bytes */
	unsigned long len;		/* requested length */
	char string[0];			/* payload starts here */
} __attribute__ ((aligned(8)));
205
/*
 * Allocate a string object of "len" bytes from the free list.
 * First-fit: when a free chunk is large enough to be split, the
 * allocation is carved from the tail of its payload so the chunk
 * stays on the list with a reduced size; otherwise the whole chunk
 * is taken. Returns 0 if no chunk fits.
 */
static inline struct string *
alloc_string(struct list_head *free_list, unsigned long len)
{
	struct string *cs, *tmp;
	unsigned long size;

	/* Round the payload up to a multiple of 8 bytes. */
	size = (len + 7L) & -8L;
	list_for_each_entry(cs, free_list, list) {
		if (cs->size < size)
			continue;
		if (cs->size > size + sizeof(struct string)) {
			/* Split: place the new header just below the
			 * end of the free chunk's payload. */
			char *endaddr = (char *) (cs + 1) + cs->size;
			tmp = (struct string *) (endaddr - size) - 1;
			tmp->size = size;
			cs->size -= size + sizeof(struct string);
			cs = tmp;
		} else
			/* Take the whole chunk off the free list. */
			list_del(&cs->list);
		cs->len = len;
		INIT_LIST_HEAD(&cs->list);
		INIT_LIST_HEAD(&cs->update);
		return cs;
	}
	return 0;
}
231
/*
 * Return a string object to the free list. The list is kept sorted by
 * address; the freed chunk is coalesced with its right and/or left
 * neighbour when they are physically adjacent. Returns the size of
 * the resulting free chunk.
 */
static inline unsigned long
free_string(struct list_head *free_list, struct string *cs)
{
	struct string *tmp;
	struct list_head *p, *left;

	/* Find out the left neighbour in free memory list. */
	left = free_list;
	list_for_each(p, free_list) {
		if (list_entry(p, struct string, list) > cs)
			break;
		left = p;
	}
	/* Try to merge with right neighbour = next element from left. */
	if (left->next != free_list) {
		tmp = list_entry(left->next, struct string, list);
		if ((char *) (cs + 1) + cs->size == (char *) tmp) {
			/* Adjacent: absorb the right chunk (payload and
			 * header) into cs. */
			list_del(&tmp->list);
			cs->size += tmp->size + sizeof(struct string);
		}
	}
	/* Try to merge with left neighbour. */
	if (left != free_list) {
		tmp = list_entry(left, struct string, list);
		if ((char *) (tmp + 1) + tmp->size == (char *) cs) {
			/* Adjacent: grow the left chunk in place; cs is
			 * absorbed, so nothing is inserted. */
			tmp->size += cs->size + sizeof(struct string);
			return tmp->size;
		}
	}
	/* No left merge: insert cs after its left neighbour. */
	__list_add(&cs->list, left, left->next);
	return cs->size;
}
264
265static inline void
266add_string_memory(struct list_head *free_list, void *mem, unsigned long size)
267{
268 struct string *cs;
269
270 cs = (struct string *) mem;
271 cs->size = size - sizeof(struct string);
272 free_string(free_list, cs);
273}
274
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
new file mode 100644
index 000000000000..ceb0e474fde4
--- /dev/null
+++ b/drivers/s390/char/sclp.c
@@ -0,0 +1,915 @@
1/*
2 * drivers/s390/char/sclp.c
3 * core function to access sclp interface
4 *
5 * S390 version
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/err.h>
13#include <linux/spinlock.h>
14#include <linux/interrupt.h>
15#include <linux/timer.h>
16#include <linux/reboot.h>
17#include <linux/jiffies.h>
18#include <asm/types.h>
19#include <asm/s390_ext.h>
20
21#include "sclp.h"
22
#define SCLP_HEADER		"sclp: "

/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
/* Page-aligned SCCBs; the hardware is passed the physical address of
 * these buffers (see service_call). */
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* The state variables below are volatile because sclp_sync_wait() polls
 * them in a busy loop with interrupts partially disabled. */

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3
#define SCLP_REQUEST_RETRY	3

/* Timeout intervals in seconds.*/
#define SCLP_BUSY_INTERVAL	2
#define SCLP_RETRY_INTERVAL	5

static void sclp_process_queue(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);
97
/* Perform service call. Return 0 on success, -EBUSY if the sclp interface
 * is busy (condition code 2) and -EIO if it is not operational (condition
 * code 3). */
static int
service_call(sclp_cmdw_t command, void *sccb)
{
	int cc;

	/* SERVICE CALL is emitted via .insn; the condition code is
	 * extracted from the PSW with ipm/srl. The sccb is passed by
	 * physical address. */
	__asm__ __volatile__(
		"	.insn	rre,0xb2200000,%1,%2\n"	 /* servc %1,%2 */
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=&d" (cc)
		: "d" (command), "a" (__pa(sccb))
		: "cc", "memory" );
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}
117
118/* Request timeout handler. Restart the request queue. If DATA is non-zero,
119 * force restart of running request. */
120static void
121sclp_request_timeout(unsigned long data)
122{
123 unsigned long flags;
124
125 if (data) {
126 spin_lock_irqsave(&sclp_lock, flags);
127 sclp_running_state = sclp_running_state_idle;
128 spin_unlock_irqrestore(&sclp_lock, flags);
129 }
130 sclp_process_queue();
131}
132
133/* Set up request retry timer. Called while sclp_lock is locked. */
134static inline void
135__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
136 unsigned long data)
137{
138 del_timer(&sclp_request_timer);
139 sclp_request_timer.function = function;
140 sclp_request_timer.data = data;
141 sclp_request_timer.expires = jiffies + time;
142 add_timer(&sclp_request_timer);
143}
144
/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	/* Only one request may be active at the sclp at a time. */
	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	if (req->start_count <= SCLP_REQUEST_RETRY) {
		rc = service_call(req->command, req->sccb);
		req->start_count++;
	} else
		/* Retry budget exhausted - give up on this request. */
		rc = -EIO;
	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		/* Force a restart if no completion interrupt arrives
		 * within the retry interval (timer data = 1). */
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}
178
/* Try to start queued requests. Requests that fail permanently are removed
 * from the queue and their callbacks are invoked with the lock dropped. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			/* Started (or deferred via timer) - stop here. */
			break;
		/* Request failed. */
		list_del(&req->list);
		if (req->callback) {
			/* Drop the lock around the callback; it may
			 * re-enter the sclp core. */
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}
208
/* Queue a new request. Return zero on success, non-zero otherwise. While
 * the driver is not fully initialized and active, only the internal init
 * request is accepted. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if ((sclp_init_state != sclp_init_state_initialized ||
	     sclp_activation_state != sclp_activation_state_active) &&
	    req != &sclp_init_req) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	/* Start if request is first in list */
	if (req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			/* Failed immediately - undo the queueing. */
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);
238
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	/* Walk the event buffers packed after the sccb header; each buffer
	 * carries its own length. */
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		/* Search for event handler */
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			/* Event type n corresponds to mask bit 1<<(32-n). */
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			/* Call the receiver with the lock dropped. */
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			/* No listener registered for this event type. */
			rc = -ENOSYS;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
275
/* Read event data request callback. Dispatch received event buffers and
 * mark the read channel idle again. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	/* 0x20/0x220 are the successful response codes for read event
	 * data (the latter indicating suppressed buffers). */
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}
291
/* Prepare the static read event data request in sclp_read_sccb /
 * sclp_read_req. Called while sclp_lock is locked. */
static inline void
__sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READDATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	/* control_mask[2] = 0x80: variable-length response expected
	 * (NOTE(review): inferred from usage - verify against the SCLP
	 * documentation). */
	sccb->control_mask[2] = 0x80;
}
310
311/* Search request list for request with matching sccb. Return request if found,
312 * NULL otherwise. Called while sclp_lock is locked. */
313static inline struct sclp_req *
314__sclp_find_req(u32 sccb)
315{
316 struct list_head *l;
317 struct sclp_req *req;
318
319 list_for_each(l, &sclp_req_queue) {
320 req = list_entry(l, struct sclp_req, list);
321 if (sccb == (u32) (addr_t) req->sccb)
322 return req;
323 }
324 return NULL;
325}
326
/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void
sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	spin_lock(&sclp_lock);
	/* ext_params carries the address of the finished sccb in the upper
	 * bits and event-pending flags in the low bits. */
	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	evbuf_pending = S390_lowcore.ext_params & 0x3;
	if (finished_sccb) {
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				/* Callback runs without the lock; it may
				 * re-enter the sclp core. */
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	/* Queue a read request if events are pending, we can receive them,
	 * no read is already in flight and the interface is active. */
	if (evbuf_pending && sclp_receive_mask != 0 &&
	    sclp_reading_state == sclp_reading_state_idle &&
	    sclp_activation_state == sclp_activation_state_active ) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}
365
/* Return current Time-Of-Day clock value (STCK). */
static inline u64
sclp_get_clock(void)
{
	u64 result;

	/* STCK stores the 64-bit TOD clock at the given address. */
	asm volatile ("STCK 0(%1)" : "=m" (result) : "a" (&(result)) : "cc");
	return result;
}
375
376/* Convert interval in jiffies to TOD ticks. */
377static inline u64
378sclp_tod_from_jiffies(unsigned long jiffies)
379{
380 return (u64) (jiffies / HZ) << 32;
381}
382
/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. Timer expiry is
 * emulated by comparing the TOD clock against the request timer's
 * deadline. */
void
sclp_sync_wait(void)
{
	unsigned long psw_mask;
	unsigned long cr0, cr0_sync;
	u64 timeout;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = sclp_get_clock() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	/* Prevent bottom half from executing once we force interrupts open */
	local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	/* Set CR0 bit for service-signal subclass, clear timer/other
	 * external subclass bits. */
	cr0_sync |= 0x00000200;
	cr0_sync &= 0xFFFFF3AC;
	__ctl_load(cr0_sync, 0, 0);
	/* STOSM: save the current PSW mask and enable external
	 * interruptions (mask bit 0x01). */
	asm volatile ("STOSM 0(%1),0x01"
		      : "=m" (psw_mask) : "a" (&psw_mask) : "memory");
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    sclp_get_clock() > timeout &&
		    del_timer(&sclp_request_timer))
			/* Run the timer function by hand since the timer
			 * interrupt is disabled on this CPU. */
			sclp_request_timer.function(sclp_request_timer.data);
		barrier();
		cpu_relax();
	}
	/* Restore interrupt settings */
	asm volatile ("SSM 0(%0)"
		      : : "a" (&psw_mask) : "memory");
	__ctl_load(cr0, 0, 0);
	__local_bh_enable();
}

EXPORT_SYMBOL(sclp_sync_wait);
429
/* Dispatch changes in send and receive mask to registered listeners.
 * Restarts the scan from the beginning after every callback because the
 * lock is dropped while the callback runs and the list may change. */
static inline void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->receive_mask & sclp_receive_mask;
			send_mask = reg->send_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				/* Effective masks changed for this
				 * listener - record and notify it. */
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}
460
/* Layout of a state-change event buffer as delivered by the sclp. The
 * validity bits indicate which of the trailing mask fields carry data. */
struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));
474
475
/* State change event callback. Update the global masks from the event
 * buffer and inform listeners of the change. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	/* Ignore events with an unexpected mask size (sccb_mask_t is
	 * assumed to be 32 bit). */
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_dispatch_state_change();
}
494
/* Built-in listener for state-change events; registered in sclp_init(). */
static struct sclp_register sclp_state_change_event = {
	.receive_mask = EvTyp_StateChange_Mask,
	.receiver_fn = sclp_state_change_cb
};
499
500/* Calculate receive and send mask of currently registered listeners.
501 * Called while sclp_lock is locked. */
502static inline void
503__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
504{
505 struct list_head *l;
506 struct sclp_register *t;
507
508 *receive_mask = 0;
509 *send_mask = 0;
510 list_for_each(l, &sclp_reg_list) {
511 t = list_entry(l, struct sclp_register, list);
512 *receive_mask |= t->receive_mask;
513 *send_mask |= t->send_mask;
514 }
515}
516
/* Register event listener. Return 0 on success, -EBUSY if another listener
 * already claimed one of the requested event types, or an error from
 * driver initialization / mask setup. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	/* Lazily initialize the driver on first registration. */
	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		/* Mask setup failed - undo the registration. */
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);
551
552/* Unregister event listener. */
553void
554sclp_unregister(struct sclp_register *reg)
555{
556 unsigned long flags;
557
558 spin_lock_irqsave(&sclp_lock, flags);
559 list_del(&reg->list);
560 spin_unlock_irqrestore(&sclp_lock, flags);
561 sclp_init_mask(1);
562}
563
564EXPORT_SYMBOL(sclp_unregister);
565
566/* Remove event buffers which are marked processed. Return the number of
567 * remaining event buffers. */
568int
569sclp_remove_processed(struct sccb_header *sccb)
570{
571 struct evbuf_header *evbuf;
572 int unprocessed;
573 u16 remaining;
574
575 evbuf = (struct evbuf_header *) (sccb + 1);
576 unprocessed = 0;
577 remaining = sccb->length - sizeof(struct sccb_header);
578 while (remaining > 0) {
579 remaining -= evbuf->length;
580 if (evbuf->flags & 0x80) {
581 sccb->length -= evbuf->length;
582 memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
583 remaining);
584 } else {
585 unprocessed++;
586 evbuf = (struct evbuf_header *)
587 ((addr_t) evbuf + evbuf->length);
588 }
589 }
590 return unprocessed;
591}
592
593EXPORT_SYMBOL(sclp_remove_processed);
594
/* Layout of the write event mask (init mask) sccb: the first two masks are
 * what we request, the trailing two are what the sclp grants in return. */
struct init_sccb {
	struct sccb_header header;
	u16 _reserved;
	u16 mask_length;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	sccb_mask_t sclp_send_mask;
	sccb_mask_t sclp_receive_mask;
} __attribute__((packed));
604
/* Prepare the static init mask request in sclp_init_sccb / sclp_init_req
 * with the given receive and send masks. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITEMASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	/* No callback - callers poll sclp_init_req.status instead. */
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}
627
/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. Runs the request synchronously, retrying up
 * to SCLP_MASK_RETRY times. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		/* Busy-wait for the request to reach a final state. */
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				/* Adopt the masks the sclp granted. */
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
698
699/* Deactivate SCLP interface. On success, new requests will be rejected,
700 * events will no longer be dispatched. Return 0 on success, non-zero
701 * otherwise. */
702int
703sclp_deactivate(void)
704{
705 unsigned long flags;
706 int rc;
707
708 spin_lock_irqsave(&sclp_lock, flags);
709 /* Deactivate can only be called when active */
710 if (sclp_activation_state != sclp_activation_state_active) {
711 spin_unlock_irqrestore(&sclp_lock, flags);
712 return -EINVAL;
713 }
714 sclp_activation_state = sclp_activation_state_deactivating;
715 spin_unlock_irqrestore(&sclp_lock, flags);
716 rc = sclp_init_mask(0);
717 spin_lock_irqsave(&sclp_lock, flags);
718 if (rc == 0)
719 sclp_activation_state = sclp_activation_state_inactive;
720 else
721 sclp_activation_state = sclp_activation_state_active;
722 spin_unlock_irqrestore(&sclp_lock, flags);
723 return rc;
724}
725
726EXPORT_SYMBOL(sclp_deactivate);
727
728/* Reactivate SCLP interface after sclp_deactivate. On success, new
729 * requests will be accepted, events will be dispatched again. Return 0 on
730 * success, non-zero otherwise. */
731int
732sclp_reactivate(void)
733{
734 unsigned long flags;
735 int rc;
736
737 spin_lock_irqsave(&sclp_lock, flags);
738 /* Reactivate can only be called when inactive */
739 if (sclp_activation_state != sclp_activation_state_inactive) {
740 spin_unlock_irqrestore(&sclp_lock, flags);
741 return -EINVAL;
742 }
743 sclp_activation_state = sclp_activation_state_activating;
744 spin_unlock_irqrestore(&sclp_lock, flags);
745 rc = sclp_init_mask(1);
746 spin_lock_irqsave(&sclp_lock, flags);
747 if (rc == 0)
748 sclp_activation_state = sclp_activation_state_active;
749 else
750 sclp_activation_state = sclp_activation_state_inactive;
751 spin_unlock_irqrestore(&sclp_lock, flags);
752 return rc;
753}
754
755EXPORT_SYMBOL(sclp_reactivate);
756
/* Handler for external interruption used during initialization. Modify
 * request state to done. Only reacts to completions of the init sccb;
 * anything else is reported as unsolicited. */
static void
sclp_check_handler(struct pt_regs *regs, __u16 code)
{
	u32 finished_sccb;

	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
		printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
		       "for buffer at 0x%x\n", finished_sccb);
		return;
	}
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		/* sclp_check_interface() polls for this state change. */
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}
780
781/* Initial init mask request timed out. Modify request state to failed. */
782static void
783sclp_check_timeout(unsigned long data)
784{
785 unsigned long flags;
786
787 spin_lock_irqsave(&sclp_lock, flags);
788 if (sclp_running_state == sclp_running_state_running) {
789 sclp_init_req.status = SCLP_REQ_FAILED;
790 sclp_running_state = sclp_running_state_idle;
791 }
792 spin_unlock_irqrestore(&sclp_lock, flags);
793}
794
/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. Temporarily installs sclp_check_handler as
 * the 0x2401 external interrupt handler. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_early_external_interrupt(0x2401, sclp_check_handler,
					       &ext_int_info_hwc);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		/* Issue a zero-mask write event mask command directly,
		 * bypassing the request queue (driver not initialized
		 * yet). */
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			/* Interface not operational - give up. */
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_set_bit(0, 9);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_clear_bit(0,9);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_early_external_interrupt(0x2401, sclp_check_handler,
					    &ext_int_info_hwc);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
847
/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	/* Best effort - the return value of sclp_deactivate is ignored
	 * because a reboot proceeds regardless. */
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};
860
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. Idempotent: returns 0 immediately if initialization already
 * happened or is in progress. Called lazily from sclp_register(). */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc;

	if (!MACHINE_HAS_SCLP)
		return -ENODEV;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return 0;
	}
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register interrupt handler */
	rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
					       &ext_int_info_hwc);
	if (rc) {
		/* Undo the reboot notifier registration. */
		unregister_reboot_notifier(&sclp_reboot_notifier);
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	ctl_set_bit(0, 9);
	sclp_init_mask(1);
	return 0;
}
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
new file mode 100644
index 000000000000..2c71d6ee7b5b
--- /dev/null
+++ b/drivers/s390/char/sclp.h
@@ -0,0 +1,159 @@
1/*
2 * drivers/s390/char/sclp.h
3 *
4 * S390 version
5 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */
9
10#ifndef __SCLP_H__
11#define __SCLP_H__
12
13#include <linux/types.h>
14#include <linux/list.h>
15
16#include <asm/ebcdic.h>
17
/* maximum number of pages concerning our own memory management */
#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
#define MAX_CONSOLE_PAGES	4

/* SCLP event types. */
#define EvTyp_OpCmd		0x01
#define EvTyp_Msg		0x02
#define EvTyp_StateChange	0x08
#define EvTyp_PMsgCmd		0x09
#define EvTyp_CntlProgOpCmd	0x20
#define EvTyp_CntlProgIdent	0x0B
#define EvTyp_SigQuiesce	0x1D
#define EvTyp_VT220Msg		0x1A

/* Event mask bits: type n maps to bit 1 << (32 - n) in an sccb mask. */
#define EvTyp_OpCmd_Mask	0x80000000
#define EvTyp_Msg_Mask		0x40000000
#define EvTyp_StateChange_Mask	0x01000000
#define EvTyp_PMsgCmd_Mask	0x00800000
#define EvTyp_CtlProgOpCmd_Mask	0x00000001
#define EvTyp_CtlProgIdent_Mask	0x00200000
#define EvTyp_SigQuiesce_Mask	0x00000008
#define EvTyp_VT220Msg_Mask	0x00000040

/* General message flags. */
#define GnrlMsgFlgs_DOM		0x8000
#define GnrlMsgFlgs_SndAlrm	0x4000
#define GnrlMsgFlgs_HoldMsg	0x2000

/* Line type flags. */
#define LnTpFlgs_CntlText	0x8000
#define LnTpFlgs_LabelText	0x4000
#define LnTpFlgs_DataText	0x2000
#define LnTpFlgs_EndText	0x1000
#define LnTpFlgs_PromptText	0x0800

typedef unsigned int sclp_cmdw_t;

/* SCLP command words. */
#define SCLP_CMDW_READDATA	0x00770005
#define SCLP_CMDW_WRITEDATA	0x00760005
#define SCLP_CMDW_WRITEMASK	0x00780005

/* General data stream identifiers. */
#define GDS_ID_MDSMU		0x1310
#define GDS_ID_MDSRouteInfo	0x1311
#define GDS_ID_AgUnWrkCorr	0x1549
#define GDS_ID_SNACondReport	0x1532
#define GDS_ID_CPMSU		0x1212
#define GDS_ID_RoutTargInstr	0x154D
#define GDS_ID_OpReq		0x8070
#define GDS_ID_TextCmd		0x1320

#define GDS_KEY_SelfDefTextMsg	0x31

typedef u32 sccb_mask_t;	/* ATTENTION: assumes 32bit mask !!! */
68
/* Common header of every sccb exchanged with the sclp. */
struct sccb_header {
	u16	length;
	u8	function_code;
	u8	control_mask[3];
	u16	response_code;
} __attribute__((packed));

/* Header of a general data stream subvector. */
struct gds_subvector {
	u8	length;
	u8	key;
} __attribute__((packed));

/* Header of a general data stream vector. */
struct gds_vector {
	u16	length;
	u16	gds_id;
} __attribute__((packed));

/* Header of an event buffer inside an sccb. */
struct evbuf_header {
	u16	length;
	u8	type;
	u8	flags;
	u16	_reserved;
} __attribute__((packed));

struct sclp_req {
	struct list_head list;		/* list_head for request queueing. */
	sclp_cmdw_t command;		/* sclp command to execute */
	void	*sccb;			/* pointer to the sccb to execute */
	char	status;			/* status of this request */
	int     start_count;		/* number of SVCs done for this req */
	/* Callback that is called after reaching final status. */
	void (*callback)(struct sclp_req *, void *data);
	void *callback_data;
};

/* Values for sclp_req.status. */
#define SCLP_REQ_FILLED	  0x00	/* request is ready to be processed */
#define SCLP_REQ_QUEUED	  0x01	/* request is queued to be processed */
#define SCLP_REQ_RUNNING  0x02	/* request is currently running */
#define SCLP_REQ_DONE	  0x03	/* request is completed successfully */
#define SCLP_REQ_FAILED	  0x05	/* request is finally failed */

/* function pointers that a high level driver has to use for registration */
/* of some routines it wants to be called from the low level driver */
struct sclp_register {
	struct list_head list;
	/* event masks this user is registered for */
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	/* actually present events */
	sccb_mask_t sclp_receive_mask;
	sccb_mask_t sclp_send_mask;
	/* called if event type availability changes */
	void (*state_change_fn)(struct sclp_register *);
	/* called for events in cp_receive_mask/sclp_receive_mask */
	void (*receiver_fn)(struct evbuf_header *);
};
125
126/* externals from sclp.c */
127int sclp_add_request(struct sclp_req *req);
128void sclp_sync_wait(void);
129int sclp_register(struct sclp_register *reg);
130void sclp_unregister(struct sclp_register *reg);
131int sclp_remove_processed(struct sccb_header *sccb);
132int sclp_deactivate(void);
133int sclp_reactivate(void);
134
135/* useful inlines */
136
137/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */
138/* translate single character from ASCII to EBCDIC */
139static inline unsigned char
140sclp_ascebc(unsigned char ch)
141{
142 return (MACHINE_IS_VM) ? _ascebc[ch] : _ascebc_500[ch];
143}
144
145/* translate string from EBCDIC to ASCII */
146static inline void
147sclp_ebcasc_str(unsigned char *str, int nr)
148{
149 (MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr);
150}
151
152/* translate string from ASCII to EBCDIC */
153static inline void
154sclp_ascebc_str(unsigned char *str, int nr)
155{
156 (MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr);
157}
158
159#endif /* __SCLP_H__ */
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
new file mode 100644
index 000000000000..10ef22f13541
--- /dev/null
+++ b/drivers/s390/char/sclp_con.c
@@ -0,0 +1,252 @@
1/*
2 * drivers/s390/char/sclp_con.c
3 * SCLP line mode console driver
4 *
5 * S390 version
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */
10
11#include <linux/config.h>
12#include <linux/kmod.h>
13#include <linux/console.h>
14#include <linux/init.h>
15#include <linux/timer.h>
16#include <linux/jiffies.h>
17#include <linux/bootmem.h>
18#include <linux/err.h>
19
20#include "sclp.h"
21#include "sclp_rw.h"
22#include "sclp_tty.h"
23
24#define SCLP_CON_PRINT_HEADER "sclp console driver: "
25
26#define sclp_console_major 4 /* TTYAUX_MAJOR */
27#define sclp_console_minor 64
28#define sclp_console_name "ttyS"
29
30/* Lock to guard over changes to global variables */
31static spinlock_t sclp_con_lock;
32/* List of free pages that can be used for console output buffering */
33static struct list_head sclp_con_pages;
34/* List of full struct sclp_buffer structures ready for output */
35static struct list_head sclp_con_outqueue;
36/* Counter how many buffers are emitted (max 1) and how many */
37/* are on the output queue. */
38static int sclp_con_buffer_count;
39/* Pointer to current console buffer */
40static struct sclp_buffer *sclp_conbuf;
41/* Timer for delayed output of console messages */
42static struct timer_list sclp_con_timer;
43
44/* Output format for console messages */
45static unsigned short sclp_con_columns;
46static unsigned short sclp_con_width_htab;
47
/*
 * Completion callback for an emitted console buffer. Returns the SCCB
 * page to the free-page pool and, if further buffers are queued, starts
 * emitting the next one. The loop handles the case that sclp_emit_buffer()
 * fails synchronously for a queued buffer: that buffer is then recycled
 * immediately in the next iteration. The rc argument is ignored here.
 */
static void
sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
{
	unsigned long flags;
	void *page;

	do {
		page = sclp_unmake_buffer(buffer);
		spin_lock_irqsave(&sclp_con_lock, flags);
		/* Remove buffer from outqueue */
		list_del(&buffer->list);
		sclp_con_buffer_count--;
		list_add_tail((struct list_head *) page, &sclp_con_pages);
		/* Check if there is a pending buffer on the out queue. */
		buffer = NULL;
		if (!list_empty(&sclp_con_outqueue))
			buffer = list_entry(sclp_con_outqueue.next,
					    struct sclp_buffer, list);
		spin_unlock_irqrestore(&sclp_con_lock, flags);
		/* sclp_emit_buffer() is called without the lock held */
	} while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback));
}
69
70static inline void
71sclp_conbuf_emit(void)
72{
73 struct sclp_buffer* buffer;
74 unsigned long flags;
75 int count;
76 int rc;
77
78 spin_lock_irqsave(&sclp_con_lock, flags);
79 buffer = sclp_conbuf;
80 sclp_conbuf = NULL;
81 if (buffer == NULL) {
82 spin_unlock_irqrestore(&sclp_con_lock, flags);
83 return;
84 }
85 list_add_tail(&buffer->list, &sclp_con_outqueue);
86 count = sclp_con_buffer_count++;
87 spin_unlock_irqrestore(&sclp_con_lock, flags);
88 if (count)
89 return;
90 rc = sclp_emit_buffer(buffer, sclp_conbuf_callback);
91 if (rc)
92 sclp_conbuf_callback(buffer, rc);
93}
94
/*
 * When this routine is called from the timer then we flush the
 * temporary write buffer without further waiting on a final new line.
 * (Timer function; data is unused.)
 */
static void
sclp_console_timeout(unsigned long data)
{
	sclp_conbuf_emit();
}
104
/*
 * Writes the given message to S390 system console
 *
 * console->write hook: copies count bytes of message into SCLP output
 * buffers, emitting full buffers as it goes. If no free buffer page is
 * available the lock is dropped and sclp_sync_wait() busy-waits until a
 * pending request completes. A 1/10 second timer is armed so that a
 * partially filled buffer (no trailing newline yet) is flushed anyway.
 */
static void
sclp_console_write(struct console *console, const char *message,
		   unsigned int count)
{
	unsigned long flags;
	void *page;
	int written;

	if (count == 0)
		return;
	spin_lock_irqsave(&sclp_con_lock, flags);
	/*
	 * process escape characters, write message into buffer,
	 * send buffer to SCLP
	 */
	do {
		/* make sure we have a console output buffer */
		if (sclp_conbuf == NULL) {
			/* no free page: drop lock and wait for completions */
			while (list_empty(&sclp_con_pages)) {
				spin_unlock_irqrestore(&sclp_con_lock, flags);
				sclp_sync_wait();
				spin_lock_irqsave(&sclp_con_lock, flags);
			}
			page = sclp_con_pages.next;
			list_del((struct list_head *) page);
			sclp_conbuf = sclp_make_buffer(page, sclp_con_columns,
						       sclp_con_width_htab);
		}
		/* try to write the string to the current output buffer */
		written = sclp_write(sclp_conbuf, (const unsigned char *)
				     message, count);
		if (written == count)
			break;
		/*
		 * Not all characters could be written to the current
		 * output buffer. Emit the buffer, create a new buffer
		 * and then output the rest of the string.
		 */
		spin_unlock_irqrestore(&sclp_con_lock, flags);
		sclp_conbuf_emit();
		spin_lock_irqsave(&sclp_con_lock, flags);
		message += written;
		count -= written;
	} while (count > 0);
	/* Setup timer to output current console buffer after 1/10 second */
	if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
	    !timer_pending(&sclp_con_timer)) {
		init_timer(&sclp_con_timer);
		sclp_con_timer.function = sclp_console_timeout;
		sclp_con_timer.data = 0UL;
		sclp_con_timer.expires = jiffies + HZ/10;
		add_timer(&sclp_con_timer);
	}
	spin_unlock_irqrestore(&sclp_con_lock, flags);
}
163
/*
 * console->device hook: report the tty driver and line index that
 * correspond to this console, so /dev/console can be redirected to it.
 */
static struct tty_driver *
sclp_console_device(struct console *c, int *index)
{
	*index = c->index;
	return sclp_tty_driver;
}
170
/*
 * This routine is called from panic when the kernel
 * is going to give up. We have to make sure that all buffers
 * will be flushed to the SCLP.
 * Busy-waits (via sclp_sync_wait) until the outqueue is drained.
 */
static void
sclp_console_unblank(void)
{
	unsigned long flags;

	/* force out the partially filled current buffer, if any */
	sclp_conbuf_emit();
	spin_lock_irqsave(&sclp_con_lock, flags);
	/* no point in a delayed flush anymore */
	if (timer_pending(&sclp_con_timer))
		del_timer(&sclp_con_timer);
	while (sclp_con_buffer_count > 0) {
		spin_unlock_irqrestore(&sclp_con_lock, flags);
		sclp_sync_wait();
		spin_lock_irqsave(&sclp_con_lock, flags);
	}
	spin_unlock_irqrestore(&sclp_con_lock, flags);
}
192
/*
 * used to register the SCLP console to the kernel and to
 * give printk necessary information
 */
static struct console sclp_console =
{
	.name = sclp_console_name,	/* "ttyS" */
	.write = sclp_console_write,
	.device = sclp_console_device,
	.unblank = sclp_console_unblank,
	.flags = CON_PRINTBUFFER,	/* replay the log buffer on registration */
	.index = 0 /* ttyS0 */
};
206
/*
 * called by console_init() in drivers/char/tty_io.c at boot-time.
 * Pre-allocates MAX_CONSOLE_PAGES output pages from bootmem, initializes
 * the driver state and registers the console with printk. Returns 0 on
 * success or if this machine does not use the SCLP console.
 */
static int __init
sclp_console_init(void)
{
	void *page;
	int i;
	int rc;

	if (!CONSOLE_IS_SCLP)
		return 0;
	rc = sclp_rw_init();
	if (rc)
		return rc;
	/* Allocate pages for output buffering */
	INIT_LIST_HEAD(&sclp_con_pages);
	for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
		page = alloc_bootmem_low_pages(PAGE_SIZE);
		if (page == NULL)
			return -ENOMEM;
		list_add_tail((struct list_head *) page, &sclp_con_pages);
	}
	INIT_LIST_HEAD(&sclp_con_outqueue);
	spin_lock_init(&sclp_con_lock);
	sclp_con_buffer_count = 0;
	sclp_conbuf = NULL;
	init_timer(&sclp_con_timer);

	/* Set output format */
	if (MACHINE_IS_VM)
		/*
		 * save 4 characters for the CPU number
		 * written at start of each line by VM/CP
		 */
		sclp_con_columns = 76;
	else
		sclp_con_columns = 80;
	sclp_con_width_htab = 8;

	/* enable printk-access to this driver */
	register_console(&sclp_console);
	return 0;
}
251
252console_initcall(sclp_console_init);
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
new file mode 100644
index 000000000000..5a6cef2dfa13
--- /dev/null
+++ b/drivers/s390/char/sclp_cpi.c
@@ -0,0 +1,254 @@
1/*
2 * Author: Martin Peschke <mpeschke@de.ibm.com>
3 * Copyright (C) 2001 IBM Entwicklung GmbH, IBM Corporation
4 *
5 * SCLP Control-Program Identification.
6 */
7
8#include <linux/config.h>
9#include <linux/version.h>
10#include <linux/kmod.h>
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/init.h>
14#include <linux/timer.h>
15#include <linux/string.h>
16#include <linux/err.h>
17#include <linux/slab.h>
18#include <asm/ebcdic.h>
19#include <asm/semaphore.h>
20
21#include "sclp.h"
22#include "sclp_rw.h"
23
24#define CPI_LENGTH_SYSTEM_TYPE 8
25#define CPI_LENGTH_SYSTEM_NAME 8
26#define CPI_LENGTH_SYSPLEX_NAME 8
27
/* Control-Program Identification event buffer: identifies this OS
 * instance (type, name, level, sysplex) to the service element.
 * Packed on-the-wire layout; field meanings per the SCLP interface. */
struct cpi_evbuf {
	struct evbuf_header header;
	u8 id_format;
	u8 reserved0;
	u8 system_type[CPI_LENGTH_SYSTEM_TYPE];		/* EBCDIC, blank padded */
	u64 reserved1;
	u8 system_name[CPI_LENGTH_SYSTEM_NAME];		/* EBCDIC, blank padded */
	u64 reserved2;
	u64 system_level;				/* LINUX_VERSION_CODE */
	u64 reserved3;
	u8 sysplex_name[CPI_LENGTH_SYSPLEX_NAME];	/* EBCDIC, blank padded */
	u8 reserved4[16];
} __attribute__((packed));
41
/* Complete SCCB sent for the CPI request: header plus one CPI event. */
struct cpi_sccb {
	struct sccb_header header;
	struct cpi_evbuf cpi_evbuf;
} __attribute__((packed));
46
47/* Event type structure for write message and write priority message */
48static struct sclp_register sclp_cpi_event =
49{
50 .send_mask = EvTyp_CtlProgIdent_Mask
51};
52
53MODULE_AUTHOR(
54 "Martin Peschke, IBM Deutschland Entwicklung GmbH "
55 "<mpeschke@de.ibm.com>");
56
57MODULE_DESCRIPTION(
58 "identify this operating system instance to the S/390 "
59 "or zSeries hardware");
60
61static char *system_name = NULL;
62module_param(system_name, charp, 0);
63MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters");
64
65static char *sysplex_name = NULL;
66#ifdef ALLOW_SYSPLEX_NAME
67module_param(sysplex_name, charp, 0);
68MODULE_PARM_DESC(sysplex_name, "if applicable - max. 8 characters");
69#endif
70
71/* use default value for this field (as well as for system level) */
72static char *system_type = "LINUX";
73
74static int
75cpi_check_parms(void)
76{
77 /* reject if no system type specified */
78 if (!system_type) {
79 printk("cpi: bug: no system type specified\n");
80 return -EINVAL;
81 }
82
83 /* reject if system type larger than 8 characters */
84 if (strlen(system_type) > CPI_LENGTH_SYSTEM_NAME) {
85 printk("cpi: bug: system type has length of %li characters - "
86 "only %i characters supported\n",
87 strlen(system_type), CPI_LENGTH_SYSTEM_TYPE);
88 return -EINVAL;
89 }
90
91 /* reject if no system name specified */
92 if (!system_name) {
93 printk("cpi: no system name specified\n");
94 return -EINVAL;
95 }
96
97 /* reject if system name larger than 8 characters */
98 if (strlen(system_name) > CPI_LENGTH_SYSTEM_NAME) {
99 printk("cpi: system name has length of %li characters - "
100 "only %i characters supported\n",
101 strlen(system_name), CPI_LENGTH_SYSTEM_NAME);
102 return -EINVAL;
103 }
104
105 /* reject if specified sysplex name larger than 8 characters */
106 if (sysplex_name && strlen(sysplex_name) > CPI_LENGTH_SYSPLEX_NAME) {
107 printk("cpi: sysplex name has length of %li characters"
108 " - only %i characters supported\n",
109 strlen(sysplex_name), CPI_LENGTH_SYSPLEX_NAME);
110 return -EINVAL;
111 }
112 return 0;
113}
114
/*
 * Request completion callback: wake up the thread sleeping in
 * cpi_module_init() on the semaphore passed via callback_data.
 */
static void
cpi_callback(struct sclp_req *req, void *data)
{
	up((struct semaphore *) data);
}
123
124static struct sclp_req *
125cpi_prepare_req(void)
126{
127 struct sclp_req *req;
128 struct cpi_sccb *sccb;
129 struct cpi_evbuf *evb;
130
131 req = (struct sclp_req *) kmalloc(sizeof(struct sclp_req), GFP_KERNEL);
132 if (req == NULL)
133 return ERR_PTR(-ENOMEM);
134 sccb = (struct cpi_sccb *) __get_free_page(GFP_KERNEL | GFP_DMA);
135 if (sccb == NULL) {
136 kfree(req);
137 return ERR_PTR(-ENOMEM);
138 }
139 memset(sccb, 0, sizeof(struct cpi_sccb));
140
141 /* setup SCCB for Control-Program Identification */
142 sccb->header.length = sizeof(struct cpi_sccb);
143 sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
144 sccb->cpi_evbuf.header.type = 0x0B;
145 evb = &sccb->cpi_evbuf;
146
147 /* set system type */
148 memset(evb->system_type, ' ', CPI_LENGTH_SYSTEM_TYPE);
149 memcpy(evb->system_type, system_type, strlen(system_type));
150 sclp_ascebc_str(evb->system_type, CPI_LENGTH_SYSTEM_TYPE);
151 EBC_TOUPPER(evb->system_type, CPI_LENGTH_SYSTEM_TYPE);
152
153 /* set system name */
154 memset(evb->system_name, ' ', CPI_LENGTH_SYSTEM_NAME);
155 memcpy(evb->system_name, system_name, strlen(system_name));
156 sclp_ascebc_str(evb->system_name, CPI_LENGTH_SYSTEM_NAME);
157 EBC_TOUPPER(evb->system_name, CPI_LENGTH_SYSTEM_NAME);
158
159 /* set sytem level */
160 evb->system_level = LINUX_VERSION_CODE;
161
162 /* set sysplex name */
163 if (sysplex_name) {
164 memset(evb->sysplex_name, ' ', CPI_LENGTH_SYSPLEX_NAME);
165 memcpy(evb->sysplex_name, sysplex_name, strlen(sysplex_name));
166 sclp_ascebc_str(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME);
167 EBC_TOUPPER(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME);
168 }
169
170 /* prepare request data structure presented to SCLP driver */
171 req->command = SCLP_CMDW_WRITEDATA;
172 req->sccb = sccb;
173 req->status = SCLP_REQ_FILLED;
174 req->callback = cpi_callback;
175 return req;
176}
177
178static void
179cpi_free_req(struct sclp_req *req)
180{
181 free_page((unsigned long) req->sccb);
182 kfree(req);
183}
184
/*
 * Module entry point: validate parameters, send one CPI event to the
 * SCLP and sleep on a semaphore until the request's callback reports
 * completion. All resources are released and the event type is
 * unregistered again before returning; the module stays loaded only as
 * a marker that identification has been sent.
 * Returns 0 on success or a negative error code.
 */
static int __init
cpi_module_init(void)
{
	struct semaphore sem;
	struct sclp_req *req;
	int rc;

	rc = cpi_check_parms();
	if (rc)
		return rc;

	rc = sclp_register(&sclp_cpi_event);
	if (rc) {
		/* could not register sclp event. Die. */
		printk(KERN_WARNING "cpi: could not register to hardware "
		       "console.\n");
		return -EINVAL;
	}
	/* the SCLP must actually support the CPI event type */
	if (!(sclp_cpi_event.sclp_send_mask & EvTyp_CtlProgIdent_Mask)) {
		printk(KERN_WARNING "cpi: no control program identification "
		       "support\n");
		sclp_unregister(&sclp_cpi_event);
		return -ENOTSUPP;
	}

	req = cpi_prepare_req();
	if (IS_ERR(req)) {
		printk(KERN_WARNING "cpi: couldn't allocate request\n");
		sclp_unregister(&sclp_cpi_event);
		return PTR_ERR(req);
	}

	/* Prepare semaphore */
	sema_init(&sem, 0);
	req->callback_data = &sem;
	/* Add request to sclp queue */
	rc = sclp_add_request(req);
	if (rc) {
		printk(KERN_WARNING "cpi: could not start request\n");
		cpi_free_req(req);
		sclp_unregister(&sclp_cpi_event);
		return rc;
	}
	/* make "insmod" sleep until callback arrives */
	down(&sem);

	/* 0x0020 is the SCLP "command complete" response code */
	rc = ((struct cpi_sccb *) req->sccb)->header.response_code;
	if (rc != 0x0020) {
		printk(KERN_WARNING "cpi: failed with response code 0x%x\n",
		       rc);
		rc = -ECOMM;
	} else
		rc = 0;

	cpi_free_req(req);
	sclp_unregister(&sclp_cpi_event);

	return rc;
}
244
245
/* Nothing to undo on unload: the CPI event was a one-shot sent at
 * module init and all resources were freed there already. */
static void __exit cpi_module_exit(void)
{
}
249
250
251/* declare driver module init/cleanup functions */
252module_init(cpi_module_init);
253module_exit(cpi_module_exit);
254
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
new file mode 100644
index 000000000000..83f75774df60
--- /dev/null
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -0,0 +1,99 @@
1/*
2 * drivers/s390/char/sclp_quiesce.c
3 * signal quiesce handler
4 *
5 * (C) Copyright IBM Corp. 1999,2004
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
8 */
9
10#include <linux/config.h>
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/cpumask.h>
14#include <linux/smp.h>
15#include <linux/init.h>
16#include <asm/atomic.h>
17#include <asm/ptrace.h>
18#include <asm/sigp.h>
19
20#include "sclp.h"
21
22
23#ifdef CONFIG_SMP
/* Signal completion of shutdown process. All CPUs except the first to enter
 * this function: go to stopped state. First CPU: wait until all other
 * CPUs are in stopped or check stop state. Afterwards, load special PSW
 * to indicate completion. */
static void
do_load_quiesce_psw(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);
	psw_t quiesce_psw;
	int cpu;

	/* only the cpu that wins the swap from -1 continues; all others stop */
	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
		signal_processor(smp_processor_id(), sigp_stop);
	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while(!smp_cpu_not_running(cpu))
			cpu_relax();
	}
	/* Quiesce the last cpu with the special psw */
	/* disabled-wait PSW with address 0xfff signals "quiesced" */
	quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
	quiesce_psw.addr = 0xfff;
	__load_psw(quiesce_psw);
}
49
/* Shutdown handler. Perform shutdown function on all CPUs. */
static void
do_machine_quiesce(void)
{
	/* run do_load_quiesce_psw on every online cpu, including this one */
	on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
}
56#else
/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
static void
do_machine_quiesce(void)
{
	psw_t quiesce_psw;

	/* disabled-wait PSW with address 0xfff signals "quiesced" */
	quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
	quiesce_psw.addr = 0xfff;
	__load_psw(quiesce_psw);
}
67#endif
68
69extern void ctrl_alt_del(void);
70
/* Handler for quiesce event. Start shutdown procedure. Redirects all
 * machine shutdown vectors to the quiesce function, then triggers the
 * regular ctrl-alt-del path so the system shuts down through it. */
static void
sclp_quiesce_handler(struct evbuf_header *evbuf)
{
	_machine_restart = (void *) do_machine_quiesce;
	_machine_halt = do_machine_quiesce;
	_machine_power_off = do_machine_quiesce;
	ctrl_alt_del();
}
80
81static struct sclp_register sclp_quiesce_event = {
82 .receive_mask = EvTyp_SigQuiesce_Mask,
83 .receiver_fn = sclp_quiesce_handler
84};
85
86/* Initialize quiesce driver. */
87static int __init
88sclp_quiesce_init(void)
89{
90 int rc;
91
92 rc = sclp_register(&sclp_quiesce_event);
93 if (rc)
94 printk(KERN_WARNING "sclp: could not register quiesce handler "
95 "(rc=%d)\n", rc);
96 return rc;
97}
98
99module_init(sclp_quiesce_init);
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
new file mode 100644
index 000000000000..ac10dfb20a62
--- /dev/null
+++ b/drivers/s390/char/sclp_rw.c
@@ -0,0 +1,471 @@
1/*
2 * drivers/s390/char/sclp_rw.c
3 * driver: reading from and writing to system console on S/390 via SCLP
4 *
5 * S390 version
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */
10
11#include <linux/config.h>
12#include <linux/kmod.h>
13#include <linux/types.h>
14#include <linux/err.h>
15#include <linux/string.h>
16#include <linux/spinlock.h>
17#include <linux/ctype.h>
18#include <asm/uaccess.h>
19
20#include "sclp.h"
21#include "sclp_rw.h"
22
23#define SCLP_RW_PRINT_HEADER "sclp low level driver: "
24
/*
 * The room for the SCCB (only for writing) is not equal to a page's
 * size (which is specified as the maximum size in the SCLP
 * documentation) because of the additional data structure described
 * above.
 */
30#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
31
32/* Event type structure for write message and write priority message */
33static struct sclp_register sclp_rw_event = {
34 .send_mask = EvTyp_Msg_Mask | EvTyp_PMsgCmd_Mask
35};
36
/*
 * Setup a sclp write buffer. Gets a page as input (4K) and returns
 * a pointer to a struct sclp_buffer structure that is located at the
 * end of the input page. This reduces the buffer space by a few
 * bytes but simplifies things.
 * columns/htab control line formatting in sclp_write().
 */
struct sclp_buffer *
sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
{
	struct sclp_buffer *buffer;
	struct write_sccb *sccb;

	sccb = (struct write_sccb *) page;
	/*
	 * We keep the struct sclp_buffer structure at the end
	 * of the sccb page.
	 */
	buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1;
	buffer->sccb = sccb;
	buffer->retry_count = 0;
	buffer->mto_number = 0;
	buffer->mto_char_sum = 0;
	buffer->current_line = NULL;	/* no open line yet */
	buffer->current_length = 0;
	buffer->columns = columns;
	buffer->htab = htab;

	/* initialize sccb */
	memset(sccb, 0, sizeof(struct write_sccb));
	sccb->header.length = sizeof(struct write_sccb);
	sccb->msg_buf.header.length = sizeof(struct msg_buf);
	sccb->msg_buf.header.type = EvTyp_Msg;
	sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
	sccb->msg_buf.mdb.header.type = 1;
	sccb->msg_buf.mdb.header.tag = 0xD4C4C240;	/* ebcdic "MDB " */
	sccb->msg_buf.mdb.header.revision_code = 1;
	sccb->msg_buf.mdb.go.length = sizeof(struct go);
	sccb->msg_buf.mdb.go.type = 1;

	return buffer;
}
78
/*
 * Return a pointer to the original page that has been used to create
 * the buffer (the struct sclp_buffer lives at the end of that page,
 * so nothing needs to be freed separately).
 */
void *
sclp_unmake_buffer(struct sclp_buffer *buffer)
{
	return buffer->sccb;
}
88
89/*
90 * Initialize a new Message Text Object (MTO) at the end of the provided buffer
91 * with enough room for max_len characters. Return 0 on success.
92 */
93static int
94sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
95{
96 struct write_sccb *sccb;
97 struct mto *mto;
98 int mto_size;
99
100 /* max size of new Message Text Object including message text */
101 mto_size = sizeof(struct mto) + max_len;
102
103 /* check if current buffer sccb can contain the mto */
104 sccb = buffer->sccb;
105 if ((MAX_SCCB_ROOM - sccb->header.length) < mto_size)
106 return -ENOMEM;
107
108 /* find address of new message text object */
109 mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
110
111 /*
112 * fill the new Message-Text Object,
113 * starting behind the former last byte of the SCCB
114 */
115 memset(mto, 0, sizeof(struct mto));
116 mto->length = sizeof(struct mto);
117 mto->type = 4; /* message text object */
118 mto->line_type_flags = LnTpFlgs_EndText; /* end text */
119
120 /* set pointer to first byte after struct mto. */
121 buffer->current_line = (char *) (mto + 1);
122 buffer->current_length = 0;
123
124 return 0;
125}
126
/*
 * Finalize MTO initialized by sclp_initialize_mto(), updating the sizes of
 * MTO, enclosing MDB, event buffer and SCCB. Closes the current line:
 * after this call buffer->current_line is NULL again.
 */
static void
sclp_finalize_mto(struct sclp_buffer *buffer)
{
	struct write_sccb *sccb;
	struct mto *mto;
	int str_len, mto_size;

	/* capture the text length before resetting the line state */
	str_len = buffer->current_length;
	buffer->current_line = NULL;
	buffer->current_length = 0;

	/* real size of new Message Text Object including message text */
	mto_size = sizeof(struct mto) + str_len;

	/* find address of new message text object */
	/* header.length has not been bumped yet, so it still points at the MTO */
	sccb = buffer->sccb;
	mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);

	/* set size of message text object */
	mto->length = mto_size;

	/*
	 * update values of sizes
	 * (SCCB, Event(Message) Buffer, Message Data Block)
	 */
	sccb->header.length += mto_size;
	sccb->msg_buf.header.length += mto_size;
	sccb->msg_buf.mdb.header.length += mto_size;

	/*
	 * count number of buffered messages (= number of Message Text
	 * Objects) and number of buffered characters
	 * for the SCCB currently used for buffering and at all
	 */
	buffer->mto_number++;
	buffer->mto_char_sum += str_len;
}
168
/*
 * processing of a message including escape characters,
 * returns number of characters written to the output sccb
 * ("processed" means that is not guaranteed that the character have already
 * been sent to the SCLP but that it will be done at least next time the SCLP
 * is not busy)
 * A return value smaller than count means the SCCB ran out of room; the
 * caller must emit the buffer and retry with the remaining bytes.
 */
int
sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
{
	int spaces, i_msg;
	int rc;

	/*
	 * parse msg for escape sequences (\t,\v ...) and put formatted
	 * msg into an mto (created by sclp_initialize_mto).
	 *
	 * We have to do this work ourselves because there is no support for
	 * these characters on the native machine and only partial support
	 * under VM (Why does VM interpret \n but the native machine doesn't ?)
	 *
	 * Depending on i/o-control setting the message is always written
	 * immediately or we wait for a final new line maybe coming with the
	 * next message. Besides we avoid a buffer overrun by writing its
	 * content.
	 *
	 * RESTRICTIONS:
	 *
	 * \r and \b work within one line because we are not able to modify
	 * previous output that have already been accepted by the SCLP.
	 *
	 * \t combined with following \r is not correctly represented because
	 * \t is expanded to some spaces but \r does not know about a
	 * previous \t and decreases the current position by one column.
	 * This is in order to keep the implementation slim and quick.
	 */
	for (i_msg = 0; i_msg < count; i_msg++) {
		switch (msg[i_msg]) {
		case '\n':	/* new line, line feed (ASCII)	*/
			/* check if new mto needs to be created */
			if (buffer->current_line == NULL) {
				rc = sclp_initialize_mto(buffer, 0);
				if (rc)
					return i_msg;
			}
			sclp_finalize_mto(buffer);
			break;
		case '\a':	/* bell, one for several times	*/
			/* set SCLP sound alarm bit in General Object */
			buffer->sccb->msg_buf.mdb.go.general_msg_flags |=
				GnrlMsgFlgs_SndAlrm;
			break;
		case '\t':	/* horizontal tabulator	 */
			/* check if new mto needs to be created */
			if (buffer->current_line == NULL) {
				rc = sclp_initialize_mto(buffer,
							 buffer->columns);
				if (rc)
					return i_msg;
			}
			/* "go to (next htab-boundary + 1, same line)" */
			do {
				if (buffer->current_length >= buffer->columns)
					break;
				/* ok, add a blank (0x40 is EBCDIC space) */
				*buffer->current_line++ = 0x40;
				buffer->current_length++;
			} while (buffer->current_length % buffer->htab);
			break;
		case '\f':	/* form feed  */
		case '\v':	/* vertical tabulator  */
			/* "go to (actual column, actual line + 1)" */
			/* = new line, leading spaces */
			if (buffer->current_line != NULL) {
				spaces = buffer->current_length;
				sclp_finalize_mto(buffer);
				rc = sclp_initialize_mto(buffer,
							 buffer->columns);
				if (rc)
					return i_msg;
				memset(buffer->current_line, 0x40, spaces);
				buffer->current_line += spaces;
				buffer->current_length = spaces;
			} else {
				/* on an empty line this is the same as \n */
				rc = sclp_initialize_mto(buffer,
							 buffer->columns);
				if (rc)
					return i_msg;
				sclp_finalize_mto(buffer);
			}
			break;
		case '\b':	/* backspace  */
			/* "go to (actual column - 1, actual line)" */
			/* decrement counter indicating position, */
			/* do not remove last character */
			if (buffer->current_line != NULL &&
			    buffer->current_length > 0) {
				buffer->current_length--;
				buffer->current_line--;
			}
			break;
		case 0x00:	/* end of string  */
			/* transfer current line to SCCB */
			if (buffer->current_line != NULL)
				sclp_finalize_mto(buffer);
			/* skip the rest of the message including the 0 byte */
			i_msg = count - 1;
			break;
		default:	/* no escape character	*/
			/* do not output unprintable characters */
			if (!isprint(msg[i_msg]))
				break;
			/* check if new mto needs to be created */
			if (buffer->current_line == NULL) {
				rc = sclp_initialize_mto(buffer,
							 buffer->columns);
				if (rc)
					return i_msg;
			}
			*buffer->current_line++ = sclp_ascebc(msg[i_msg]);
			buffer->current_length++;
			break;
		}
		/* check if current mto is full */
		if (buffer->current_line != NULL &&
		    buffer->current_length >= buffer->columns)
			sclp_finalize_mto(buffer);
	}

	/* return number of processed characters */
	return i_msg;
}
302
303/*
304 * Return the number of free bytes in the sccb
305 */
306int
307sclp_buffer_space(struct sclp_buffer *buffer)
308{
309 int count;
310
311 count = MAX_SCCB_ROOM - buffer->sccb->header.length;
312 if (buffer->current_line != NULL)
313 count -= sizeof(struct mto) + buffer->current_length;
314 return count;
315}
316
317/*
318 * Return number of characters in buffer
319 */
320int
321sclp_chars_in_buffer(struct sclp_buffer *buffer)
322{
323 int count;
324
325 count = buffer->mto_char_sum;
326 if (buffer->current_line != NULL)
327 count += buffer->current_length;
328 return count;
329}
330
/*
 * sets or provides some values that influence the drivers behaviour
 * If the line currently being built no longer fits into the new width,
 * it is closed immediately.
 */
void
sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns)
{
	buffer->columns = columns;
	if (buffer->current_line != NULL &&
	    buffer->current_length > buffer->columns)
		sclp_finalize_mto(buffer);
}
342
/* Set the horizontal tab width used when expanding '\t' in sclp_write(). */
void
sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab)
{
	buffer->htab = htab;
}
348
349/*
350 * called by sclp_console_init and/or sclp_tty_init
351 */
352int
353sclp_rw_init(void)
354{
355 static int init_done = 0;
356 int rc;
357
358 if (init_done)
359 return 0;
360
361 rc = sclp_register(&sclp_rw_event);
362 if (rc == 0)
363 init_done = 1;
364 return rc;
365}
366
367#define SCLP_BUFFER_MAX_RETRY 1
368
/*
 * second half of Write Event Data-function that has to be done after
 * interruption indicating completion of Service Call.
 * Inspects the SCLP response code, retries recoverable failures up to
 * SCLP_BUFFER_MAX_RETRY times by re-queueing the request, and finally
 * reports the outcome through the buffer's callback.
 */
static void
sclp_writedata_callback(struct sclp_req *request, void *data)
{
	int rc;
	struct sclp_buffer *buffer;
	struct write_sccb *sccb;

	buffer = (struct sclp_buffer *) data;
	sccb = buffer->sccb;

	if (request->status == SCLP_REQ_FAILED) {
		if (buffer->callback != NULL)
			buffer->callback(buffer, -EIO);
		return;
	}
	/* check SCLP response code and choose suitable action	*/
	switch (sccb->header.response_code) {
	case 0x0020 :
		/* Normal completion, buffer processed, message(s) sent */
		rc = 0;
		break;

	case 0x0340: /* Contained SCLP equipment check */
		if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
			rc = -EIO;
			break;
		}
		/* remove processed buffers and requeue rest */
		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
			/* not all buffers were processed */
			sccb->header.response_code = 0x0000;
			buffer->request.status = SCLP_REQ_FILLED;
			rc = sclp_add_request(request);
			if (rc == 0)
				return;		/* callback runs again later */
		} else
			rc = 0;
		break;

	case 0x0040: /* SCLP equipment check */
	case 0x05f0: /* Target resource in improper state */
		if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
			rc = -EIO;
			break;
		}
		/* retry request */
		sccb->header.response_code = 0x0000;
		buffer->request.status = SCLP_REQ_FILLED;
		rc = sclp_add_request(request);
		if (rc == 0)
			return;		/* callback runs again later */
		break;
	default:
		/* 0x71f0: presumably insufficient SCCB resources - confirm */
		if (sccb->header.response_code == 0x71f0)
			rc = -ENOMEM;
		else
			rc = -EINVAL;
		break;
	}
	if (buffer->callback != NULL)
		buffer->callback(buffer, rc);
}
435
/*
 * Setup the request structure in the struct sclp_buffer to do SCLP Write
 * Event Data and pass the request to the core SCLP loop. Return zero on
 * success, non-zero otherwise. On success "callback" will be invoked
 * (possibly from interrupt context) once the request completes.
 */
int
sclp_emit_buffer(struct sclp_buffer *buffer,
		 void (*callback)(struct sclp_buffer *, int))
{
	struct write_sccb *sccb;

	/* add current line if there is one */
	if (buffer->current_line != NULL)
		sclp_finalize_mto(buffer);

	/* Are there messages in the output buffer ? */
	if (buffer->mto_number == 0)
		return -EIO;

	sccb = buffer->sccb;
	if (sclp_rw_event.sclp_send_mask & EvTyp_Msg_Mask)
		/* Use normal write message */
		sccb->msg_buf.header.type = EvTyp_Msg;
	else if (sclp_rw_event.sclp_send_mask & EvTyp_PMsgCmd_Mask)
		/* Use write priority message */
		sccb->msg_buf.header.type = EvTyp_PMsgCmd;
	else
		return -ENOSYS;
	buffer->request.command = SCLP_CMDW_WRITEDATA;
	buffer->request.status = SCLP_REQ_FILLED;
	buffer->request.callback = sclp_writedata_callback;
	buffer->request.callback_data = buffer;
	buffer->request.sccb = sccb;
	buffer->callback = callback;
	return sclp_add_request(&buffer->request);
}
diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h
new file mode 100644
index 000000000000..6aa7a6948bc9
--- /dev/null
+++ b/drivers/s390/char/sclp_rw.h
@@ -0,0 +1,96 @@
/*
 * drivers/s390/char/sclp_rw.h
 * interface to the SCLP-read/write driver
 *
 * S390 version
 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __SCLP_RW_H__
#define __SCLP_RW_H__

#include <linux/list.h>

/*
 * Message Text Object: one per output line (8 bytes each; see the
 * write_room comment in sclp_tty.c).
 */
struct mto {
	u16 length;
	u16 type;
	u16 line_type_flags;
	u8  alarm_control;
	u8  _reserved[3];
} __attribute__((packed));

/* General Object: timestamp/origin header preceding the message text. */
struct go {
	u16 length;
	u16 type;
	u32 domid;
	u8  hhmmss_time[8];
	u8  th_time[3];
	u8  reserved_0;
	u8  dddyyyy_date[7];
	u8  _reserved_1;
	u16 general_msg_flags;
	u8  _reserved_2[10];
	u8  originating_system_name[8];
	u8  job_guest_name[8];
} __attribute__((packed));

/* Header of the Message Data Block. */
struct mdb_header {
	u16 length;
	u16 type;
	u32 tag;
	u32 revision_code;
} __attribute__((packed));

/* Message Data Block: header + general object; mtos are appended after. */
struct mdb {
	struct mdb_header header;
	struct go go;
} __attribute__((packed));

/* Event buffer wrapping one message data block. */
struct msg_buf {
	struct evbuf_header header;
	struct mdb mdb;
} __attribute__((packed));

/* Complete SCCB layout used for write requests. */
struct write_sccb {
	struct sccb_header header;
	struct msg_buf msg_buf;
} __attribute__((packed));

/* The number of empty mto buffers that can be contained in a single sccb. */
#define NR_EMPTY_MTO_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \
			       sizeof(struct write_sccb)) / sizeof(struct mto))

/*
 * data structure for information about list of SCCBs (only for writing),
 * will be located at the end of a SCCBs page
 */
struct sclp_buffer {
	struct list_head list;		/* list_head for sccb_info chain */
	struct sclp_req request;
	struct write_sccb *sccb;
	char *current_line;		/* partially filled line, NULL if none */
	int current_length;
	int retry_count;
	/* output format settings */
	unsigned short columns;
	unsigned short htab;
	/* statistics about this buffer */
	unsigned int mto_char_sum;	/* # chars in sccb */
	unsigned int mto_number;	/* # mtos in sccb */
	/* Callback that is called after reaching final status. */
	void (*callback)(struct sclp_buffer *, int);
};

int sclp_rw_init(void);
struct sclp_buffer *sclp_make_buffer(void *, unsigned short, unsigned short);
void *sclp_unmake_buffer(struct sclp_buffer *);
int sclp_buffer_space(struct sclp_buffer *);
int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int);
int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int));
void sclp_set_columns(struct sclp_buffer *, unsigned short);
void sclp_set_htab(struct sclp_buffer *, unsigned short);
int sclp_chars_in_buffer(struct sclp_buffer *);

#endif /* __SCLP_RW_H__ */
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
new file mode 100644
index 000000000000..a20d7c89341d
--- /dev/null
+++ b/drivers/s390/char/sclp_tty.c
@@ -0,0 +1,813 @@
1/*
2 * drivers/s390/char/sclp_tty.c
3 * SCLP line mode terminal driver.
4 *
5 * S390 version
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */
10
11#include <linux/config.h>
12#include <linux/module.h>
13#include <linux/kmod.h>
14#include <linux/tty.h>
15#include <linux/tty_driver.h>
16#include <linux/sched.h>
17#include <linux/wait.h>
18#include <linux/slab.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <asm/uaccess.h>
23
24#include "ctrlchar.h"
25#include "sclp.h"
26#include "sclp_rw.h"
27#include "sclp_tty.h"
28
29#define SCLP_TTY_PRINT_HEADER "sclp tty driver: "
30
31/*
32 * size of a buffer that collects single characters coming in
33 * via sclp_tty_put_char()
34 */
35#define SCLP_TTY_BUF_SIZE 512
36
/*
 * There is exactly one SCLP terminal, so we can keep things simple
 * and allocate all variables statically.
 */

/* Lock to guard over changes to global variables. */
static spinlock_t sclp_tty_lock;
/* List of free pages that can be used for console output buffering. */
static struct list_head sclp_tty_pages;
/* List of full struct sclp_buffer structures ready for output. */
static struct list_head sclp_tty_outqueue;
/* Counter how many buffers are emitted. */
static int sclp_tty_buffer_count;
/* Pointer to current console buffer. */
static struct sclp_buffer *sclp_ttybuf;
/* Timer for delayed output of console messages. */
static struct timer_list sclp_tty_timer;
/* Waitqueue to wait for buffers to get empty. */
static wait_queue_head_t sclp_tty_waitq;

/* The one-and-only open tty, NULL while the terminal is closed. */
static struct tty_struct *sclp_tty;
/* Staging buffer for single characters from sclp_tty_put_char(). */
static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
static unsigned short int sclp_tty_chars_count;

struct tty_driver *sclp_tty_driver;

extern struct termios tty_std_termios;

/* Run-time i/o settings, adjustable via the TIOCSCLP* ioctls below. */
static struct sclp_ioctls sclp_ioctls;
static struct sclp_ioctls sclp_ioctls_init =
{
	8,		/* 1 hor. tab. = 8 spaces */
	0,		/* no echo of input by this driver */
	80,		/* 80 characters/line */
	1,		/* write after 1/10 s without final new line */
	MAX_KMEM_PAGES,	/* quick fix: avoid __alloc_pages */
	MAX_KMEM_PAGES,	/* take 32/64 pages from kernel memory, */
	0,		/* do not convert to lower case */
	0x6c		/* to separate upper and lower case */
			/* ('%' in EBCDIC) */
};
78
79/* This routine is called whenever we try to open a SCLP terminal. */
80static int
81sclp_tty_open(struct tty_struct *tty, struct file *filp)
82{
83 sclp_tty = tty;
84 tty->driver_data = NULL;
85 tty->low_latency = 0;
86 return 0;
87}
88
89/* This routine is called when the SCLP terminal is closed. */
90static void
91sclp_tty_close(struct tty_struct *tty, struct file *filp)
92{
93 if (tty->count > 1)
94 return;
95 sclp_tty = NULL;
96}
97
/*
 * execute commands to control the i/o behaviour of the SCLP tty at runtime
 *
 * Get-variants copy the current setting to user space; set-variants read
 * the new value from user space.  Settings that affect output formatting
 * (htab, columns) are additionally pushed into the currently open output
 * buffer, if any ("check" flag).  Returns 0, -EFAULT on a bad user
 * pointer, -EIO if the tty is in error state, or -ENOIOCTLCMD for
 * unknown commands.
 */
static int
sclp_tty_ioctl(struct tty_struct *tty, struct file * file,
	       unsigned int cmd, unsigned long arg)
{
	unsigned long flags;
	unsigned int obuf;
	int check;
	int rc;

	if (tty->flags & (1 << TTY_IO_ERROR))
		return -EIO;
	rc = 0;
	check = 0;
	switch (cmd) {
	case TIOCSCLPSHTAB:
		/* set width of horizontal tab */
		if (get_user(sclp_ioctls.htab, (unsigned short __user *) arg))
			rc = -EFAULT;
		else
			check = 1;
		break;
	case TIOCSCLPGHTAB:
		/* get width of horizontal tab */
		if (put_user(sclp_ioctls.htab, (unsigned short __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPSECHO:
		/* enable/disable echo of input */
		if (get_user(sclp_ioctls.echo, (unsigned char __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPGECHO:
		/* Is echo of input enabled ? */
		if (put_user(sclp_ioctls.echo, (unsigned char __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPSCOLS:
		/* set number of columns for output */
		if (get_user(sclp_ioctls.columns, (unsigned short __user *) arg))
			rc = -EFAULT;
		else
			check = 1;
		break;
	case TIOCSCLPGCOLS:
		/* get number of columns for output */
		if (put_user(sclp_ioctls.columns, (unsigned short __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPSNL:
		/* enable/disable writing without final new line character */
		if (get_user(sclp_ioctls.final_nl, (signed char __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPGNL:
		/* Is writing without final new line character enabled ? */
		if (put_user(sclp_ioctls.final_nl, (signed char __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPSOBUF:
		/*
		 * set the maximum buffers size for output, will be rounded
		 * up to next 4kB boundary and stored as number of SCCBs
		 * (4kB Buffers) limitation: 256 x 4kB
		 *
		 * NOTE(review): TIOCSCLPSOBUF is declared with an
		 * 'unsigned short' payload in sclp_tty.h but is read here
		 * as an unsigned int — confirm which size user space uses.
		 */
		if (get_user(obuf, (unsigned int __user *) arg) == 0) {
			if (obuf & 0xFFF)
				sclp_ioctls.max_sccb = (obuf >> 12) + 1;
			else
				sclp_ioctls.max_sccb = (obuf >> 12);
		} else
			rc = -EFAULT;
		break;
	case TIOCSCLPGOBUF:
		/* get the maximum buffers size for output */
		obuf = sclp_ioctls.max_sccb << 12;
		if (put_user(obuf, (unsigned int __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPGKBUF:
		/* get the number of buffers got from kernel at startup */
		if (put_user(sclp_ioctls.kmem_sccb, (unsigned short __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPSCASE:
		/* enable/disable conversion from upper to lower case */
		if (get_user(sclp_ioctls.tolower, (unsigned char __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPGCASE:
		/* Is conversion from upper to lower case of input enabled? */
		if (put_user(sclp_ioctls.tolower, (unsigned char __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPSDELIM:
		/*
		 * set special character used for separating upper and
		 * lower case, 0x00 disables this feature
		 */
		if (get_user(sclp_ioctls.delim, (unsigned char __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPGDELIM:
		/*
		 * get special character used for separating upper and
		 * lower case, 0x00 disables this feature
		 */
		if (put_user(sclp_ioctls.delim, (unsigned char __user *) arg))
			rc = -EFAULT;
		break;
	case TIOCSCLPSINIT:
		/* set initial (default) sclp ioctls */
		sclp_ioctls = sclp_ioctls_init;
		check = 1;
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
	if (check) {
		/* Propagate new formatting to the open output buffer. */
		spin_lock_irqsave(&sclp_tty_lock, flags);
		if (sclp_ttybuf != NULL) {
			sclp_set_htab(sclp_ttybuf, sclp_ioctls.htab);
			sclp_set_columns(sclp_ttybuf, sclp_ioctls.columns);
		}
		spin_unlock_irqrestore(&sclp_tty_lock, flags);
	}
	return rc;
}
227
228/*
229 * This routine returns the numbers of characters the tty driver
230 * will accept for queuing to be written. This number is subject
231 * to change as output buffers get emptied, or if the output flow
232 * control is acted. This is not an exact number because not every
233 * character needs the same space in the sccb. The worst case is
234 * a string of newlines. Every newlines creates a new mto which
235 * needs 8 bytes.
236 */
237static int
238sclp_tty_write_room (struct tty_struct *tty)
239{
240 unsigned long flags;
241 struct list_head *l;
242 int count;
243
244 spin_lock_irqsave(&sclp_tty_lock, flags);
245 count = 0;
246 if (sclp_ttybuf != NULL)
247 count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct mto);
248 list_for_each(l, &sclp_tty_pages)
249 count += NR_EMPTY_MTO_PER_SCCB;
250 spin_unlock_irqrestore(&sclp_tty_lock, flags);
251 return count;
252}
253
/*
 * Completion callback for emitted tty buffers.  Recycles the finished
 * buffer's page onto the free list and starts the next queued buffer,
 * looping as long as sclp_emit_buffer() fails synchronously so no
 * buffer is ever left stranded on the out queue.
 */
static void
sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
{
	unsigned long flags;
	void *page;

	do {
		page = sclp_unmake_buffer(buffer);
		spin_lock_irqsave(&sclp_tty_lock, flags);
		/* Remove buffer from outqueue */
		list_del(&buffer->list);
		sclp_tty_buffer_count--;
		list_add_tail((struct list_head *) page, &sclp_tty_pages);
		/* Check if there is a pending buffer on the out queue. */
		buffer = NULL;
		if (!list_empty(&sclp_tty_outqueue))
			buffer = list_entry(sclp_tty_outqueue.next,
					    struct sclp_buffer, list);
		spin_unlock_irqrestore(&sclp_tty_lock, flags);
		/* Loop again only if emitting the next buffer failed. */
	} while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback));
	/* A page became free: wake writers blocked in sclp_tty_write_string. */
	wake_up(&sclp_tty_waitq);
	/* check if the tty needs a wake up call */
	if (sclp_tty != NULL) {
		tty_wakeup(sclp_tty);
	}
}
280
281static inline void
282__sclp_ttybuf_emit(struct sclp_buffer *buffer)
283{
284 unsigned long flags;
285 int count;
286 int rc;
287
288 spin_lock_irqsave(&sclp_tty_lock, flags);
289 list_add_tail(&buffer->list, &sclp_tty_outqueue);
290 count = sclp_tty_buffer_count++;
291 spin_unlock_irqrestore(&sclp_tty_lock, flags);
292 if (count)
293 return;
294 rc = sclp_emit_buffer(buffer, sclp_ttybuf_callback);
295 if (rc)
296 sclp_ttybuf_callback(buffer, rc);
297}
298
299/*
300 * When this routine is called from the timer then we flush the
301 * temporary write buffer.
302 */
303static void
304sclp_tty_timeout(unsigned long data)
305{
306 unsigned long flags;
307 struct sclp_buffer *buf;
308
309 spin_lock_irqsave(&sclp_tty_lock, flags);
310 buf = sclp_ttybuf;
311 sclp_ttybuf = NULL;
312 spin_unlock_irqrestore(&sclp_tty_lock, flags);
313
314 if (buf != NULL) {
315 __sclp_ttybuf_emit(buf);
316 }
317}
318
/*
 * Write a string to the sclp tty.
 *
 * Fills the current output buffer, emitting full buffers and allocating
 * new pages as needed.  May block (or busy-wait via sclp_sync_wait() in
 * interrupt context) until a free page becomes available.  Note that
 * sclp_tty_lock is dropped and re-acquired around every blocking or
 * emitting step, so global state is re-checked after each re-acquire.
 */
static void
sclp_tty_write_string(const unsigned char *str, int count)
{
	unsigned long flags;
	void *page;
	int written;
	struct sclp_buffer *buf;

	if (count <= 0)
		return;
	spin_lock_irqsave(&sclp_tty_lock, flags);
	do {
		/* Create a sclp output buffer if none exists yet */
		if (sclp_ttybuf == NULL) {
			while (list_empty(&sclp_tty_pages)) {
				/* Must not sleep holding the lock. */
				spin_unlock_irqrestore(&sclp_tty_lock, flags);
				if (in_interrupt())
					sclp_sync_wait();
				else
					wait_event(sclp_tty_waitq,
						!list_empty(&sclp_tty_pages));
				spin_lock_irqsave(&sclp_tty_lock, flags);
			}
			page = sclp_tty_pages.next;
			list_del((struct list_head *) page);
			sclp_ttybuf = sclp_make_buffer(page,
						       sclp_ioctls.columns,
						       sclp_ioctls.htab);
		}
		/* try to write the string to the current output buffer */
		written = sclp_write(sclp_ttybuf, str, count);
		if (written == count)
			break;
		/*
		 * Not all characters could be written to the current
		 * output buffer. Emit the buffer, create a new buffer
		 * and then output the rest of the string.
		 */
		buf = sclp_ttybuf;
		sclp_ttybuf = NULL;
		spin_unlock_irqrestore(&sclp_tty_lock, flags);
		__sclp_ttybuf_emit(buf);
		spin_lock_irqsave(&sclp_tty_lock, flags);
		str += written;
		count -= written;
	} while (count > 0);
	/* Setup timer to output current console buffer after 1/10 second */
	if (sclp_ioctls.final_nl) {
		/* Delayed flush: give a trailing newline a chance to arrive. */
		if (sclp_ttybuf != NULL &&
		    sclp_chars_in_buffer(sclp_ttybuf) != 0 &&
		    !timer_pending(&sclp_tty_timer)) {
			init_timer(&sclp_tty_timer);
			sclp_tty_timer.function = sclp_tty_timeout;
			sclp_tty_timer.data = 0UL;
			sclp_tty_timer.expires = jiffies + HZ/10;
			add_timer(&sclp_tty_timer);
		}
	} else {
		/* Immediate flush of whatever is buffered. */
		if (sclp_ttybuf != NULL &&
		    sclp_chars_in_buffer(sclp_ttybuf) != 0) {
			buf = sclp_ttybuf;
			sclp_ttybuf = NULL;
			spin_unlock_irqrestore(&sclp_tty_lock, flags);
			__sclp_ttybuf_emit(buf);
			spin_lock_irqsave(&sclp_tty_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_tty_lock, flags);
}
391
392/*
393 * This routine is called by the kernel to write a series of characters to the
394 * tty device. The characters may come from user space or kernel space. This
395 * routine will return the number of characters actually accepted for writing.
396 */
397static int
398sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
399{
400 if (sclp_tty_chars_count > 0) {
401 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
402 sclp_tty_chars_count = 0;
403 }
404 sclp_tty_write_string(buf, count);
405 return count;
406}
407
408/*
409 * This routine is called by the kernel to write a single character to the tty
410 * device. If the kernel uses this routine, it must call the flush_chars()
411 * routine (if defined) when it is done stuffing characters into the driver.
412 *
413 * Characters provided to sclp_tty_put_char() are buffered by the SCLP driver.
414 * If the given character is a '\n' the contents of the SCLP write buffer
415 * - including previous characters from sclp_tty_put_char() and strings from
416 * sclp_write() without final '\n' - will be written.
417 */
418static void
419sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
420{
421 sclp_tty_chars[sclp_tty_chars_count++] = ch;
422 if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
423 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
424 sclp_tty_chars_count = 0;
425 }
426}
427
428/*
429 * This routine is called by the kernel after it has written a series of
430 * characters to the tty device using put_char().
431 */
432static void
433sclp_tty_flush_chars(struct tty_struct *tty)
434{
435 if (sclp_tty_chars_count > 0) {
436 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
437 sclp_tty_chars_count = 0;
438 }
439}
440
441/*
442 * This routine returns the number of characters in the write buffer of the
443 * SCLP driver. The provided number includes all characters that are stored
444 * in the SCCB (will be written next time the SCLP is not busy) as well as
445 * characters in the write buffer (will not be written as long as there is a
446 * final line feed missing).
447 */
448static int
449sclp_tty_chars_in_buffer(struct tty_struct *tty)
450{
451 unsigned long flags;
452 struct list_head *l;
453 struct sclp_buffer *t;
454 int count;
455
456 spin_lock_irqsave(&sclp_tty_lock, flags);
457 count = 0;
458 if (sclp_ttybuf != NULL)
459 count = sclp_chars_in_buffer(sclp_ttybuf);
460 list_for_each(l, &sclp_tty_outqueue) {
461 t = list_entry(l, struct sclp_buffer, list);
462 count += sclp_chars_in_buffer(t);
463 }
464 spin_unlock_irqrestore(&sclp_tty_lock, flags);
465 return count;
466}
467
468/*
469 * removes all content from buffers of low level driver
470 */
471static void
472sclp_tty_flush_buffer(struct tty_struct *tty)
473{
474 if (sclp_tty_chars_count > 0) {
475 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
476 sclp_tty_chars_count = 0;
477 }
478}
479
/*
 * push input to tty
 *
 * Feeds operator input through the control-character filter and into
 * the tty flip buffer.  A trailing "^n" (or its EBCDIC-artifact form
 * "\0252n", octal 0252) suppresses the newline that is otherwise
 * appended to each input line.
 *
 * NOTE(review): the flip buffer is written without checking remaining
 * space — presumably input lines are short enough in practice; verify
 * against the maximum SCLP input length.
 */
static void
sclp_tty_input(unsigned char* buf, unsigned int count)
{
	unsigned int cchar;

	/*
	 * If this tty driver is currently closed
	 * then throw the received input away.
	 */
	if (sclp_tty == NULL)
		return;
	cchar = ctrlchar_handle(buf, count, sclp_tty);
	switch (cchar & CTRLCHAR_MASK) {
	case CTRLCHAR_SYSRQ:
		/* Already handled inside ctrlchar_handle(). */
		break;
	case CTRLCHAR_CTRL:
		/* Deliver the single control character to the tty. */
		sclp_tty->flip.count++;
		*sclp_tty->flip.flag_buf_ptr++ = TTY_NORMAL;
		*sclp_tty->flip.char_buf_ptr++ = cchar;
		tty_flip_buffer_push(sclp_tty);
		break;
	case CTRLCHAR_NONE:
		/* send (normal) input to line discipline */
		memcpy(sclp_tty->flip.char_buf_ptr, buf, count);
		/* Append '\n' unless the line ends in the "^n" marker. */
		if (count < 2 ||
		    (strncmp ((const char *) buf + count - 2, "^n", 2) &&
		     strncmp ((const char *) buf + count - 2, "\0252n", 2))) {
			sclp_tty->flip.char_buf_ptr[count] = '\n';
			count++;
		} else
			count -= 2;
		memset(sclp_tty->flip.flag_buf_ptr, TTY_NORMAL, count);
		sclp_tty->flip.char_buf_ptr += count;
		sclp_tty->flip.flag_buf_ptr += count;
		sclp_tty->flip.count += count;
		tty_flip_buffer_push(sclp_tty);
		break;
	}
}
522
523/*
524 * get a EBCDIC string in upper/lower case,
525 * find out characters in lower/upper case separated by a special character,
526 * modifiy original string,
527 * returns length of resulting string
528 */
529static int
530sclp_switch_cases(unsigned char *buf, int count,
531 unsigned char delim, int tolower)
532{
533 unsigned char *ip, *op;
534 int toggle;
535
536 /* initially changing case is off */
537 toggle = 0;
538 ip = op = buf;
539 while (count-- > 0) {
540 /* compare with special character */
541 if (*ip == delim) {
542 /* followed by another special character? */
543 if (count && ip[1] == delim) {
544 /*
545 * ... then put a single copy of the special
546 * character to the output string
547 */
548 *op++ = *ip++;
549 count--;
550 } else
551 /*
552 * ... special character follower by a normal
553 * character toggles the case change behaviour
554 */
555 toggle = ~toggle;
556 /* skip special character */
557 ip++;
558 } else
559 /* not the special character */
560 if (toggle)
561 /* but case switching is on */
562 if (tolower)
563 /* switch to uppercase */
564 *op++ = _ebc_toupper[(int) *ip++];
565 else
566 /* switch to lowercase */
567 *op++ = _ebc_tolower[(int) *ip++];
568 else
569 /* no case switching, copy the character */
570 *op++ = *ip++;
571 }
572 /* return length of reformatted string. */
573 return op - buf;
574}
575
/*
 * Post-process one raw operator input span [start, end) in place:
 * optional EBCDIC lower-casing, optional delimiter-driven case
 * switching, EBCDIC->ASCII conversion, optional echo, then hand the
 * result to the tty input path.  Note each step mutates the original
 * data inside the SCCB.
 */
static void
sclp_get_input(unsigned char *start, unsigned char *end)
{
	int count;

	count = end - start;
	/*
	 * if set in ioctl convert EBCDIC to lower case
	 * (modify original input in SCCB)
	 */
	if (sclp_ioctls.tolower)
		EBC_TOLOWER(start, count);

	/*
	 * if set in ioctl find out characters in lower or upper case
	 * (depends on current case) separated by a special character,
	 * works on EBCDIC
	 */
	if (sclp_ioctls.delim)
		count = sclp_switch_cases(start, count,
					  sclp_ioctls.delim,
					  sclp_ioctls.tolower);

	/* convert EBCDIC to ASCII (modify original input in SCCB) */
	sclp_ebcasc_str(start, count);

	/* if set in ioctl write operators input to console */
	if (sclp_ioctls.echo)
		sclp_tty_write(sclp_tty, start, count);

	/* transfer input to high level driver */
	sclp_tty_input(start, count);
}
609
610static inline struct gds_vector *
611find_gds_vector(struct gds_vector *start, struct gds_vector *end, u16 id)
612{
613 struct gds_vector *vec;
614
615 for (vec = start; vec < end; vec = (void *) vec + vec->length)
616 if (vec->gds_id == id)
617 return vec;
618 return NULL;
619}
620
621static inline struct gds_subvector *
622find_gds_subvector(struct gds_subvector *start,
623 struct gds_subvector *end, u8 key)
624{
625 struct gds_subvector *subvec;
626
627 for (subvec = start; subvec < end;
628 subvec = (void *) subvec + subvec->length)
629 if (subvec->key == key)
630 return subvec;
631 return NULL;
632}
633
634static inline void
635sclp_eval_selfdeftextmsg(struct gds_subvector *start,
636 struct gds_subvector *end)
637{
638 struct gds_subvector *subvec;
639
640 subvec = start;
641 while (subvec < end) {
642 subvec = find_gds_subvector(subvec, end, 0x30);
643 if (!subvec)
644 break;
645 sclp_get_input((unsigned char *)(subvec + 1),
646 (unsigned char *) subvec + subvec->length);
647 subvec = (void *) subvec + subvec->length;
648 }
649}
650
651static inline void
652sclp_eval_textcmd(struct gds_subvector *start,
653 struct gds_subvector *end)
654{
655 struct gds_subvector *subvec;
656
657 subvec = start;
658 while (subvec < end) {
659 subvec = find_gds_subvector(subvec, end,
660 GDS_KEY_SelfDefTextMsg);
661 if (!subvec)
662 break;
663 sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1),
664 (void *)subvec + subvec->length);
665 subvec = (void *) subvec + subvec->length;
666 }
667}
668
669static inline void
670sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end)
671{
672 struct gds_vector *vec;
673
674 vec = start;
675 while (vec < end) {
676 vec = find_gds_vector(vec, end, GDS_ID_TextCmd);
677 if (!vec)
678 break;
679 sclp_eval_textcmd((struct gds_subvector *)(vec + 1),
680 (void *) vec + vec->length);
681 vec = (void *) vec + vec->length;
682 }
683}
684
685
686static inline void
687sclp_eval_mdsmu(struct gds_vector *start, void *end)
688{
689 struct gds_vector *vec;
690
691 vec = find_gds_vector(start, end, GDS_ID_CPMSU);
692 if (vec)
693 sclp_eval_cpmsu(vec + 1, (void *) vec + vec->length);
694}
695
696static void
697sclp_tty_receiver(struct evbuf_header *evbuf)
698{
699 struct gds_vector *start, *end, *vec;
700
701 start = (struct gds_vector *)(evbuf + 1);
702 end = (void *) evbuf + evbuf->length;
703 vec = find_gds_vector(start, end, GDS_ID_MDSMU);
704 if (vec)
705 sclp_eval_mdsmu(vec + 1, (void *) vec + vec->length);
706}
707
/* SCLP state-change notification: the tty driver has nothing to do here. */
static void
sclp_tty_state_change(struct sclp_register *reg)
{
}
712
/* Registration record for receiving operator command input events. */
static struct sclp_register sclp_input_event =
{
	.receive_mask = EvTyp_OpCmd_Mask | EvTyp_PMsgCmd_Mask,
	.state_change_fn = sclp_tty_state_change,
	.receiver_fn = sclp_tty_receiver
};
719
/* tty operations implemented by the SCLP line-mode terminal driver. */
static struct tty_operations sclp_ops = {
	.open = sclp_tty_open,
	.close = sclp_tty_close,
	.write = sclp_tty_write,
	.put_char = sclp_tty_put_char,
	.flush_chars = sclp_tty_flush_chars,
	.write_room = sclp_tty_write_room,
	.chars_in_buffer = sclp_tty_chars_in_buffer,
	.flush_buffer = sclp_tty_flush_buffer,
	.ioctl = sclp_tty_ioctl,
};
731
732int __init
733sclp_tty_init(void)
734{
735 struct tty_driver *driver;
736 void *page;
737 int i;
738 int rc;
739
740 if (!CONSOLE_IS_SCLP)
741 return 0;
742 driver = alloc_tty_driver(1);
743 if (!driver)
744 return -ENOMEM;
745
746 rc = sclp_rw_init();
747 if (rc) {
748 printk(KERN_ERR SCLP_TTY_PRINT_HEADER
749 "could not register tty - "
750 "sclp_rw_init returned %d\n", rc);
751 put_tty_driver(driver);
752 return rc;
753 }
754 /* Allocate pages for output buffering */
755 INIT_LIST_HEAD(&sclp_tty_pages);
756 for (i = 0; i < MAX_KMEM_PAGES; i++) {
757 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
758 if (page == NULL) {
759 put_tty_driver(driver);
760 return -ENOMEM;
761 }
762 list_add_tail((struct list_head *) page, &sclp_tty_pages);
763 }
764 INIT_LIST_HEAD(&sclp_tty_outqueue);
765 spin_lock_init(&sclp_tty_lock);
766 init_waitqueue_head(&sclp_tty_waitq);
767 init_timer(&sclp_tty_timer);
768 sclp_ttybuf = NULL;
769 sclp_tty_buffer_count = 0;
770 if (MACHINE_IS_VM) {
771 /*
772 * save 4 characters for the CPU number
773 * written at start of each line by VM/CP
774 */
775 sclp_ioctls_init.columns = 76;
776 /* case input lines to lowercase */
777 sclp_ioctls_init.tolower = 1;
778 }
779 sclp_ioctls = sclp_ioctls_init;
780 sclp_tty_chars_count = 0;
781 sclp_tty = NULL;
782
783 rc = sclp_register(&sclp_input_event);
784 if (rc) {
785 put_tty_driver(driver);
786 return rc;
787 }
788
789 driver->owner = THIS_MODULE;
790 driver->driver_name = "sclp_line";
791 driver->name = "sclp_line";
792 driver->major = TTY_MAJOR;
793 driver->minor_start = 64;
794 driver->type = TTY_DRIVER_TYPE_SYSTEM;
795 driver->subtype = SYSTEM_TYPE_TTY;
796 driver->init_termios = tty_std_termios;
797 driver->init_termios.c_iflag = IGNBRK | IGNPAR;
798 driver->init_termios.c_oflag = ONLCR | XTABS;
799 driver->init_termios.c_lflag = ISIG | ECHO;
800 driver->flags = TTY_DRIVER_REAL_RAW;
801 tty_set_operations(driver, &sclp_ops);
802 rc = tty_register_driver(driver);
803 if (rc) {
804 printk(KERN_ERR SCLP_TTY_PRINT_HEADER
805 "could not register tty - "
806 "tty_register_driver returned %d\n", rc);
807 put_tty_driver(driver);
808 return rc;
809 }
810 sclp_tty_driver = driver;
811 return 0;
812}
813module_init(sclp_tty_init);
diff --git a/drivers/s390/char/sclp_tty.h b/drivers/s390/char/sclp_tty.h
new file mode 100644
index 000000000000..0ce2c1fc5340
--- /dev/null
+++ b/drivers/s390/char/sclp_tty.h
@@ -0,0 +1,71 @@
/*
 * drivers/s390/char/sclp_tty.h
 * interface to the SCLP-read/write driver
 *
 * S390 version
 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __SCLP_TTY_H__
#define __SCLP_TTY_H__

#include <linux/ioctl.h>
#include <linux/termios.h>
#include <linux/tty_driver.h>

/* This is the type of data structures storing sclp ioctl setting. */
struct sclp_ioctls {
	unsigned short htab;		/* width of a horizontal tab */
	unsigned char echo;		/* echo input to the console? */
	unsigned short columns;		/* output line width */
	unsigned char final_nl;		/* flush after 1/10s without '\n' */
	unsigned short max_sccb;	/* output buffer limit in 4kB SCCBs */
	unsigned short kmem_sccb;	/* can't be modified at run time */
	unsigned char tolower;		/* convert input to lower case? */
	unsigned char delim;		/* case-switch delimiter, 0 = off */
};

/* must be unique, FIXME: must be added in Documentation/ioctl_number.txt */
#define SCLP_IOCTL_LETTER 'B'

/* set width of horizontal tabulator */
#define TIOCSCLPSHTAB	_IOW(SCLP_IOCTL_LETTER, 0, unsigned short)
/* enable/disable echo of input (independent from line discipline) */
#define TIOCSCLPSECHO	_IOW(SCLP_IOCTL_LETTER, 1, unsigned char)
/* set number of columns for output */
#define TIOCSCLPSCOLS	_IOW(SCLP_IOCTL_LETTER, 2, unsigned short)
/* enable/disable writing without final new line character */
#define TIOCSCLPSNL	_IOW(SCLP_IOCTL_LETTER, 4, signed char)
/* set the maximum buffers size for output, rounded up to next 4kB boundary */
/* NOTE(review): declared with an unsigned short payload, but sclp_tty.c
 * reads it with get_user() on an unsigned int — confirm intended size. */
#define TIOCSCLPSOBUF	_IOW(SCLP_IOCTL_LETTER, 5, unsigned short)
/* set initial (default) sclp ioctls */
#define TIOCSCLPSINIT	_IO(SCLP_IOCTL_LETTER, 6)
/* enable/disable conversion from upper to lower case of input */
#define TIOCSCLPSCASE	_IOW(SCLP_IOCTL_LETTER, 7, unsigned char)
/* set special character used for separating upper and lower case, */
/* 0x00 disables this feature */
#define TIOCSCLPSDELIM	_IOW(SCLP_IOCTL_LETTER, 9, unsigned char)

/* get width of horizontal tabulator */
#define TIOCSCLPGHTAB	_IOR(SCLP_IOCTL_LETTER, 10, unsigned short)
/* Is echo of input enabled ? (independent from line discipline) */
#define TIOCSCLPGECHO	_IOR(SCLP_IOCTL_LETTER, 11, unsigned char)
/* get number of columns for output */
#define TIOCSCLPGCOLS	_IOR(SCLP_IOCTL_LETTER, 12, unsigned short)
/* Is writing without final new line character enabled ? */
#define TIOCSCLPGNL	_IOR(SCLP_IOCTL_LETTER, 14, signed char)
/* get the maximum buffers size for output */
#define TIOCSCLPGOBUF	_IOR(SCLP_IOCTL_LETTER, 15, unsigned short)
/* Is conversion from upper to lower case of input enabled ? */
#define TIOCSCLPGCASE	_IOR(SCLP_IOCTL_LETTER, 17, unsigned char)
/* get special character used for separating upper and lower case, */
/* 0x00 disables this feature */
#define TIOCSCLPGDELIM	_IOR(SCLP_IOCTL_LETTER, 19, unsigned char)
/* get the number of buffers/pages got from kernel at startup */
#define TIOCSCLPGKBUF	_IOR(SCLP_IOCTL_LETTER, 20, unsigned short)

extern struct tty_driver *sclp_tty_driver;

#endif /* __SCLP_TTY_H__ */
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
new file mode 100644
index 000000000000..06bd85824d7b
--- /dev/null
+++ b/drivers/s390/char/sclp_vt220.c
@@ -0,0 +1,785 @@
1/*
2 * drivers/s390/char/sclp_vt220.c
3 * SCLP VT220 terminal driver.
4 *
5 * S390 version
6 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
8 */
9
10#include <linux/config.h>
11#include <linux/module.h>
12#include <linux/spinlock.h>
13#include <linux/list.h>
14#include <linux/wait.h>
15#include <linux/timer.h>
16#include <linux/kernel.h>
17#include <linux/tty.h>
18#include <linux/tty_driver.h>
19#include <linux/sched.h>
20#include <linux/errno.h>
21#include <linux/mm.h>
22#include <linux/major.h>
23#include <linux/console.h>
24#include <linux/kdev_t.h>
25#include <linux/bootmem.h>
26#include <linux/interrupt.h>
27#include <linux/init.h>
28#include <asm/uaccess.h>
29#include "sclp.h"
30
31#define SCLP_VT220_PRINT_HEADER "sclp vt220 tty driver: "
32#define SCLP_VT220_MAJOR TTY_MAJOR
33#define SCLP_VT220_MINOR 65
34#define SCLP_VT220_DRIVER_NAME "sclp_vt220"
35#define SCLP_VT220_DEVICE_NAME "ttysclp"
36#define SCLP_VT220_CONSOLE_NAME "ttyS"
37#define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */
38#define SCLP_VT220_BUF_SIZE 80
39
/* Representation of a single write request. The structure is placed at
 * the end of a page whose start holds the SCCB with the actual output
 * data (see sclp_vt220_initialize_page()). */
struct sclp_vt220_request {
	struct list_head list;		/* entry in sclp_vt220_outqueue */
	struct sclp_req sclp_req;	/* underlying SCLP write request */
	int retry_count;		/* retries done for this buffer */
};
46
/* VT220 SCCB: service-call control block immediately followed by the
 * event buffer that carries the VT220 data stream. Both length fields
 * grow as characters are appended (see sclp_vt220_add_msg()). */
struct sclp_vt220_sccb {
	struct sccb_header header;	/* covers SCCB incl. appended data */
	struct evbuf_header evbuf;	/* VT220 message event buffer */
};
52
53#define SCLP_VT220_MAX_CHARS_PER_BUFFER (PAGE_SIZE - \
54 sizeof(struct sclp_vt220_request) - \
55 sizeof(struct sclp_vt220_sccb))
56
57/* Structures and data needed to register tty driver */
58static struct tty_driver *sclp_vt220_driver;
59
60/* The tty_struct that the kernel associated with us */
61static struct tty_struct *sclp_vt220_tty;
62
63/* Lock to protect internal data from concurrent access */
64static spinlock_t sclp_vt220_lock;
65
66/* List of empty pages to be used as write request buffers */
67static struct list_head sclp_vt220_empty;
68
69/* List of pending requests */
70static struct list_head sclp_vt220_outqueue;
71
72/* Number of requests in outqueue */
73static int sclp_vt220_outqueue_count;
74
75/* Wait queue used to delay write requests while we've run out of buffers */
76static wait_queue_head_t sclp_vt220_waitq;
77
78/* Timer used for delaying write requests to merge subsequent messages into
79 * a single buffer */
80static struct timer_list sclp_vt220_timer;
81
82/* Pointer to current request buffer which has been partially filled but not
83 * yet sent */
84static struct sclp_vt220_request *sclp_vt220_current_request;
85
86/* Number of characters in current request buffer */
87static int sclp_vt220_buffered_chars;
88
89/* Flag indicating whether this driver has already been initialized */
90static int sclp_vt220_initialized = 0;
91
92/* Flag indicating that sclp_vt220_current_request should really
93 * have been already queued but wasn't because the SCLP was processing
94 * another buffer */
95static int sclp_vt220_flush_later;
96
97static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
98static int __sclp_vt220_emit(struct sclp_vt220_request *request);
99static void sclp_vt220_emit_current(void);
100
/* Registration structure for our interest in SCLP event buffers:
 * we both send and receive VT220 message events; no state-change
 * notification is needed. */
static struct sclp_register sclp_vt220_register = {
	.send_mask		= EvTyp_VT220Msg_Mask,
	.receive_mask		= EvTyp_VT220Msg_Mask,
	.state_change_fn	= NULL,
	.receiver_fn		= sclp_vt220_receiver_fn
};
108
109
110/*
111 * Put provided request buffer back into queue and check emit pending
112 * buffers if necessary.
113 */
114static void
115sclp_vt220_process_queue(struct sclp_vt220_request *request)
116{
117 unsigned long flags;
118 void *page;
119
120 do {
121 /* Put buffer back to list of empty buffers */
122 page = request->sclp_req.sccb;
123 spin_lock_irqsave(&sclp_vt220_lock, flags);
124 /* Move request from outqueue to empty queue */
125 list_del(&request->list);
126 sclp_vt220_outqueue_count--;
127 list_add_tail((struct list_head *) page, &sclp_vt220_empty);
128 /* Check if there is a pending buffer on the out queue. */
129 request = NULL;
130 if (!list_empty(&sclp_vt220_outqueue))
131 request = list_entry(sclp_vt220_outqueue.next,
132 struct sclp_vt220_request, list);
133 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
134 } while (request && __sclp_vt220_emit(request));
135 if (request == NULL && sclp_vt220_flush_later)
136 sclp_vt220_emit_current();
137 wake_up(&sclp_vt220_waitq);
138 /* Check if the tty needs a wake up call */
139 if (sclp_vt220_tty != NULL) {
140 tty_wakeup(sclp_vt220_tty);
141 }
142}
143
144#define SCLP_BUFFER_MAX_RETRY 1
145
146/*
147 * Callback through which the result of a write request is reported by the
148 * SCLP.
149 */
150static void
151sclp_vt220_callback(struct sclp_req *request, void *data)
152{
153 struct sclp_vt220_request *vt220_request;
154 struct sclp_vt220_sccb *sccb;
155
156 vt220_request = (struct sclp_vt220_request *) data;
157 if (request->status == SCLP_REQ_FAILED) {
158 sclp_vt220_process_queue(vt220_request);
159 return;
160 }
161 sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;
162
163 /* Check SCLP response code and choose suitable action */
164 switch (sccb->header.response_code) {
165 case 0x0020 :
166 break;
167
168 case 0x05f0: /* Target resource in improper state */
169 break;
170
171 case 0x0340: /* Contained SCLP equipment check */
172 if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
173 break;
174 /* Remove processed buffers and requeue rest */
175 if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
176 /* Not all buffers were processed */
177 sccb->header.response_code = 0x0000;
178 vt220_request->sclp_req.status = SCLP_REQ_FILLED;
179 if (sclp_add_request(request) == 0)
180 return;
181 }
182 break;
183
184 case 0x0040: /* SCLP equipment check */
185 if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
186 break;
187 sccb->header.response_code = 0x0000;
188 vt220_request->sclp_req.status = SCLP_REQ_FILLED;
189 if (sclp_add_request(request) == 0)
190 return;
191 break;
192
193 default:
194 break;
195 }
196 sclp_vt220_process_queue(vt220_request);
197}
198
199/*
200 * Emit vt220 request buffer to SCLP. Return zero on success, non-zero
201 * otherwise.
202 */
203static int
204__sclp_vt220_emit(struct sclp_vt220_request *request)
205{
206 if (!(sclp_vt220_register.sclp_send_mask & EvTyp_VT220Msg_Mask)) {
207 request->sclp_req.status = SCLP_REQ_FAILED;
208 return -EIO;
209 }
210 request->sclp_req.command = SCLP_CMDW_WRITEDATA;
211 request->sclp_req.status = SCLP_REQ_FILLED;
212 request->sclp_req.callback = sclp_vt220_callback;
213 request->sclp_req.callback_data = (void *) request;
214
215 return sclp_add_request(&request->sclp_req);
216}
217
218/*
219 * Queue and emit given request.
220 */
221static void
222sclp_vt220_emit(struct sclp_vt220_request *request)
223{
224 unsigned long flags;
225 int count;
226
227 spin_lock_irqsave(&sclp_vt220_lock, flags);
228 list_add_tail(&request->list, &sclp_vt220_outqueue);
229 count = sclp_vt220_outqueue_count++;
230 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
231 /* Emit only the first buffer immediately - callback takes care of
232 * the rest */
233 if (count == 0 && __sclp_vt220_emit(request))
234 sclp_vt220_process_queue(request);
235}
236
237/*
238 * Queue and emit current request. Return zero on success, non-zero otherwise.
239 */
240static void
241sclp_vt220_emit_current(void)
242{
243 unsigned long flags;
244 struct sclp_vt220_request *request;
245 struct sclp_vt220_sccb *sccb;
246
247 spin_lock_irqsave(&sclp_vt220_lock, flags);
248 request = NULL;
249 if (sclp_vt220_current_request != NULL) {
250 sccb = (struct sclp_vt220_sccb *)
251 sclp_vt220_current_request->sclp_req.sccb;
252 /* Only emit buffers with content */
253 if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
254 request = sclp_vt220_current_request;
255 sclp_vt220_current_request = NULL;
256 if (timer_pending(&sclp_vt220_timer))
257 del_timer(&sclp_vt220_timer);
258 }
259 sclp_vt220_flush_later = 0;
260 }
261 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
262 if (request != NULL)
263 sclp_vt220_emit(request);
264}
265
266#define SCLP_NORMAL_WRITE 0x00
267
268/*
269 * Helper function to initialize a page with the sclp request structure.
270 */
271static struct sclp_vt220_request *
272sclp_vt220_initialize_page(void *page)
273{
274 struct sclp_vt220_request *request;
275 struct sclp_vt220_sccb *sccb;
276
277 /* Place request structure at end of page */
278 request = ((struct sclp_vt220_request *)
279 ((addr_t) page + PAGE_SIZE)) - 1;
280 request->retry_count = 0;
281 request->sclp_req.sccb = page;
282 /* SCCB goes at start of page */
283 sccb = (struct sclp_vt220_sccb *) page;
284 memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
285 sccb->header.length = sizeof(struct sclp_vt220_sccb);
286 sccb->header.function_code = SCLP_NORMAL_WRITE;
287 sccb->header.response_code = 0x0000;
288 sccb->evbuf.type = EvTyp_VT220Msg;
289 sccb->evbuf.length = sizeof(struct evbuf_header);
290
291 return request;
292}
293
294static inline unsigned int
295sclp_vt220_space_left(struct sclp_vt220_request *request)
296{
297 struct sclp_vt220_sccb *sccb;
298 sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
299 return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
300 sccb->header.length;
301}
302
303static inline unsigned int
304sclp_vt220_chars_stored(struct sclp_vt220_request *request)
305{
306 struct sclp_vt220_sccb *sccb;
307 sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
308 return sccb->evbuf.length - sizeof(struct evbuf_header);
309}
310
311/*
312 * Add msg to buffer associated with request. Return the number of characters
313 * added.
314 */
315static int
316sclp_vt220_add_msg(struct sclp_vt220_request *request,
317 const unsigned char *msg, int count, int convertlf)
318{
319 struct sclp_vt220_sccb *sccb;
320 void *buffer;
321 unsigned char c;
322 int from;
323 int to;
324
325 if (count > sclp_vt220_space_left(request))
326 count = sclp_vt220_space_left(request);
327 if (count <= 0)
328 return 0;
329
330 sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
331 buffer = (void *) ((addr_t) sccb + sccb->header.length);
332
333 if (convertlf) {
334 /* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
335 for (from=0, to=0;
336 (from < count) && (to < sclp_vt220_space_left(request));
337 from++) {
338 /* Retrieve character */
339 c = msg[from];
340 /* Perform conversion */
341 if (c == 0x0a) {
342 if (to + 1 < sclp_vt220_space_left(request)) {
343 ((unsigned char *) buffer)[to++] = c;
344 ((unsigned char *) buffer)[to++] = 0x0d;
345 } else
346 break;
347
348 } else
349 ((unsigned char *) buffer)[to++] = c;
350 }
351 sccb->header.length += to;
352 sccb->evbuf.length += to;
353 return from;
354 } else {
355 memcpy(buffer, (const void *) msg, count);
356 sccb->header.length += count;
357 sccb->evbuf.length += count;
358 return count;
359 }
360}
361
362/*
363 * Emit buffer after having waited long enough for more data to arrive.
364 */
365static void
366sclp_vt220_timeout(unsigned long data)
367{
368 sclp_vt220_emit_current();
369}
370
371#define BUFFER_MAX_DELAY HZ/2
372
373/*
374 * Internal implementation of the write function. Write COUNT bytes of data
375 * from memory at BUF
376 * to the SCLP interface. In case that the data does not fit into the current
377 * write buffer, emit the current one and allocate a new one. If there are no
378 * more empty buffers available, wait until one gets emptied. If DO_SCHEDULE
379 * is non-zero, the buffer will be scheduled for emitting after a timeout -
380 * otherwise the user has to explicitly call the flush function.
381 * A non-zero CONVERTLF parameter indicates that 0x0a characters in the message
382 * buffer should be converted to 0x0a 0x0d. After completion, return the number
383 * of bytes written.
384 */
385static int
386__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
387 int convertlf)
388{
389 unsigned long flags;
390 void *page;
391 int written;
392 int overall_written;
393
394 if (count <= 0)
395 return 0;
396 overall_written = 0;
397 spin_lock_irqsave(&sclp_vt220_lock, flags);
398 do {
399 /* Create a sclp output buffer if none exists yet */
400 if (sclp_vt220_current_request == NULL) {
401 while (list_empty(&sclp_vt220_empty)) {
402 spin_unlock_irqrestore(&sclp_vt220_lock,
403 flags);
404 if (in_interrupt())
405 sclp_sync_wait();
406 else
407 wait_event(sclp_vt220_waitq,
408 !list_empty(&sclp_vt220_empty));
409 spin_lock_irqsave(&sclp_vt220_lock, flags);
410 }
411 page = (void *) sclp_vt220_empty.next;
412 list_del((struct list_head *) page);
413 sclp_vt220_current_request =
414 sclp_vt220_initialize_page(page);
415 }
416 /* Try to write the string to the current request buffer */
417 written = sclp_vt220_add_msg(sclp_vt220_current_request,
418 buf, count, convertlf);
419 overall_written += written;
420 if (written == count)
421 break;
422 /*
423 * Not all characters could be written to the current
424 * output buffer. Emit the buffer, create a new buffer
425 * and then output the rest of the string.
426 */
427 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
428 sclp_vt220_emit_current();
429 spin_lock_irqsave(&sclp_vt220_lock, flags);
430 buf += written;
431 count -= written;
432 } while (count > 0);
433 /* Setup timer to output current console buffer after some time */
434 if (sclp_vt220_current_request != NULL &&
435 !timer_pending(&sclp_vt220_timer) && do_schedule) {
436 sclp_vt220_timer.function = sclp_vt220_timeout;
437 sclp_vt220_timer.data = 0UL;
438 sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
439 add_timer(&sclp_vt220_timer);
440 }
441 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
442 return overall_written;
443}
444
445/*
446 * This routine is called by the kernel to write a series of
447 * characters to the tty device. The characters may come from
448 * user space or kernel space. This routine will return the
449 * number of characters actually accepted for writing.
450 */
451static int
452sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
453{
454 return __sclp_vt220_write(buf, count, 1, 0);
455}
456
457#define SCLP_VT220_SESSION_ENDED 0x01
458#define SCLP_VT220_SESSION_STARTED 0x80
459#define SCLP_VT220_SESSION_DATA 0x00
460
461/*
462 * Called by the SCLP to report incoming event buffers.
463 */
464static void
465sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
466{
467 char *buffer;
468 unsigned int count;
469
470 /* Ignore input if device is not open */
471 if (sclp_vt220_tty == NULL)
472 return;
473
474 buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
475 count = evbuf->length - sizeof(struct evbuf_header);
476
477 switch (*buffer) {
478 case SCLP_VT220_SESSION_ENDED:
479 case SCLP_VT220_SESSION_STARTED:
480 break;
481 case SCLP_VT220_SESSION_DATA:
482 /* Send input to line discipline */
483 buffer++;
484 count--;
485 /* Prevent buffer overrun by discarding input. Note that
486 * because buffer_push works asynchronously, we cannot wait
487 * for the buffer to be emptied. */
488 if (count + sclp_vt220_tty->flip.count > TTY_FLIPBUF_SIZE)
489 count = TTY_FLIPBUF_SIZE - sclp_vt220_tty->flip.count;
490 memcpy(sclp_vt220_tty->flip.char_buf_ptr, buffer, count);
491 memset(sclp_vt220_tty->flip.flag_buf_ptr, TTY_NORMAL, count);
492 sclp_vt220_tty->flip.char_buf_ptr += count;
493 sclp_vt220_tty->flip.flag_buf_ptr += count;
494 sclp_vt220_tty->flip.count += count;
495 tty_flip_buffer_push(sclp_vt220_tty);
496 break;
497 }
498}
499
500/*
501 * This routine is called when a particular tty device is opened.
502 */
503static int
504sclp_vt220_open(struct tty_struct *tty, struct file *filp)
505{
506 if (tty->count == 1) {
507 sclp_vt220_tty = tty;
508 tty->driver_data = kmalloc(SCLP_VT220_BUF_SIZE, GFP_KERNEL);
509 if (tty->driver_data == NULL)
510 return -ENOMEM;
511 tty->low_latency = 0;
512 }
513 return 0;
514}
515
516/*
517 * This routine is called when a particular tty device is closed.
518 */
519static void
520sclp_vt220_close(struct tty_struct *tty, struct file *filp)
521{
522 if (tty->count == 1) {
523 sclp_vt220_tty = NULL;
524 kfree(tty->driver_data);
525 tty->driver_data = NULL;
526 }
527}
528
529/*
530 * This routine is called by the kernel to write a single
531 * character to the tty device. If the kernel uses this routine,
532 * it must call the flush_chars() routine (if defined) when it is
533 * done stuffing characters into the driver.
534 *
535 * NOTE: include/linux/tty_driver.h specifies that a character should be
536 * ignored if there is no room in the queue. This driver implements a different
537 * semantic in that it will block when there is no more room left.
538 */
539static void
540sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
541{
542 __sclp_vt220_write(&ch, 1, 0, 0);
543}
544
545/*
546 * This routine is called by the kernel after it has written a
547 * series of characters to the tty device using put_char().
548 */
549static void
550sclp_vt220_flush_chars(struct tty_struct *tty)
551{
552 if (sclp_vt220_outqueue_count == 0)
553 sclp_vt220_emit_current();
554 else
555 sclp_vt220_flush_later = 1;
556}
557
558/*
559 * This routine returns the numbers of characters the tty driver
560 * will accept for queuing to be written. This number is subject
561 * to change as output buffers get emptied, or if the output flow
562 * control is acted.
563 */
564static int
565sclp_vt220_write_room(struct tty_struct *tty)
566{
567 unsigned long flags;
568 struct list_head *l;
569 int count;
570
571 spin_lock_irqsave(&sclp_vt220_lock, flags);
572 count = 0;
573 if (sclp_vt220_current_request != NULL)
574 count = sclp_vt220_space_left(sclp_vt220_current_request);
575 list_for_each(l, &sclp_vt220_empty)
576 count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
577 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
578 return count;
579}
580
581/*
582 * Return number of buffered chars.
583 */
584static int
585sclp_vt220_chars_in_buffer(struct tty_struct *tty)
586{
587 unsigned long flags;
588 struct list_head *l;
589 struct sclp_vt220_request *r;
590 int count;
591
592 spin_lock_irqsave(&sclp_vt220_lock, flags);
593 count = 0;
594 if (sclp_vt220_current_request != NULL)
595 count = sclp_vt220_chars_stored(sclp_vt220_current_request);
596 list_for_each(l, &sclp_vt220_outqueue) {
597 r = list_entry(l, struct sclp_vt220_request, list);
598 count += sclp_vt220_chars_stored(r);
599 }
600 spin_unlock_irqrestore(&sclp_vt220_lock, flags);
601 return count;
602}
603
/* Emit the current buffer and wait until the entire output queue has
 * been drained by the SCLP. Uses sclp_sync_wait() (busy wait) so it is
 * safe from interrupt context, e.g. during panic. */
static void
__sclp_vt220_flush_buffer(void)
{
	unsigned long flags;

	sclp_vt220_emit_current();
	spin_lock_irqsave(&sclp_vt220_lock, flags);
	/* No more delayed emits - everything goes out now */
	if (timer_pending(&sclp_vt220_timer))
		del_timer(&sclp_vt220_timer);
	while (sclp_vt220_outqueue_count > 0) {
		/* Drop the lock so the request callback can make progress */
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
		sclp_sync_wait();
		spin_lock_irqsave(&sclp_vt220_lock, flags);
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}
620
621/*
622 * Pass on all buffers to the hardware. Return only when there are no more
623 * buffers pending.
624 */
625static void
626sclp_vt220_flush_buffer(struct tty_struct *tty)
627{
628 sclp_vt220_emit_current();
629}
630
631/*
632 * Initialize all relevant components and register driver with system.
633 */
634static int
635__sclp_vt220_init(int early)
636{
637 void *page;
638 int i;
639
640 if (sclp_vt220_initialized)
641 return 0;
642 sclp_vt220_initialized = 1;
643 spin_lock_init(&sclp_vt220_lock);
644 INIT_LIST_HEAD(&sclp_vt220_empty);
645 INIT_LIST_HEAD(&sclp_vt220_outqueue);
646 init_waitqueue_head(&sclp_vt220_waitq);
647 init_timer(&sclp_vt220_timer);
648 sclp_vt220_current_request = NULL;
649 sclp_vt220_buffered_chars = 0;
650 sclp_vt220_outqueue_count = 0;
651 sclp_vt220_tty = NULL;
652 sclp_vt220_flush_later = 0;
653
654 /* Allocate pages for output buffering */
655 for (i = 0; i < (early ? MAX_CONSOLE_PAGES : MAX_KMEM_PAGES); i++) {
656 if (early)
657 page = alloc_bootmem_low_pages(PAGE_SIZE);
658 else
659 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
660 if (!page)
661 return -ENOMEM;
662 list_add_tail((struct list_head *) page, &sclp_vt220_empty);
663 }
664 return 0;
665}
666
/* tty operations implemented by this driver; no ioctl/termios
 * handling is provided. */
static struct tty_operations sclp_vt220_ops = {
	.open = sclp_vt220_open,
	.close = sclp_vt220_close,
	.write = sclp_vt220_write,
	.put_char = sclp_vt220_put_char,
	.flush_chars = sclp_vt220_flush_chars,
	.write_room = sclp_vt220_write_room,
	.chars_in_buffer = sclp_vt220_chars_in_buffer,
	.flush_buffer = sclp_vt220_flush_buffer
};
677
678/*
679 * Register driver with SCLP and Linux and initialize internal tty structures.
680 */
681int __init
682sclp_vt220_tty_init(void)
683{
684 struct tty_driver *driver;
685 int rc;
686
687 /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
688 * symmetry between VM and LPAR systems regarding ttyS1. */
689 driver = alloc_tty_driver(1);
690 if (!driver)
691 return -ENOMEM;
692 rc = __sclp_vt220_init(0);
693 if (rc) {
694 put_tty_driver(driver);
695 return rc;
696 }
697 rc = sclp_register(&sclp_vt220_register);
698 if (rc) {
699 printk(KERN_ERR SCLP_VT220_PRINT_HEADER
700 "could not register tty - "
701 "sclp_register returned %d\n", rc);
702 put_tty_driver(driver);
703 return rc;
704 }
705
706 driver->owner = THIS_MODULE;
707 driver->driver_name = SCLP_VT220_DRIVER_NAME;
708 driver->name = SCLP_VT220_DEVICE_NAME;
709 driver->major = SCLP_VT220_MAJOR;
710 driver->minor_start = SCLP_VT220_MINOR;
711 driver->type = TTY_DRIVER_TYPE_SYSTEM;
712 driver->subtype = SYSTEM_TYPE_TTY;
713 driver->init_termios = tty_std_termios;
714 driver->flags = TTY_DRIVER_REAL_RAW;
715 tty_set_operations(driver, &sclp_vt220_ops);
716
717 rc = tty_register_driver(driver);
718 if (rc) {
719 printk(KERN_ERR SCLP_VT220_PRINT_HEADER
720 "could not register tty - "
721 "tty_register_driver returned %d\n", rc);
722 put_tty_driver(driver);
723 return rc;
724 }
725 sclp_vt220_driver = driver;
726 return 0;
727}
728
729module_init(sclp_vt220_tty_init);
730
731#ifdef CONFIG_SCLP_VT220_CONSOLE
732
/* Console write callback: emit with delayed (timer-driven) flush and
 * LF -> LF/CR conversion enabled. */
static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{
	__sclp_vt220_write((const unsigned char *) buf, count, 1, 1);
}
738
/* Return the tty driver (and index within it) backing this console. */
static struct tty_driver *
sclp_vt220_con_device(struct console *c, int *index)
{
	*index = 0;
	return sclp_vt220_driver;
}
745
746/*
747 * This routine is called from panic when the kernel is going to give up.
748 * We have to make sure that all buffers will be flushed to the SCLP.
749 * Note that this function may be called from within an interrupt context.
750 */
751static void
752sclp_vt220_con_unblank(void)
753{
754 __sclp_vt220_flush_buffer();
755}
756
/* Structure needed to register with printk; selects "ttyS1" as the
 * console device name. */
static struct console sclp_vt220_console =
{
	.name = SCLP_VT220_CONSOLE_NAME,
	.write = sclp_vt220_con_write,
	.device = sclp_vt220_con_device,
	.unblank = sclp_vt220_con_unblank,
	.flags = CON_PRINTBUFFER,	/* replay the log buffer on register */
	.index = SCLP_VT220_CONSOLE_INDEX
};
767
768static int __init
769sclp_vt220_con_init(void)
770{
771 int rc;
772
773 if (!CONSOLE_IS_SCLP)
774 return 0;
775 rc = __sclp_vt220_init(1);
776 if (rc)
777 return rc;
778 /* Attach linux console */
779 register_console(&sclp_vt220_console);
780 return 0;
781}
782
783console_initcall(sclp_vt220_con_init);
784#endif /* CONFIG_SCLP_VT220_CONSOLE */
785
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
new file mode 100644
index 000000000000..d04e6c2c3cc1
--- /dev/null
+++ b/drivers/s390/char/tape.h
@@ -0,0 +1,384 @@
1/*
2 * drivers/s390/char/tape.h
3 * tape device driver for 3480/3490E/3590 tapes.
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
10 */
11
12#ifndef _TAPE_H
13#define _TAPE_H
14
15#include <asm/ccwdev.h>
16#include <asm/debug.h>
17#include <asm/idals.h>
18#include <linux/config.h>
19#include <linux/blkdev.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/mtio.h>
23#include <linux/interrupt.h>
24#include <linux/workqueue.h>
25
26struct gendisk;
27
28/*
29 * Define DBF_LIKE_HELL for lots of messages in the debug feature.
30 */
31#define DBF_LIKE_HELL
32#ifdef DBF_LIKE_HELL
33#define DBF_LH(level, str, ...) \
34do { \
35 debug_sprintf_event(TAPE_DBF_AREA, level, str, ## __VA_ARGS__); \
36} while (0)
37#else
38#define DBF_LH(level, str, ...) do {} while(0)
39#endif
40
41/*
42 * macros s390 debug feature (dbf)
43 */
44#define DBF_EVENT(d_level, d_str...) \
45do { \
46 debug_sprintf_event(TAPE_DBF_AREA, d_level, d_str); \
47} while (0)
48
49#define DBF_EXCEPTION(d_level, d_str...) \
50do { \
51 debug_sprintf_exception(TAPE_DBF_AREA, d_level, d_str); \
52} while (0)
53
54#define TAPE_VERSION_MAJOR 2
55#define TAPE_VERSION_MINOR 0
56#define TAPE_MAGIC "tape"
57
58#define TAPE_MINORS_PER_DEV 2 /* two minors per device */
59#define TAPEBLOCK_HSEC_SIZE 2048
60#define TAPEBLOCK_HSEC_S2B 2
61#define TAPEBLOCK_RETRIES 5
62
/* State of the medium in a tape device (see tape_med_state_set()). */
enum tape_medium_state {
	MS_UNKNOWN,	/* medium presence not yet determined */
	MS_LOADED,
	MS_UNLOADED,
	MS_SIZE		/* number of entries - keep last */
};
69
/* Life-cycle state of a tape device (names in tape_state_verbose[]). */
enum tape_state {
	TS_UNUSED=0,
	TS_IN_USE,
	TS_BLKUSE,
	TS_INIT,
	TS_NOT_OPER,	/* device not operational */
	TS_SIZE		/* number of entries - keep last */
};
78
/* Operation type of a tape CCW request (names in tape_op_verbose[]). */
enum tape_op {
	TO_BLOCK,	/* Block read */
	TO_BSB,		/* Backward space block */
	TO_BSF,		/* Backward space filemark */
	TO_DSE,		/* Data security erase */
	TO_FSB,		/* Forward space block */
	TO_FSF,		/* Forward space filemark */
	TO_LBL,		/* Locate block label */
	TO_NOP,		/* No operation */
	TO_RBA,		/* Read backward */
	TO_RBI,		/* Read block information */
	TO_RFO,		/* Read forward */
	TO_REW,		/* Rewind tape */
	TO_RUN,		/* Rewind and unload tape */
	TO_WRI,		/* Write block */
	TO_WTM,		/* Write tape mark */
	TO_MSEN,	/* Medium sense */
	TO_LOAD,	/* Load tape */
	TO_READ_CONFIG,	/* Read configuration data */
	TO_READ_ATTMSG,	/* Read attention message */
	TO_DIS,		/* Tape display */
	TO_ASSIGN,	/* Assign tape to channel path */
	TO_UNASSIGN,	/* Unassign tape from channel path */
	TO_SIZE		/* #entries in tape_op_t */
};
104
105/* Forward declaration */
106struct tape_device;
107
108/* tape_request->status can be: */
109enum tape_request_status {
110 TAPE_REQUEST_INIT, /* request is ready to be processed */
111 TAPE_REQUEST_QUEUED, /* request is queued to be processed */
112 TAPE_REQUEST_IN_IO, /* request is currently in IO */
113 TAPE_REQUEST_DONE, /* request is completed. */
114};
115
/* Tape CCW request: a channel program plus bookkeeping for queueing,
 * retries and completion notification. */
struct tape_request {
	struct list_head list;		/* list head for request queueing. */
	struct tape_device *device;	/* tape device of this request */
	struct ccw1 *cpaddr;		/* address of the channel program. */
	void *cpdata;			/* pointer to ccw data. */
	enum tape_request_status status;/* status of this request */
	int options;			/* options for execution. */
	int retries;			/* retry counter for error recovery. */
	int rescnt;			/* residual count from devstat. */

	/* Callback for delivering final status. */
	void (*callback)(struct tape_request *, void *);
	void *callback_data;

	enum tape_op op;		/* operation type (enum tape_op) */
	int rc;				/* final return code of the request */
};
134
135/* Function type for magnetic tape commands */
136typedef int (*tape_mtop_fn)(struct tape_device *, int);
137
138/* Size of the array containing the mtops for a discipline */
139#define TAPE_NR_MTOPS (MTMKPART+1)
140
/* Tape Discipline: the set of device-type specific operations a
 * backend (e.g. tape_34xx) provides to the generic tape core. */
struct tape_discipline {
	struct module *owner;
	int  (*setup_device)(struct tape_device *);
	void (*cleanup_device)(struct tape_device *);
	/* Interrupt handler; returns a TAPE_IO_* code or a negative errno */
	int (*irq)(struct tape_device *, struct tape_request *, struct irb *);
	struct tape_request *(*read_block)(struct tape_device *, size_t);
	struct tape_request *(*write_block)(struct tape_device *, size_t);
	void (*process_eov)(struct tape_device*);
#ifdef CONFIG_S390_TAPE_BLOCK
	/* Block device stuff. */
	struct tape_request *(*bread)(struct tape_device *, struct request *);
	void (*check_locate)(struct tape_device *, struct tape_request *);
	void (*free_bread)(struct tape_request *);
#endif
	/* ioctl function for additional ioctls. */
	int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long);
	/* Array of tape commands with TAPE_NR_MTOPS entries */
	tape_mtop_fn *mtop_array;
};
161
162/*
163 * The discipline irq function either returns an error code (<0) which
164 * means that the request has failed with an error or one of the following:
165 */
166#define TAPE_IO_SUCCESS 0 /* request successful */
167#define TAPE_IO_PENDING 1 /* request still running */
168#define TAPE_IO_RETRY 2 /* retry to current request */
169#define TAPE_IO_STOP 3 /* stop the running request */
170
/* Char Frontend Data: per-device state of the character interface. */
struct tape_char_data {
	struct idal_buffer *idal_buf;	/* idal buffer for user char data */
	int block_size;			/*   of size block_size. */
};
176
177#ifdef CONFIG_S390_TAPE_BLOCK
/* Block Frontend Data: per-device state of the block-device interface. */
struct tape_blk_data
{
	/* Block device request queue. */
	request_queue_t *	request_queue;
	spinlock_t		request_queue_lock;

	/* Task to move entries from block request to CCS request queue. */
	struct work_struct	requeue_task;
	atomic_t		requeue_scheduled;

	/* Current position on the tape. */
	long			block_position;
	int			medium_changed;
	struct gendisk *	disk;
};
194#endif
195
/* Tape Info: the central per-device structure shared by the tape core
 * and the character/block frontends. */
struct tape_device {
	/* entry in tape_device_list */
	struct list_head		node;

	int				cdev_id;
	struct ccw_device *		cdev;
	struct tape_class_device *	nt;
	struct tape_class_device *	rt;

	/* Device discipline information. */
	struct tape_discipline *	discipline;
	void *				discdata;

	/* Generic status flags */
	long				tape_generic_status;

	/* Device state information. */
	wait_queue_head_t		state_change_wq;
	enum tape_state			tape_state;
	enum tape_medium_state		medium_state;
	unsigned char *			modeset_byte;

	/* Reference count. */
	atomic_t			ref_count;

	/* Request queue. */
	struct list_head		req_queue;

	/* Each tape device has (currently) two minor numbers. */
	int				first_minor;

	/* Number of tapemarks required for correct termination. */
	int				required_tapemarks;

	/* Block ID of the BOF */
	unsigned int			bof;

	/* Character device frontend data */
	struct tape_char_data		char_data;
#ifdef CONFIG_S390_TAPE_BLOCK
	/* Block dev frontend data */
	struct tape_blk_data		blk_data;
#endif
};
241
242/* Externals from tape_core.c */
243extern struct tape_request *tape_alloc_request(int cplength, int datasize);
244extern void tape_free_request(struct tape_request *);
245extern int tape_do_io(struct tape_device *, struct tape_request *);
246extern int tape_do_io_async(struct tape_device *, struct tape_request *);
247extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
248void tape_hotplug_event(struct tape_device *, int major, int action);
249
/* Perform synchronous tape I/O and release the request afterwards,
 * regardless of the outcome. Returns the I/O result. */
static inline int
tape_do_io_free(struct tape_device *device, struct tape_request *request)
{
	int rc = tape_do_io(device, request);

	tape_free_request(request);
	return rc;
}
259
260extern int tape_oper_handler(int irq, int status);
261extern void tape_noper_handler(int irq, int status);
262extern int tape_open(struct tape_device *);
263extern int tape_release(struct tape_device *);
264extern int tape_mtop(struct tape_device *, int, int);
265extern void tape_state_set(struct tape_device *, enum tape_state);
266
267extern int tape_generic_online(struct tape_device *, struct tape_discipline *);
268extern int tape_generic_offline(struct tape_device *device);
269
270/* Externals from tape_devmap.c */
271extern int tape_generic_probe(struct ccw_device *);
272extern void tape_generic_remove(struct ccw_device *);
273
274extern struct tape_device *tape_get_device(int devindex);
275extern struct tape_device *tape_get_device_reference(struct tape_device *);
276extern struct tape_device *tape_put_device(struct tape_device *);
277
278/* Externals from tape_char.c */
279extern int tapechar_init(void);
280extern void tapechar_exit(void);
281extern int tapechar_setup_device(struct tape_device *);
282extern void tapechar_cleanup_device(struct tape_device *);
283
284/* Externals from tape_block.c */
285#ifdef CONFIG_S390_TAPE_BLOCK
286extern int tapeblock_init (void);
287extern void tapeblock_exit(void);
288extern int tapeblock_setup_device(struct tape_device *);
289extern void tapeblock_cleanup_device(struct tape_device *);
290#else
291static inline int tapeblock_init (void) {return 0;}
292static inline void tapeblock_exit (void) {;}
293static inline int tapeblock_setup_device(struct tape_device *t) {return 0;}
294static inline void tapeblock_cleanup_device (struct tape_device *t) {;}
295#endif
296
297/* tape initialisation functions */
298#ifdef CONFIG_PROC_FS
299extern void tape_proc_init (void);
300extern void tape_proc_cleanup (void);
301#else
302static inline void tape_proc_init (void) {;}
303static inline void tape_proc_cleanup (void) {;}
304#endif
305
306/* a function for dumping device sense info */
307extern void tape_dump_sense(struct tape_device *, struct tape_request *,
308 struct irb *);
309extern void tape_dump_sense_dbf(struct tape_device *, struct tape_request *,
310 struct irb *);
311
312/* functions for handling the status of a device */
313extern void tape_med_state_set(struct tape_device *, enum tape_medium_state);
314
315/* The debug area */
316extern debug_info_t *TAPE_DBF_AREA;
317
318/* functions for building ccws */
/* Build a command-chained CCW transferring MEMSIZE bytes at CDA;
 * returns a pointer to the next free CCW slot. */
static inline struct ccw1 *
tape_ccw_cc(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = CCW_FLAG_CC;
	ccw->count = memsize;
	ccw->cda = (__u32)(addr_t) cda;
	return ccw + 1;
}
328
/* Build the final (unchained) CCW of a channel program transferring
 * MEMSIZE bytes at CDA; returns a pointer to the next CCW slot. */
static inline struct ccw1 *
tape_ccw_end(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = 0;
	ccw->count = memsize;
	ccw->cda = (__u32)(addr_t) cda;
	return ccw + 1;
}
338
/* Build an unchained no-data CCW; the data address points at the CCW's
 * own command code as a placeholder. Returns the next CCW slot. */
static inline struct ccw1 *
tape_ccw_cmd(struct ccw1 *ccw, __u8 cmd_code)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = 0;
	ccw->count = 0;
	ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
	return ccw + 1;
}
348
349static inline struct ccw1 *
350tape_ccw_repeat(struct ccw1 *ccw, __u8 cmd_code, int count)
351{
352 while (count-- > 0) {
353 ccw->cmd_code = cmd_code;
354 ccw->flags = CCW_FLAG_CC;
355 ccw->count = 0;
356 ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
357 ccw++;
358 }
359 return ccw;
360}
361
362static inline struct ccw1 *
363tape_ccw_cc_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
364{
365 ccw->cmd_code = cmd_code;
366 ccw->flags = CCW_FLAG_CC;
367 idal_buffer_set_cda(idal, ccw);
368 return ccw++;
369}
370
371static inline struct ccw1 *
372tape_ccw_end_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
373{
374 ccw->cmd_code = cmd_code;
375 ccw->flags = 0;
376 idal_buffer_set_cda(idal, ccw);
377 return ccw++;
378}
379
380/* Global vars */
381extern const char *tape_state_verbose[];
382extern const char *tape_op_verbose[];
383
384#endif /* for ifdef tape.h */
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
new file mode 100644
index 000000000000..480ec87976fb
--- /dev/null
+++ b/drivers/s390/char/tape_34xx.c
@@ -0,0 +1,1385 @@
1/*
2 * drivers/s390/char/tape_34xx.c
3 * tape device discipline for 3480/3490 tapes.
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/bio.h>
16#include <linux/workqueue.h>
17
18#define TAPE_DBF_AREA tape_34xx_dbf
19
20#include "tape.h"
21#include "tape_std.h"
22
23#define PRINTK_HEADER "TAPE_34XX: "
24
25/*
26 * Pointer to debug area.
27 */
28debug_info_t *TAPE_DBF_AREA = NULL;
29EXPORT_SYMBOL(TAPE_DBF_AREA);
30
/*
 * Supported control unit types. The enumerators are compared against
 * device->cdev->id.driver_info to distinguish 3480 from 3490 behavior.
 */
enum tape_34xx_type {
	tape_3480,
	tape_3490,
};
35
/* Recording formats — presumably the values carried in the 2-bit
 * 'format' field of struct tape_34xx_block_id below (TODO confirm). */
#define TAPE34XX_FMT_3480	0
#define TAPE34XX_FMT_3480_2_XF	1
#define TAPE34XX_FMT_3480_XF	2

/*
 * Hardware block ID: physical position (wrap/segment), recording
 * format and a 22-bit logical block number, packed into 32 bits.
 */
struct tape_34xx_block_id {
	unsigned int wrap : 1;		/* physical wrap on the tape */
	unsigned int segment : 7;	/* segment within the wrap */
	unsigned int format : 2;	/* recording format */
	unsigned int block : 22;	/* logical block number */
};
46
/*
 * A list of block IDs is kept to speed up seeking to blocks.
 */
/* One cached search block ID, linked into the per-device list that
 * hangs off device->discdata. */
struct tape_34xx_sbid {
	struct list_head list;		/* link in the per-device list */
	struct tape_34xx_block_id bid;	/* cached block id */
};
54
55static void tape_34xx_delete_sbid_from(struct tape_device *, int);
56
57/*
58 * Medium sense for 34xx tapes. There is no 'real' medium sense call.
59 * So we just do a normal sense.
60 */
static int
tape_34xx_medium_sense(struct tape_device *device)
{
	struct tape_request *request;
	unsigned char *sense;
	int rc;

	/* One CCW, 32 bytes of sense data. */
	request = tape_alloc_request(1, 32);
	if (IS_ERR(request)) {
		DBF_EXCEPTION(6, "MSEN fail\n");
		return PTR_ERR(request);
	}

	request->op = TO_MSEN;
	tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);

	rc = tape_do_io_interruptible(device, request);
	/*
	 * NOTE(review): success is tested via request->rc while the
	 * function returns rc — presumably these agree after the call,
	 * but confirm against tape_do_io_interruptible().
	 */
	if (request->rc == 0) {
		sense = request->cpdata;

		/*
		 * This isn't quite correct. But since INTERVENTION_REQUIRED
		 * means that the drive is 'neither ready nor on-line' it is
		 * only slightly inaccurate to say there is no tape loaded if
		 * the drive isn't online...
		 */
		if (sense[0] & SENSE_INTERVENTION_REQUIRED)
			tape_med_state_set(device, MS_UNLOADED);
		else
			tape_med_state_set(device, MS_LOADED);

		/* Mirror the write-protect tab into the generic status. */
		if (sense[1] & SENSE_WRITE_PROTECT)
			device->tape_generic_status |= GMT_WR_PROT(~0);
		else
			device->tape_generic_status &= ~GMT_WR_PROT(~0);
	} else {
		DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
			request->rc);
	}
	tape_free_request(request);

	return rc;
}
104
105/*
106 * These functions are currently used only to schedule a medium_sense for
107 * later execution. This is because we get an interrupt whenever a medium
108 * is inserted but cannot call tape_do_io* from an interrupt context.
109 * Maybe that's useful for other actions we want to start from the
110 * interrupt handler.
111 */
112static void
113tape_34xx_work_handler(void *data)
114{
115 struct {
116 struct tape_device *device;
117 enum tape_op op;
118 struct work_struct work;
119 } *p = data;
120
121 switch(p->op) {
122 case TO_MSEN:
123 tape_34xx_medium_sense(p->device);
124 break;
125 default:
126 DBF_EVENT(3, "T34XX: internal error: unknown work\n");
127 }
128
129 p->device = tape_put_device(p->device);
130 kfree(p);
131}
132
133static int
134tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
135{
136 struct {
137 struct tape_device *device;
138 enum tape_op op;
139 struct work_struct work;
140 } *p;
141
142 if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
143 return -ENOMEM;
144
145 memset(p, 0, sizeof(*p));
146 INIT_WORK(&p->work, tape_34xx_work_handler, p);
147
148 p->device = tape_get_device_reference(device);
149 p->op = op;
150
151 schedule_work(&p->work);
152 return 0;
153}
154
155/*
156 * Done Handler is called when dev stat = DEVICE-END (successful operation)
157 */
158static inline int
159tape_34xx_done(struct tape_request *request)
160{
161 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
162
163 switch (request->op) {
164 case TO_DSE:
165 case TO_RUN:
166 case TO_WRI:
167 case TO_WTM:
168 case TO_ASSIGN:
169 case TO_UNASSIGN:
170 tape_34xx_delete_sbid_from(request->device, 0);
171 break;
172 default:
173 ;
174 }
175 return TAPE_IO_SUCCESS;
176}
177
178static inline int
179tape_34xx_erp_failed(struct tape_request *request, int rc)
180{
181 DBF_EVENT(3, "Error recovery failed for %s (RC=%d)\n",
182 tape_op_verbose[request->op], rc);
183 return rc;
184}
185
186static inline int
187tape_34xx_erp_succeeded(struct tape_request *request)
188{
189 DBF_EVENT(3, "Error Recovery successful for %s\n",
190 tape_op_verbose[request->op]);
191 return tape_34xx_done(request);
192}
193
194static inline int
195tape_34xx_erp_retry(struct tape_request *request)
196{
197 DBF_EVENT(3, "xerp retr %s\n", tape_op_verbose[request->op]);
198 return TAPE_IO_RETRY;
199}
200
201/*
202 * This function is called, when no request is outstanding and we get an
203 * interrupt
204 */
205static int
206tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
207{
208 if (irb->scsw.dstat == 0x85 /* READY */) {
209 /* A medium was inserted in the drive. */
210 DBF_EVENT(6, "xuud med\n");
211 tape_34xx_delete_sbid_from(device, 0);
212 tape_34xx_schedule_work(device, TO_MSEN);
213 } else {
214 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
215 PRINT_WARN("Unsolicited IRQ (Device End) caught.\n");
216 tape_dump_sense(device, NULL, irb);
217 }
218 return TAPE_IO_SUCCESS;
219}
220
221/*
222 * Read Opposite Error Recovery Function:
223 * Used, when Read Forward does not work
224 */
225static int
226tape_34xx_erp_read_opposite(struct tape_device *device,
227 struct tape_request *request)
228{
229 if (request->op == TO_RFO) {
230 /*
231 * We did read forward, but the data could not be read
232 * *correctly*. We transform the request to a read backward
233 * and try again.
234 */
235 tape_std_read_backward(device, request);
236 return tape_34xx_erp_retry(request);
237 }
238 if (request->op != TO_RBA)
239 PRINT_ERR("read_opposite called with state:%s\n",
240 tape_op_verbose[request->op]);
241 /*
242 * We tried to read forward and backward, but hat no
243 * success -> failed.
244 */
245 return tape_34xx_erp_failed(request, -EIO);
246}
247
248static int
249tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request,
250 struct irb *irb, int no)
251{
252 if (request->op != TO_ASSIGN) {
253 PRINT_WARN("An unexpected condition #%d was caught in "
254 "tape error recovery.\n", no);
255 PRINT_WARN("Please report this incident.\n");
256 if (request)
257 PRINT_WARN("Operation of tape:%s\n",
258 tape_op_verbose[request->op]);
259 tape_dump_sense(device, request, irb);
260 }
261 return tape_34xx_erp_failed(request, -EIO);
262}
263
264/*
265 * Handle data overrun between cu and drive. The channel speed might
266 * be too slow.
267 */
268static int
269tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request,
270 struct irb *irb)
271{
272 if (irb->ecw[3] == 0x40) {
273 PRINT_WARN ("Data overrun error between control-unit "
274 "and drive. Use a faster channel connection, "
275 "if possible! \n");
276 return tape_34xx_erp_failed(request, -EIO);
277 }
278 return tape_34xx_erp_bug(device, request, irb, -1);
279}
280
281/*
282 * Handle record sequence error.
283 */
284static int
285tape_34xx_erp_sequence(struct tape_device *device,
286 struct tape_request *request, struct irb *irb)
287{
288 if (irb->ecw[3] == 0x41) {
289 /*
290 * cu detected incorrect block-id sequence on tape.
291 */
292 PRINT_WARN("Illegal block-id sequence found!\n");
293 return tape_34xx_erp_failed(request, -EIO);
294 }
295 /*
296 * Record sequence error bit is set, but erpa does not
297 * show record sequence error.
298 */
299 return tape_34xx_erp_bug(device, request, irb, -2);
300}
301
302/*
303 * This function analyses the tape's sense-data in case of a unit-check.
304 * If possible, it tries to recover from the error. Else the user is
305 * informed about the problem.
306 */
static int
tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
		     struct irb *irb)
{
	int inhibit_cu_recovery;
	__u8* sense;

	/* NOTE(review): per the variable name, modeset bit 0x80 appears
	 * to inhibit control-unit recovery — confirm against the modeset
	 * byte definition. */
	inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0;
	sense = irb->ecw;

#ifdef CONFIG_S390_TAPE_BLOCK
	if (request->op == TO_BLOCK) {
		/*
		 * Recovery for block device requests. Set the block_position
		 * to something invalid and retry.
		 */
		device->blk_data.block_position = -1;
		if (request->retries-- <= 0)
			return tape_34xx_erp_failed(request, -EIO);
		else
			return tape_34xx_erp_retry(request);
	}
#endif

	if (
		sense[0] & SENSE_COMMAND_REJECT &&
		sense[1] & SENSE_WRITE_PROTECT
	) {
		if (
			request->op == TO_DSE ||
			request->op == TO_WRI ||
			request->op == TO_WTM
		) {
			/* medium is write protected */
			return tape_34xx_erp_failed(request, -EACCES);
		} else {
			return tape_34xx_erp_bug(device, request, irb, -3);
		}
	}

	/*
	 * Special cases for various tape-states when reaching
	 * end of recorded area
	 *
	 * FIXME: Maybe a special case of the special case:
	 * sense[0] == SENSE_EQUIPMENT_CHECK &&
	 * sense[1] == SENSE_DRIVE_ONLINE &&
	 * sense[3] == 0x47 (Volume Fenced)
	 *
	 * This was caused by continued FSF or FSR after an
	 * 'End Of Data'.
	 */
	if ((
		sense[0] == SENSE_DATA_CHECK ||
		sense[0] == SENSE_EQUIPMENT_CHECK ||
		sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK
	) && (
		sense[1] == SENSE_DRIVE_ONLINE ||
		sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE
	)) {
		switch (request->op) {
		/*
		 * sense[0] == SENSE_DATA_CHECK &&
		 * sense[1] == SENSE_DRIVE_ONLINE
		 * sense[3] == 0x36 (End Of Data)
		 *
		 * Further seeks might return a 'Volume Fenced'.
		 */
		case TO_FSF:
		case TO_FSB:
			/* Trying to seek beyond end of recorded area */
			return tape_34xx_erp_failed(request, -ENOSPC);
		case TO_BSB:
			return tape_34xx_erp_retry(request);

		/*
		 * sense[0] == SENSE_DATA_CHECK &&
		 * sense[1] == SENSE_DRIVE_ONLINE &&
		 * sense[3] == 0x36 (End Of Data)
		 */
		case TO_LBL:
			/* Block could not be located. */
			tape_34xx_delete_sbid_from(device, 0);
			return tape_34xx_erp_failed(request, -EIO);

		case TO_RFO:
			/* Read beyond end of recorded area -> 0 bytes read */
			return tape_34xx_erp_failed(request, 0);

		/*
		 * sense[0] == SENSE_EQUIPMENT_CHECK &&
		 * sense[1] == SENSE_DRIVE_ONLINE &&
		 * sense[3] == 0x38 (Physical End Of Volume)
		 */
		case TO_WRI:
			/* Writing at physical end of volume */
			return tape_34xx_erp_failed(request, -ENOSPC);
		default:
			PRINT_ERR("Invalid op in %s:%i\n",
				  __FUNCTION__, __LINE__);
			return tape_34xx_erp_failed(request, 0);
		}
	}

	/* Sensing special bits */
	if (sense[0] & SENSE_BUS_OUT_CHECK)
		return tape_34xx_erp_retry(request);

	if (sense[0] & SENSE_DATA_CHECK) {
		/*
		 * hardware failure, damaged tape or improper
		 * operating conditions
		 */
		switch (sense[3]) {
		case 0x23:
			/* a read data check occurred */
			if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
			    inhibit_cu_recovery)
				// data check is not permanent, may be
				// recovered. We always use async-mode with
				// cu-recovery, so this should *never* happen.
				return tape_34xx_erp_bug(device, request,
							 irb, -4);

			/* data check is permanent, CU recovery has failed */
			PRINT_WARN("Permanent read error\n");
			return tape_34xx_erp_failed(request, -EIO);
		case 0x25:
			// a write data check occurred
			if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
			    inhibit_cu_recovery)
				// data check is not permanent, may be
				// recovered. We always use async-mode with
				// cu-recovery, so this should *never* happen.
				return tape_34xx_erp_bug(device, request,
							 irb, -5);

			// data check is permanent, cu-recovery has failed
			PRINT_WARN("Permanent write error\n");
			return tape_34xx_erp_failed(request, -EIO);
		case 0x26:
			/* Data Check (read opposite) occurred. */
			return tape_34xx_erp_read_opposite(device, request);
		case 0x28:
			/* ID-Mark at tape start couldn't be written */
			PRINT_WARN("ID-Mark could not be written.\n");
			return tape_34xx_erp_failed(request, -EIO);
		case 0x31:
			/* Tape void. Tried to read beyond end of device. */
			PRINT_WARN("Read beyond end of recorded area.\n");
			return tape_34xx_erp_failed(request, -ENOSPC);
		case 0x41:
			/* Record sequence error. */
			PRINT_WARN("Invalid block-id sequence found.\n");
			return tape_34xx_erp_failed(request, -EIO);
		default:
			/* all data checks for 3480 should result in one of
			 * the above erpa-codes. For 3490, other data-check
			 * conditions do exist. */
			if (device->cdev->id.driver_info == tape_3480)
				return tape_34xx_erp_bug(device, request,
							 irb, -6);
		}
	}

	if (sense[0] & SENSE_OVERRUN)
		return tape_34xx_erp_overrun(device, request, irb);

	if (sense[1] & SENSE_RECORD_SEQUENCE_ERR)
		return tape_34xx_erp_sequence(device, request, irb);

	/* Sensing erpa codes */
	switch (sense[3]) {
	case 0x00:
		/* Unit check with erpa code 0. Report and ignore. */
		PRINT_WARN("Non-error sense was found. "
			   "Unit-check will be ignored.\n");
		return TAPE_IO_SUCCESS;
	case 0x21:
		/*
		 * Data streaming not operational. CU will switch to
		 * interlock mode. Reissue the command.
		 */
		PRINT_WARN("Data streaming not operational. "
			   "Switching to interlock-mode.\n");
		return tape_34xx_erp_retry(request);
	case 0x22:
		/*
		 * Path equipment check. Might be drive adapter error, buffer
		 * error on the lower interface, internal path not usable,
		 * or error during cartridge load.
		 */
		PRINT_WARN("A path equipment check occurred. One of the "
			   "following conditions occurred:\n");
		PRINT_WARN("drive adapter error, buffer error on the lower "
			   "interface, internal path not usable, error "
			   "during cartridge load.\n");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x24:
		/*
		 * Load display check. A load display command was issued,
		 * but the drive is displaying a drive check message. Can
		 * be treated as "device end".
		 */
		return tape_34xx_erp_succeeded(request);
	case 0x27:
		/*
		 * Command reject. May indicate illegal channel program or
		 * buffer over/underrun. Since all channel programs are
		 * issued by this driver and ought be correct, we assume a
		 * over/underrun situation and retry the channel program.
		 */
		return tape_34xx_erp_retry(request);
	case 0x29:
		/*
		 * Function incompatible. Either the tape is idrc compressed
		 * but the hardware isn't capable to do idrc, or a perform
		 * subsystem func is issued and the CU is not on-line.
		 */
		PRINT_WARN ("Function incompatible. Try to switch off idrc\n");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x2a:
		/*
		 * Unsolicited environmental data. An internal counter
		 * overflows, we can ignore this and reissue the cmd.
		 */
		return tape_34xx_erp_retry(request);
	case 0x2b:
		/*
		 * Environmental data present. Indicates either unload
		 * completed ok or read buffered log command completed ok.
		 */
		if (request->op == TO_RUN) {
			/* Rewind unload completed ok. */
			tape_med_state_set(device, MS_UNLOADED);
			return tape_34xx_erp_succeeded(request);
		}
		/* tape_34xx doesn't use read buffered log commands. */
		return tape_34xx_erp_bug(device, request, irb, sense[3]);
	case 0x2c:
		/*
		 * Permanent equipment check. CU has tried recovery, but
		 * did not succeed.
		 */
		return tape_34xx_erp_failed(request, -EIO);
	case 0x2d:
		/* Data security erase failure. */
		if (request->op == TO_DSE)
			return tape_34xx_erp_failed(request, -EIO);
		/* Data security erase failure, but no such command issued. */
		return tape_34xx_erp_bug(device, request, irb, sense[3]);
	case 0x2e:
		/*
		 * Not capable. This indicates either that the drive fails
		 * reading the format id mark or that the format specified
		 * is not supported by the drive.
		 */
		PRINT_WARN("Drive not capable processing the tape format!\n");
		return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
	case 0x30:
		/* The medium is write protected. */
		PRINT_WARN("Medium is write protected!\n");
		return tape_34xx_erp_failed(request, -EACCES);
	case 0x32:
		// Tension loss. We cannot recover this, it's an I/O error.
		PRINT_WARN("The drive lost tape tension.\n");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x33:
		/*
		 * Load Failure. The cartridge was not inserted correctly or
		 * the tape is not threaded correctly.
		 */
		PRINT_WARN("Cartridge load failure. Reload the cartridge "
			   "and try again.\n");
		tape_34xx_delete_sbid_from(device, 0);
		return tape_34xx_erp_failed(request, -EIO);
	case 0x34:
		/*
		 * Unload failure. The drive cannot maintain tape tension
		 * and control tape movement during an unload operation.
		 */
		PRINT_WARN("Failure during cartridge unload. "
			   "Please try manually.\n");
		if (request->op == TO_RUN)
			return tape_34xx_erp_failed(request, -EIO);
		return tape_34xx_erp_bug(device, request, irb, sense[3]);
	case 0x35:
		/*
		 * Drive equipment check. One of the following:
		 * - cu cannot recover from a drive detected error
		 * - a check code message is shown on drive display
		 * - the cartridge loader does not respond correctly
		 * - a failure occurs during an index, load, or unload cycle
		 */
		PRINT_WARN("Equipment check! Please check the drive and "
			   "the cartridge loader.\n");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x36:
		if (device->cdev->id.driver_info == tape_3490)
			/* End of data. */
			return tape_34xx_erp_failed(request, -EIO);
		/* This erpa is reserved for 3480 */
		return tape_34xx_erp_bug(device, request, irb, sense[3]);
	case 0x37:
		/*
		 * Tape length error. The tape is shorter than reported in
		 * the beginning-of-tape data.
		 */
		PRINT_WARN("Tape length error.\n");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x38:
		/*
		 * Physical end of tape. A read/write operation reached
		 * the physical end of tape.
		 */
		if (request->op==TO_WRI ||
		    request->op==TO_DSE ||
		    request->op==TO_WTM)
			return tape_34xx_erp_failed(request, -ENOSPC);
		return tape_34xx_erp_failed(request, -EIO);
	case 0x39:
		/* Backward at Beginning of tape. */
		return tape_34xx_erp_failed(request, -EIO);
	case 0x3a:
		/* Drive switched to not ready. */
		PRINT_WARN("Drive not ready. Turn the ready/not ready switch "
			   "to ready position and try again.\n");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x3b:
		/* Manual rewind or unload. This causes an I/O error. */
		PRINT_WARN("Medium was rewound or unloaded manually.\n");
		tape_34xx_delete_sbid_from(device, 0);
		return tape_34xx_erp_failed(request, -EIO);
	case 0x42:
		/*
		 * Degraded mode. A condition that can cause degraded
		 * performance is detected.
		 */
		PRINT_WARN("Subsystem is running in degraded mode.\n");
		return tape_34xx_erp_retry(request);
	case 0x43:
		/* Drive not ready. */
		tape_34xx_delete_sbid_from(device, 0);
		tape_med_state_set(device, MS_UNLOADED);
		/* Some commands are successful even in this case */
		if (sense[1] & SENSE_DRIVE_ONLINE) {
			switch(request->op) {
			case TO_ASSIGN:
			case TO_UNASSIGN:
			case TO_DIS:
			case TO_NOP:
				return tape_34xx_done(request);
				break;
			default:
				break;
			}
		}
		PRINT_WARN("The drive is not ready.\n");
		return tape_34xx_erp_failed(request, -ENOMEDIUM);
	case 0x44:
		/* Locate Block unsuccessful. */
		if (request->op != TO_BLOCK && request->op != TO_LBL)
			/* No locate block was issued. */
			return tape_34xx_erp_bug(device, request,
						 irb, sense[3]);
		return tape_34xx_erp_failed(request, -EIO);
	case 0x45:
		/* The drive is assigned to a different channel path. */
		PRINT_WARN("The drive is assigned elsewhere.\n");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x46:
		/*
		 * Drive not on-line. Drive may be switched offline,
		 * the power supply may be switched off or
		 * the drive address may not be set correctly.
		 */
		PRINT_WARN("The drive is not on-line.");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x47:
		/* Volume fenced. CU reports volume integrity is lost. */
		PRINT_WARN("Volume fenced. The volume integrity is lost.\n");
		tape_34xx_delete_sbid_from(device, 0);
		return tape_34xx_erp_failed(request, -EIO);
	case 0x48:
		/* Log sense data and retry request. */
		return tape_34xx_erp_retry(request);
	case 0x49:
		/* Bus out check. A parity check error on the bus was found. */
		PRINT_WARN("Bus out check. A data transfer over the bus "
			   "has been corrupted.\n");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x4a:
		/* Control unit erp failed. */
		PRINT_WARN("The control unit I/O error recovery failed.\n");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x4b:
		/*
		 * CU and drive incompatible. The drive requests micro-program
		 * patches, which are not available on the CU.
		 */
		PRINT_WARN("The drive needs microprogram patches from the "
			   "control unit, which are not available.\n");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x4c:
		/*
		 * Recovered Check-One failure. Cu develops a hardware error,
		 * but is able to recover.
		 */
		return tape_34xx_erp_retry(request);
	case 0x4d:
		if (device->cdev->id.driver_info == tape_3490)
			/*
			 * Resetting event received. Since the driver does
			 * not support resetting event recovery (which has to
			 * be handled by the I/O Layer), retry our command.
			 */
			return tape_34xx_erp_retry(request);
		/* This erpa is reserved for 3480. */
		return tape_34xx_erp_bug(device, request, irb, sense[3]);
	case 0x4e:
		if (device->cdev->id.driver_info == tape_3490) {
			/*
			 * Maximum block size exceeded. This indicates, that
			 * the block to be written is larger than allowed for
			 * buffered mode.
			 */
			PRINT_WARN("Maximum block size for buffered "
				   "mode exceeded.\n");
			return tape_34xx_erp_failed(request, -ENOBUFS);
		}
		/* This erpa is reserved for 3480. */
		return tape_34xx_erp_bug(device, request, irb, sense[3]);
	case 0x50:
		/*
		 * Read buffered log (Overflow). CU is running in extended
		 * buffered log mode, and a counter overflows. This should
		 * never happen, since we're never running in extended
		 * buffered log mode.
		 */
		return tape_34xx_erp_retry(request);
	case 0x51:
		/*
		 * Read buffered log (EOV). EOF processing occurs while the
		 * CU is in extended buffered log mode. This should never
		 * happen, since we're never running in extended buffered
		 * log mode.
		 */
		return tape_34xx_erp_retry(request);
	case 0x52:
		/* End of Volume complete. Rewind unload completed ok. */
		if (request->op == TO_RUN) {
			tape_med_state_set(device, MS_UNLOADED);
			tape_34xx_delete_sbid_from(device, 0);
			return tape_34xx_erp_succeeded(request);
		}
		return tape_34xx_erp_bug(device, request, irb, sense[3]);
	case 0x53:
		/* Global command intercept. */
		return tape_34xx_erp_retry(request);
	case 0x54:
		/* Channel interface recovery (temporary). */
		return tape_34xx_erp_retry(request);
	case 0x55:
		/* Channel interface recovery (permanent). */
		PRINT_WARN("A permanent channel interface error occurred.\n");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x56:
		/* Channel protocol error. */
		PRINT_WARN("A channel protocol error occurred.\n");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x57:
		if (device->cdev->id.driver_info == tape_3480) {
			/* Attention intercept. */
			PRINT_WARN("An attention intercept occurred, "
				   "which will be recovered.\n");
			return tape_34xx_erp_retry(request);
		} else {
			/* Global status intercept. */
			PRINT_WARN("An global status intercept was received, "
				   "which will be recovered.\n");
			return tape_34xx_erp_retry(request);
		}
	case 0x5a:
		/*
		 * Tape length incompatible. The tape inserted is too long,
		 * which could cause damage to the tape or the drive.
		 */
		PRINT_WARN("Tape Length Incompatible\n");
		PRINT_WARN("Tape length exceeds IBM enhanced capacity "
			   "cartdridge length or a medium\n");
		PRINT_WARN("with EC-CST identification mark has been mounted "
			   "in a device that writes\n");
		PRINT_WARN("3480 or 3480 XF format.\n");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x5b:
		/* Format 3480 XF incompatible */
		if (sense[1] & SENSE_BEGINNING_OF_TAPE)
			/* The tape will get overwritten. */
			return tape_34xx_erp_retry(request);
		PRINT_WARN("Format 3480 XF Incompatible\n");
		PRINT_WARN("Medium has been created in 3480 format. "
			   "To change the format writes\n");
		PRINT_WARN("must be issued at BOT.\n");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x5c:
		/* Format 3480-2 XF incompatible */
		PRINT_WARN("Format 3480-2 XF Incompatible\n");
		PRINT_WARN("Device can only read 3480 or 3480 XF format.\n");
		return tape_34xx_erp_failed(request, -EIO);
	case 0x5d:
		/* Tape length violation. */
		PRINT_WARN("Tape Length Violation\n");
		PRINT_WARN("The mounted tape exceeds IBM Enhanced Capacity "
			   "Cartdridge System Tape length.\n");
		PRINT_WARN("This may cause damage to the drive or tape when "
			   "processing to the EOV\n");
		return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
	case 0x5e:
		/* Compaction algorithm incompatible. */
		PRINT_WARN("Compaction Algorithm Incompatible\n");
		PRINT_WARN("The volume is recorded using an incompatible "
			   "compaction algorithm,\n");
		PRINT_WARN("which is not supported by the device.\n");
		return tape_34xx_erp_failed(request, -EMEDIUMTYPE);

	/* The following erpas should have been covered earlier. */
	case 0x23: /* Read data check. */
	case 0x25: /* Write data check. */
	case 0x26: /* Data check (read opposite). */
	case 0x28: /* Write id mark check. */
	case 0x31: /* Tape void. */
	case 0x40: /* Overrun error. */
	case 0x41: /* Record sequence error. */
	/* All other erpas are reserved for future use. */
	default:
		return tape_34xx_erp_bug(device, request, irb, sense[3]);
	}
}
845
846/*
847 * 3480/3490 interrupt handler
848 */
849static int
850tape_34xx_irq(struct tape_device *device, struct tape_request *request,
851 struct irb *irb)
852{
853 if (request == NULL)
854 return tape_34xx_unsolicited_irq(device, irb);
855
856 if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) &&
857 (irb->scsw.dstat & DEV_STAT_DEV_END) &&
858 (request->op == TO_WRI)) {
859 /* Write at end of volume */
860 PRINT_INFO("End of volume\n"); /* XXX */
861 return tape_34xx_erp_failed(request, -ENOSPC);
862 }
863
864 if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
865 return tape_34xx_unit_check(device, request, irb);
866
867 if (irb->scsw.dstat & DEV_STAT_DEV_END) {
868 /*
869 * A unit exception occurs on skipping over a tapemark block.
870 */
871 if (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) {
872 if (request->op == TO_BSB || request->op == TO_FSB)
873 request->rescnt++;
874 else
875 DBF_EVENT(5, "Unit Exception!\n");
876 }
877 return tape_34xx_done(request);
878 }
879
880 DBF_EVENT(6, "xunknownirq\n");
881 PRINT_ERR("Unexpected interrupt.\n");
882 PRINT_ERR("Current op is: %s", tape_op_verbose[request->op]);
883 tape_dump_sense(device, request, irb);
884 return TAPE_IO_STOP;
885}
886
887/*
888 * ioctl_overload
889 */
890static int
891tape_34xx_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
892{
893 if (cmd == TAPE390_DISPLAY) {
894 struct display_struct disp;
895
896 if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)) != 0)
897 return -EFAULT;
898
899 return tape_std_display(device, &disp);
900 } else
901 return -EINVAL;
902}
903
904static inline void
905tape_34xx_append_new_sbid(struct tape_34xx_block_id bid, struct list_head *l)
906{
907 struct tape_34xx_sbid * new_sbid;
908
909 new_sbid = kmalloc(sizeof(*new_sbid), GFP_ATOMIC);
910 if (!new_sbid)
911 return;
912
913 new_sbid->bid = bid;
914 list_add(&new_sbid->list, l);
915}
916
917/*
918 * Build up the search block ID list. The block ID consists of a logical
919 * block number and a hardware specific part. The hardware specific part
920 * helps the tape drive to speed up searching for a specific block.
921 */
static void
tape_34xx_add_sbid(struct tape_device *device, struct tape_34xx_block_id bid)
{
	struct list_head *	sbid_list;
	struct tape_34xx_sbid *	sbid;
	struct list_head *	l;

	/*
	 * immediately return if there is no list at all or the block to add
	 * is located in segment 1 of wrap 0 because this position is used
	 * if no hardware position data is supplied.
	 */
	sbid_list = (struct list_head *) device->discdata;
	if (!sbid_list || (bid.segment < 2 && bid.wrap == 0))
		return;

	/*
	 * Search the position where to insert the new entry. Hardware
	 * acceleration uses only the segment and wrap number. So we
	 * need only one entry for a specific wrap/segment combination.
	 * If there is a block with a lower number but the same hard-
	 * ware position data we just update the block number in the
	 * existing entry.  The list is kept sorted ascending by
	 * logical block number.
	 */
	list_for_each(l, sbid_list) {
		sbid = list_entry(l, struct tape_34xx_sbid, list);

		if (
			(sbid->bid.segment == bid.segment) &&
			(sbid->bid.wrap == bid.wrap)
		) {
			/* Same hardware position: keep the lower block. */
			if (bid.block < sbid->bid.block)
				sbid->bid = bid;
			else return;
			break;
		}

		/* Sort in according to logical block number. */
		if (bid.block < sbid->bid.block) {
			tape_34xx_append_new_sbid(bid, l->prev);
			break;
		}
	}
	/* List empty or new block bigger than last entry (loop ran to
	 * completion, so l wrapped back to the list head). */
	if (l == sbid_list)
		tape_34xx_append_new_sbid(bid, l->prev);

	DBF_LH(4, "Current list is:\n");
	list_for_each(l, sbid_list) {
		sbid = list_entry(l, struct tape_34xx_sbid, list);
		DBF_LH(4, "%d:%03d@%05d\n",
			sbid->bid.wrap,
			sbid->bid.segment,
			sbid->bid.block
		);
	}
}
979
980/*
981 * Delete all entries from the search block ID list that belong to tape blocks
982 * equal or higher than the given number.
983 */
984static void
985tape_34xx_delete_sbid_from(struct tape_device *device, int from)
986{
987 struct list_head * sbid_list;
988 struct tape_34xx_sbid * sbid;
989 struct list_head * l;
990 struct list_head * n;
991
992 sbid_list = (struct list_head *) device->discdata;
993 if (!sbid_list)
994 return;
995
996 list_for_each_safe(l, n, sbid_list) {
997 sbid = list_entry(l, struct tape_34xx_sbid, list);
998 if (sbid->bid.block >= from) {
999 DBF_LH(4, "Delete sbid %d:%03d@%05d\n",
1000 sbid->bid.wrap,
1001 sbid->bid.segment,
1002 sbid->bid.block
1003 );
1004 list_del(l);
1005 kfree(sbid);
1006 }
1007 }
1008}
1009
1010/*
1011 * Merge hardware position data into a block id.
1012 */
1013static void
1014tape_34xx_merge_sbid(
1015 struct tape_device * device,
1016 struct tape_34xx_block_id * bid
1017) {
1018 struct tape_34xx_sbid * sbid;
1019 struct tape_34xx_sbid * sbid_to_use;
1020 struct list_head * sbid_list;
1021 struct list_head * l;
1022
1023 sbid_list = (struct list_head *) device->discdata;
1024 bid->wrap = 0;
1025 bid->segment = 1;
1026
1027 if (!sbid_list || list_empty(sbid_list))
1028 return;
1029
1030 sbid_to_use = NULL;
1031 list_for_each(l, sbid_list) {
1032 sbid = list_entry(l, struct tape_34xx_sbid, list);
1033
1034 if (sbid->bid.block >= bid->block)
1035 break;
1036 sbid_to_use = sbid;
1037 }
1038 if (sbid_to_use) {
1039 bid->wrap = sbid_to_use->bid.wrap;
1040 bid->segment = sbid_to_use->bid.segment;
1041 DBF_LH(4, "Use %d:%03d@%05d for %05d\n",
1042 sbid_to_use->bid.wrap,
1043 sbid_to_use->bid.segment,
1044 sbid_to_use->bid.block,
1045 bid->block
1046 );
1047 }
1048}
1049
1050static int
1051tape_34xx_setup_device(struct tape_device * device)
1052{
1053 int rc;
1054 struct list_head * discdata;
1055
1056 DBF_EVENT(6, "34xx device setup\n");
1057 if ((rc = tape_std_assign(device)) == 0) {
1058 if ((rc = tape_34xx_medium_sense(device)) != 0) {
1059 DBF_LH(3, "34xx medium sense returned %d\n", rc);
1060 }
1061 }
1062 discdata = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1063 if (discdata) {
1064 INIT_LIST_HEAD(discdata);
1065 device->discdata = discdata;
1066 }
1067
1068 return rc;
1069}
1070
1071static void
1072tape_34xx_cleanup_device(struct tape_device *device)
1073{
1074 tape_std_unassign(device);
1075
1076 if (device->discdata) {
1077 tape_34xx_delete_sbid_from(device, 0);
1078 kfree(device->discdata);
1079 device->discdata = NULL;
1080 }
1081}
1082
1083
1084/*
1085 * MTTELL: Tell block. Return the number of block relative to current file.
1086 */
1087static int
1088tape_34xx_mttell(struct tape_device *device, int mt_count)
1089{
1090 struct {
1091 struct tape_34xx_block_id cbid;
1092 struct tape_34xx_block_id dbid;
1093 } __attribute__ ((packed)) block_id;
1094 int rc;
1095
1096 rc = tape_std_read_block_id(device, (__u64 *) &block_id);
1097 if (rc)
1098 return rc;
1099
1100 tape_34xx_add_sbid(device, block_id.cbid);
1101 return block_id.cbid.block;
1102}
1103
1104/*
1105 * MTSEEK: seek to the specified block.
1106 */
1107static int
1108tape_34xx_mtseek(struct tape_device *device, int mt_count)
1109{
1110 struct tape_request *request;
1111 struct tape_34xx_block_id * bid;
1112
1113 if (mt_count > 0x3fffff) {
1114 DBF_EXCEPTION(6, "xsee parm\n");
1115 return -EINVAL;
1116 }
1117 request = tape_alloc_request(3, 4);
1118 if (IS_ERR(request))
1119 return PTR_ERR(request);
1120
1121 /* setup ccws */
1122 request->op = TO_LBL;
1123 bid = (struct tape_34xx_block_id *) request->cpdata;
1124 bid->format = (*device->modeset_byte & 0x08) ?
1125 TAPE34XX_FMT_3480_XF : TAPE34XX_FMT_3480;
1126 bid->block = mt_count;
1127 tape_34xx_merge_sbid(device, bid);
1128
1129 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
1130 tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
1131 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
1132
1133 /* execute it */
1134 return tape_do_io_free(device, request);
1135}
1136
1137#ifdef CONFIG_S390_TAPE_BLOCK
1138/*
1139 * Tape block read for 34xx.
1140 */
1141static struct tape_request *
1142tape_34xx_bread(struct tape_device *device, struct request *req)
1143{
1144 struct tape_request *request;
1145 struct ccw1 *ccw;
1146 int count = 0, i;
1147 unsigned off;
1148 char *dst;
1149 struct bio_vec *bv;
1150 struct bio *bio;
1151 struct tape_34xx_block_id * start_block;
1152
1153 DBF_EVENT(6, "xBREDid:");
1154
1155 /* Count the number of blocks for the request. */
1156 rq_for_each_bio(bio, req) {
1157 bio_for_each_segment(bv, bio, i) {
1158 count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
1159 }
1160 }
1161
1162 /* Allocate the ccw request. */
1163 request = tape_alloc_request(3+count+1, 8);
1164 if (IS_ERR(request))
1165 return request;
1166
1167 /* Setup ccws. */
1168 request->op = TO_BLOCK;
1169 start_block = (struct tape_34xx_block_id *) request->cpdata;
1170 start_block->block = req->sector >> TAPEBLOCK_HSEC_S2B;
1171 DBF_EVENT(6, "start_block = %i\n", start_block->block);
1172
1173 ccw = request->cpaddr;
1174 ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
1175
1176 /*
1177 * We always setup a nop after the mode set ccw. This slot is
1178 * used in tape_std_check_locate to insert a locate ccw if the
1179 * current tape position doesn't match the start block to be read.
1180 * The second nop will be filled with a read block id which is in
1181 * turn used by tape_34xx_free_bread to populate the segment bid
1182 * table.
1183 */
1184 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
1185 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
1186
1187 rq_for_each_bio(bio, req) {
1188 bio_for_each_segment(bv, bio, i) {
1189 dst = kmap(bv->bv_page) + bv->bv_offset;
1190 for (off = 0; off < bv->bv_len;
1191 off += TAPEBLOCK_HSEC_SIZE) {
1192 ccw->flags = CCW_FLAG_CC;
1193 ccw->cmd_code = READ_FORWARD;
1194 ccw->count = TAPEBLOCK_HSEC_SIZE;
1195 set_normalized_cda(ccw, (void*) __pa(dst));
1196 ccw++;
1197 dst += TAPEBLOCK_HSEC_SIZE;
1198 }
1199 }
1200 }
1201
1202 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
1203 DBF_EVENT(6, "xBREDccwg\n");
1204 return request;
1205}
1206
/*
 * Free a block read request built by tape_34xx_bread. If the second nop
 * slot was turned into a read block id ccw (see tape_34xx_check_locate),
 * harvest the returned position data into the sbid cache first.
 */
static void
tape_34xx_free_bread (struct tape_request *request)
{
	struct ccw1* ccw;

	ccw = request->cpaddr;
	if ((ccw + 2)->cmd_code == READ_BLOCK_ID) {
		struct {
			struct tape_34xx_block_id	cbid;
			struct tape_34xx_block_id	dbid;
		} __attribute__ ((packed)) *rbi_data;

		rbi_data = request->cpdata;

		if (request->device)
			tape_34xx_add_sbid(request->device, rbi_data->cbid);
	}

	/* Last ccw is a nop and doesn't need clear_normalized_cda */
	for (; ccw->flags & CCW_FLAG_CC; ccw++)
		if (ccw->cmd_code == READ_FORWARD)
			clear_normalized_cda(ccw);
	tape_free_request(request);
}
1231
1232/*
1233 * check_locate is called just before the tape request is passed to
1234 * the common io layer for execution. It has to check the current
1235 * tape position and insert a locate ccw if it doesn't match the
1236 * start block for the request.
1237 */
1238static void
1239tape_34xx_check_locate(struct tape_device *device, struct tape_request *request)
1240{
1241 struct tape_34xx_block_id * start_block;
1242
1243 start_block = (struct tape_34xx_block_id *) request->cpdata;
1244 if (start_block->block == device->blk_data.block_position)
1245 return;
1246
1247 DBF_LH(4, "Block seek(%06d+%06d)\n", start_block->block, device->bof);
1248 start_block->wrap = 0;
1249 start_block->segment = 1;
1250 start_block->format = (*device->modeset_byte & 0x08) ?
1251 TAPE34XX_FMT_3480_XF :
1252 TAPE34XX_FMT_3480;
1253 start_block->block = start_block->block + device->bof;
1254 tape_34xx_merge_sbid(device, start_block);
1255 tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
1256 tape_ccw_cc(request->cpaddr + 2, READ_BLOCK_ID, 8, request->cpdata);
1257}
1258#endif
1259
1260/*
1261 * List of 3480/3490 magnetic tape commands.
1262 */
1263static tape_mtop_fn tape_34xx_mtop[TAPE_NR_MTOPS] = {
1264 [MTRESET] = tape_std_mtreset,
1265 [MTFSF] = tape_std_mtfsf,
1266 [MTBSF] = tape_std_mtbsf,
1267 [MTFSR] = tape_std_mtfsr,
1268 [MTBSR] = tape_std_mtbsr,
1269 [MTWEOF] = tape_std_mtweof,
1270 [MTREW] = tape_std_mtrew,
1271 [MTOFFL] = tape_std_mtoffl,
1272 [MTNOP] = tape_std_mtnop,
1273 [MTRETEN] = tape_std_mtreten,
1274 [MTBSFM] = tape_std_mtbsfm,
1275 [MTFSFM] = tape_std_mtfsfm,
1276 [MTEOM] = tape_std_mteom,
1277 [MTERASE] = tape_std_mterase,
1278 [MTRAS1] = NULL,
1279 [MTRAS2] = NULL,
1280 [MTRAS3] = NULL,
1281 [MTSETBLK] = tape_std_mtsetblk,
1282 [MTSETDENSITY] = NULL,
1283 [MTSEEK] = tape_34xx_mtseek,
1284 [MTTELL] = tape_34xx_mttell,
1285 [MTSETDRVBUFFER] = NULL,
1286 [MTFSS] = NULL,
1287 [MTBSS] = NULL,
1288 [MTWSM] = NULL,
1289 [MTLOCK] = NULL,
1290 [MTUNLOCK] = NULL,
1291 [MTLOAD] = tape_std_mtload,
1292 [MTUNLOAD] = tape_std_mtunload,
1293 [MTCOMPRESSION] = tape_std_mtcompression,
1294 [MTSETPART] = NULL,
1295 [MTMKPART] = NULL
1296};
1297
1298/*
1299 * Tape discipline structure for 3480 and 3490.
1300 */
1301static struct tape_discipline tape_discipline_34xx = {
1302 .owner = THIS_MODULE,
1303 .setup_device = tape_34xx_setup_device,
1304 .cleanup_device = tape_34xx_cleanup_device,
1305 .process_eov = tape_std_process_eov,
1306 .irq = tape_34xx_irq,
1307 .read_block = tape_std_read_block,
1308 .write_block = tape_std_write_block,
1309#ifdef CONFIG_S390_TAPE_BLOCK
1310 .bread = tape_34xx_bread,
1311 .free_bread = tape_34xx_free_bread,
1312 .check_locate = tape_34xx_check_locate,
1313#endif
1314 .ioctl_fn = tape_34xx_ioctl,
1315 .mtop_array = tape_34xx_mtop
1316};
1317
1318static struct ccw_device_id tape_34xx_ids[] = {
1319 { CCW_DEVICE_DEVTYPE(0x3480, 0, 0x3480, 0), driver_info: tape_3480},
1320 { CCW_DEVICE_DEVTYPE(0x3490, 0, 0x3490, 0), driver_info: tape_3490},
1321 { /* end of list */ }
1322};
1323
1324static int
1325tape_34xx_online(struct ccw_device *cdev)
1326{
1327 return tape_generic_online(
1328 cdev->dev.driver_data,
1329 &tape_discipline_34xx
1330 );
1331}
1332
/* Set a 3480/3490 device offline; driver_data was set at probe time. */
static int
tape_34xx_offline(struct ccw_device *cdev)
{
	return tape_generic_offline(cdev->dev.driver_data);
}
1338
/*
 * CCW driver glue: connects the 3480/3490 device IDs to the generic
 * tape probe/remove handlers and the 34xx online/offline callbacks.
 */
static struct ccw_driver tape_34xx_driver = {
	.name = "tape_34xx",
	.owner = THIS_MODULE,
	.ids = tape_34xx_ids,
	.probe = tape_generic_probe,
	.remove = tape_generic_remove,
	.set_online = tape_34xx_online,
	.set_offline = tape_34xx_offline,
};
1348
1349static int
1350tape_34xx_init (void)
1351{
1352 int rc;
1353
1354 TAPE_DBF_AREA = debug_register ( "tape_34xx", 1, 2, 4*sizeof(long));
1355 debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
1356#ifdef DBF_LIKE_HELL
1357 debug_set_level(TAPE_DBF_AREA, 6);
1358#endif
1359
1360 DBF_EVENT(3, "34xx init: $Revision: 1.21 $\n");
1361 /* Register driver for 3480/3490 tapes. */
1362 rc = ccw_driver_register(&tape_34xx_driver);
1363 if (rc)
1364 DBF_EVENT(3, "34xx init failed\n");
1365 else
1366 DBF_EVENT(3, "34xx registered\n");
1367 return rc;
1368}
1369
/* Module exit: unregister the ccw driver, then release the debug area. */
static void
tape_34xx_exit(void)
{
	ccw_driver_unregister(&tape_34xx_driver);

	debug_unregister(TAPE_DBF_AREA);
}
1377
/* Expose the device table so the module can be auto-loaded for 3480/3490. */
MODULE_DEVICE_TABLE(ccw, tape_34xx_ids);
MODULE_AUTHOR("(C) 2001-2002 IBM Deutschland Entwicklung GmbH");
MODULE_DESCRIPTION("Linux on zSeries channel attached 3480 tape "
		   "device driver ($Revision: 1.21 $)");
MODULE_LICENSE("GPL");

module_init(tape_34xx_init);
module_exit(tape_34xx_exit);
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
new file mode 100644
index 000000000000..1efc9f21229e
--- /dev/null
+++ b/drivers/s390/char/tape_block.c
@@ -0,0 +1,492 @@
1/*
2 * drivers/s390/char/tape_block.c
3 * block device frontend for tape device driver
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Stefan Bader <shbader@de.ibm.com>
11 */
12
13#include <linux/fs.h>
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/blkdev.h>
17#include <linux/interrupt.h>
18#include <linux/buffer_head.h>
19
20#include <asm/debug.h>
21
22#define TAPE_DBF_AREA tape_core_dbf
23
24#include "tape.h"
25
26#define PRINTK_HEADER "TAPE_BLOCK: "
27
28#define TAPEBLOCK_MAX_SEC 100
29#define TAPEBLOCK_MIN_REQUEUE 3
30
31/*
32 * 2003/11/25 Stefan Bader <shbader@de.ibm.com>
33 *
34 * In 2.5/2.6 the block device request function is very likely to be called
35 * with disabled interrupts (e.g. generic_unplug_device). So the driver can't
36 * just call any function that tries to allocate CCW requests from that con-
37 * text since it might sleep. There are two choices to work around this:
38 * a) do not allocate with kmalloc but use its own memory pool
39 * b) take requests from the queue outside that context, knowing that
40 * allocation might sleep
41 */
42
43/*
44 * file operation structure for tape block frontend
45 */
46static int tapeblock_open(struct inode *, struct file *);
47static int tapeblock_release(struct inode *, struct file *);
48static int tapeblock_ioctl(struct inode *, struct file *, unsigned int,
49 unsigned long);
50static int tapeblock_medium_changed(struct gendisk *);
51static int tapeblock_revalidate_disk(struct gendisk *);
52
53static struct block_device_operations tapeblock_fops = {
54 .owner = THIS_MODULE,
55 .open = tapeblock_open,
56 .release = tapeblock_release,
57 .ioctl = tapeblock_ioctl,
58 .media_changed = tapeblock_medium_changed,
59 .revalidate_disk = tapeblock_revalidate_disk,
60};
61
/* 0 means: request a dynamically assigned major in tapeblock_init. */
static int tapeblock_major = 0;

/*
 * Schedule the requeue work for a tape device, at most once until
 * tapeblock_requeue has run and cleared the flag again.
 */
static void
tapeblock_trigger_requeue(struct tape_device *device)
{
	/* Protect against rescheduling. NOTE(review): this relies on
	 * atomic_compare_and_swap returning nonzero when the flag was
	 * already set - s390-specific semantics, confirm against
	 * asm/atomic.h. */
	if (atomic_compare_and_swap(0, 1, &device->blk_data.requeue_scheduled))
		return;
	schedule_work(&device->blk_data.requeue_task);
}
72
73/*
74 * Post finished request.
75 */
76static inline void
77tapeblock_end_request(struct request *req, int uptodate)
78{
79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
80 BUG();
81 end_that_request_last(req);
82}
83
/*
 * Completion callback for ccw requests built by tapeblock_start_request.
 * Finishes the block layer request, updates the cached tape position
 * and triggers requeueing if more work is pending.
 */
static void
__tapeblock_end_request(struct tape_request *ccw_req, void *data)
{
	struct tape_device *device;
	struct request *req;

	DBF_LH(6, "__tapeblock_end_request()\n");

	device = ccw_req->device;
	req = (struct request *) data;
	tapeblock_end_request(req, ccw_req->rc == 0);
	if (ccw_req->rc == 0)
		/* Update position. */
		device->blk_data.block_position =
			(req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B;
	else
		/* We lost the position information due to an error. */
		device->blk_data.block_position = -1;
	device->discipline->free_bread(ccw_req);
	/* Kick the requeue work if either queue still has requests. */
	if (!list_empty(&device->req_queue) ||
	    elv_next_request(device->blk_data.request_queue))
		tapeblock_trigger_requeue(device);
}
107
108/*
109 * Feed the tape device CCW queue with requests supplied in a list.
110 */
111static inline int
112tapeblock_start_request(struct tape_device *device, struct request *req)
113{
114 struct tape_request * ccw_req;
115 int rc;
116
117 DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req);
118
119 ccw_req = device->discipline->bread(device, req);
120 if (IS_ERR(ccw_req)) {
121 DBF_EVENT(1, "TBLOCK: bread failed\n");
122 tapeblock_end_request(req, 0);
123 return PTR_ERR(ccw_req);
124 }
125 ccw_req->callback = __tapeblock_end_request;
126 ccw_req->callback_data = (void *) req;
127 ccw_req->retries = TAPEBLOCK_RETRIES;
128
129 rc = tape_do_io_async(device, ccw_req);
130 if (rc) {
131 /*
132 * Start/enqueueing failed. No retries in
133 * this case.
134 */
135 tapeblock_end_request(req, 0);
136 device->discipline->free_bread(ccw_req);
137 }
138
139 return rc;
140}
141
142/*
143 * Move requests from the block device request queue to the tape device ccw
144 * queue.
145 */
146static void
147tapeblock_requeue(void *data) {
148 struct tape_device * device;
149 request_queue_t * queue;
150 int nr_queued;
151 struct request * req;
152 struct list_head * l;
153 int rc;
154
155 device = (struct tape_device *) data;
156 if (!device)
157 return;
158
159 spin_lock_irq(get_ccwdev_lock(device->cdev));
160 queue = device->blk_data.request_queue;
161
162 /* Count number of requests on ccw queue. */
163 nr_queued = 0;
164 list_for_each(l, &device->req_queue)
165 nr_queued++;
166 spin_unlock(get_ccwdev_lock(device->cdev));
167
168 spin_lock(&device->blk_data.request_queue_lock);
169 while (
170 !blk_queue_plugged(queue) &&
171 elv_next_request(queue) &&
172 nr_queued < TAPEBLOCK_MIN_REQUEUE
173 ) {
174 req = elv_next_request(queue);
175 if (rq_data_dir(req) == WRITE) {
176 DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
177 blkdev_dequeue_request(req);
178 tapeblock_end_request(req, 0);
179 continue;
180 }
181 spin_unlock_irq(&device->blk_data.request_queue_lock);
182 rc = tapeblock_start_request(device, req);
183 spin_lock_irq(&device->blk_data.request_queue_lock);
184 blkdev_dequeue_request(req);
185 nr_queued++;
186 }
187 spin_unlock_irq(&device->blk_data.request_queue_lock);
188 atomic_set(&device->blk_data.requeue_scheduled, 0);
189}
190
191/*
192 * Tape request queue function. Called from ll_rw_blk.c
193 */
194static void
195tapeblock_request_fn(request_queue_t *queue)
196{
197 struct tape_device *device;
198
199 device = (struct tape_device *) queue->queuedata;
200 DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
201 if (device == NULL)
202 BUG();
203
204 tapeblock_trigger_requeue(device);
205}
206
207/*
208 * This function is called for every new tapedevice
209 */
210int
211tapeblock_setup_device(struct tape_device * device)
212{
213 struct tape_blk_data * blkdat;
214 struct gendisk * disk;
215 int rc;
216
217 blkdat = &device->blk_data;
218 spin_lock_init(&blkdat->request_queue_lock);
219 atomic_set(&blkdat->requeue_scheduled, 0);
220
221 blkdat->request_queue = blk_init_queue(
222 tapeblock_request_fn,
223 &blkdat->request_queue_lock
224 );
225 if (!blkdat->request_queue)
226 return -ENOMEM;
227
228 elevator_exit(blkdat->request_queue->elevator);
229 rc = elevator_init(blkdat->request_queue, "noop");
230 if (rc)
231 goto cleanup_queue;
232
233 blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
234 blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
235 blk_queue_max_phys_segments(blkdat->request_queue, -1L);
236 blk_queue_max_hw_segments(blkdat->request_queue, -1L);
237 blk_queue_max_segment_size(blkdat->request_queue, -1L);
238 blk_queue_segment_boundary(blkdat->request_queue, -1L);
239
240 disk = alloc_disk(1);
241 if (!disk) {
242 rc = -ENOMEM;
243 goto cleanup_queue;
244 }
245
246 disk->major = tapeblock_major;
247 disk->first_minor = device->first_minor;
248 disk->fops = &tapeblock_fops;
249 disk->private_data = tape_get_device_reference(device);
250 disk->queue = blkdat->request_queue;
251 set_capacity(disk, 0);
252 sprintf(disk->disk_name, "btibm%d",
253 device->first_minor / TAPE_MINORS_PER_DEV);
254
255 blkdat->disk = disk;
256 blkdat->medium_changed = 1;
257 blkdat->request_queue->queuedata = tape_get_device_reference(device);
258
259 add_disk(disk);
260
261 INIT_WORK(&blkdat->requeue_task, tapeblock_requeue,
262 tape_get_device_reference(device));
263
264 return 0;
265
266cleanup_queue:
267 blk_cleanup_queue(blkdat->request_queue);
268 blkdat->request_queue = NULL;
269
270 return rc;
271}
272
/*
 * Counterpart to tapeblock_setup_device: remove the gendisk, drop the
 * three device references taken at setup time and free the queue.
 */
void
tapeblock_cleanup_device(struct tape_device *device)
{
	flush_scheduled_work();
	/* Drop the reference owned by the requeue work item. */
	device->blk_data.requeue_task.data = tape_put_device(device);

	if (!device->blk_data.disk) {
		PRINT_ERR("(%s): No gendisk to clean up!\n",
			device->cdev->dev.bus_id);
		goto cleanup_queue;
	}

	del_gendisk(device->blk_data.disk);
	/* Drop the reference owned by the gendisk. */
	device->blk_data.disk->private_data =
		tape_put_device(device->blk_data.disk->private_data);
	put_disk(device->blk_data.disk);

	device->blk_data.disk = NULL;
cleanup_queue:
	/* Drop the reference owned by the request queue. */
	device->blk_data.request_queue->queuedata = tape_put_device(device);

	blk_cleanup_queue(device->blk_data.request_queue);
	device->blk_data.request_queue = NULL;
}
297
298/*
299 * Detect number of blocks of the tape.
300 * FIXME: can we extent this to detect the blocks size as well ?
301 */
302static int
303tapeblock_revalidate_disk(struct gendisk *disk)
304{
305 struct tape_device * device;
306 unsigned int nr_of_blks;
307 int rc;
308
309 device = (struct tape_device *) disk->private_data;
310 if (!device)
311 BUG();
312
313 if (!device->blk_data.medium_changed)
314 return 0;
315
316 PRINT_INFO("Detecting media size...\n");
317 rc = tape_mtop(device, MTFSFM, 1);
318 if (rc)
319 return rc;
320
321 rc = tape_mtop(device, MTTELL, 1);
322 if (rc < 0)
323 return rc;
324
325 DBF_LH(3, "Image file ends at %d\n", rc);
326 nr_of_blks = rc;
327
328 /* This will fail for the first file. Catch the error by checking the
329 * position. */
330 tape_mtop(device, MTBSF, 1);
331
332 rc = tape_mtop(device, MTTELL, 1);
333 if (rc < 0)
334 return rc;
335
336 if (rc > nr_of_blks)
337 return -EINVAL;
338
339 DBF_LH(3, "Image file starts at %d\n", rc);
340 device->bof = rc;
341 nr_of_blks -= rc;
342
343 PRINT_INFO("Found %i blocks on media\n", nr_of_blks);
344 set_capacity(device->blk_data.disk,
345 nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));
346
347 device->blk_data.block_position = 0;
348 device->blk_data.medium_changed = 0;
349 return 0;
350}
351
352static int
353tapeblock_medium_changed(struct gendisk *disk)
354{
355 struct tape_device *device;
356
357 device = (struct tape_device *) disk->private_data;
358 DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n",
359 device, device->blk_data.medium_changed);
360
361 return device->blk_data.medium_changed;
362}
363
364/*
365 * Block frontend tape device open function.
366 */
367static int
368tapeblock_open(struct inode *inode, struct file *filp)
369{
370 struct gendisk * disk;
371 struct tape_device * device;
372 int rc;
373
374 disk = inode->i_bdev->bd_disk;
375 device = tape_get_device_reference(disk->private_data);
376
377 if (device->required_tapemarks) {
378 DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
379 PRINT_ERR("TBLOCK: Refusing to open tape with missing"
380 " end of file marks.\n");
381 rc = -EPERM;
382 goto put_device;
383 }
384
385 rc = tape_open(device);
386 if (rc)
387 goto put_device;
388
389 rc = tapeblock_revalidate_disk(disk);
390 if (rc)
391 goto release;
392
393 /*
394 * Note: The reference to <device> is hold until the release function
395 * is called.
396 */
397 tape_state_set(device, TS_BLKUSE);
398 return 0;
399
400release:
401 tape_release(device);
402 put_device:
403 tape_put_device(device);
404 return rc;
405}
406
407/*
408 * Block frontend tape device release function.
409 *
410 * Note: One reference to the tape device was made by the open function. So
411 * we just get the pointer here and release the reference.
412 */
413static int
414tapeblock_release(struct inode *inode, struct file *filp)
415{
416 struct gendisk *disk = inode->i_bdev->bd_disk;
417 struct tape_device *device = disk->private_data;
418
419 tape_state_set(device, TS_IN_USE);
420 tape_release(device);
421 tape_put_device(device);
422
423 return 0;
424}
425
426/*
427 * Support of some generic block device IOCTLs.
428 */
429static int
430tapeblock_ioctl(
431 struct inode * inode,
432 struct file * file,
433 unsigned int command,
434 unsigned long arg
435) {
436 int rc;
437 int minor;
438 struct gendisk *disk = inode->i_bdev->bd_disk;
439 struct tape_device *device = disk->private_data;
440
441 rc = 0;
442 disk = inode->i_bdev->bd_disk;
443 if (!disk)
444 BUG();
445 device = disk->private_data;
446 if (!device)
447 BUG();
448 minor = iminor(inode);
449
450 DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
451 DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);
452
453 switch (command) {
454 /* Refuse some IOCTL calls without complaining (mount). */
455 case 0x5310: /* CDROMMULTISESSION */
456 rc = -EINVAL;
457 break;
458 default:
459 PRINT_WARN("invalid ioctl 0x%x\n", command);
460 rc = -EINVAL;
461 }
462
463 return rc;
464}
465
466/*
467 * Initialize block device frontend.
468 */
469int
470tapeblock_init(void)
471{
472 int rc;
473
474 /* Register the tape major number to the kernel */
475 rc = register_blkdev(tapeblock_major, "tBLK");
476 if (rc < 0)
477 return rc;
478
479 if (tapeblock_major == 0)
480 tapeblock_major = rc;
481 PRINT_INFO("tape gets major %d for block device\n", tapeblock_major);
482 return 0;
483}
484
485/*
486 * Deregister major for block device frontend
487 */
488void
489tapeblock_exit(void)
490{
491 unregister_blkdev(tapeblock_major, "tBLK");
492}
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
new file mode 100644
index 000000000000..86262a13f7c6
--- /dev/null
+++ b/drivers/s390/char/tape_char.c
@@ -0,0 +1,492 @@
1/*
2 * drivers/s390/char/tape_char.c
3 * character device frontend for tape device driver
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
10 * Martin Schwidefsky <schwidefsky@de.ibm.com>
11 */
12
13#include <linux/config.h>
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/proc_fs.h>
17#include <linux/mtio.h>
18
19#include <asm/uaccess.h>
20
21#define TAPE_DBF_AREA tape_core_dbf
22
23#include "tape.h"
24#include "tape_std.h"
25#include "tape_class.h"
26
27#define PRINTK_HEADER "TAPE_CHAR: "
28
29#define TAPECHAR_MAJOR 0 /* get dynamic major */
30
31/*
32 * file operation structure for tape character frontend
33 */
34static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
35static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
36static int tapechar_open(struct inode *,struct file *);
37static int tapechar_release(struct inode *,struct file *);
38static int tapechar_ioctl(struct inode *, struct file *, unsigned int,
39 unsigned long);
40
41static struct file_operations tape_fops =
42{
43 .owner = THIS_MODULE,
44 .read = tapechar_read,
45 .write = tapechar_write,
46 .ioctl = tapechar_ioctl,
47 .open = tapechar_open,
48 .release = tapechar_release,
49};
50
51static int tapechar_major = TAPECHAR_MAJOR;
52
53/*
54 * This function is called for every new tapedevice
55 */
56int
57tapechar_setup_device(struct tape_device * device)
58{
59 char device_name[20];
60
61 sprintf(device_name, "ntibm%i", device->first_minor / 2);
62 device->nt = register_tape_dev(
63 &device->cdev->dev,
64 MKDEV(tapechar_major, device->first_minor),
65 &tape_fops,
66 device_name,
67 "non-rewinding"
68 );
69 device_name[0] = 'r';
70 device->rt = register_tape_dev(
71 &device->cdev->dev,
72 MKDEV(tapechar_major, device->first_minor + 1),
73 &tape_fops,
74 device_name,
75 "rewinding"
76 );
77
78 return 0;
79}
80
/*
 * Counterpart to tapechar_setup_device: remove both character device
 * nodes (rewinding and non-rewinding) again.
 */
void
tapechar_cleanup_device(struct tape_device *device)
{
	unregister_tape_dev(device->rt);
	device->rt = NULL;
	unregister_tape_dev(device->nt);
	device->nt = NULL;
}
89
90/*
91 * Terminate write command (we write two TMs and skip backward over last)
92 * This ensures that the tape is always correctly terminated.
93 * When the user writes afterwards a new file, he will overwrite the
94 * second TM and therefore one TM will remain to separate the
95 * two files on the tape...
96 */
97static inline void
98tapechar_terminate_write(struct tape_device *device)
99{
100 if (tape_mtop(device, MTWEOF, 1) == 0 &&
101 tape_mtop(device, MTWEOF, 1) == 0)
102 tape_mtop(device, MTBSR, 1);
103}
104
105static inline int
106tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
107{
108 struct idal_buffer *new;
109
110 if (device->char_data.idal_buf != NULL &&
111 device->char_data.idal_buf->size == block_size)
112 return 0;
113
114 if (block_size > MAX_BLOCKSIZE) {
115 DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n",
116 block_size, MAX_BLOCKSIZE);
117 PRINT_ERR("Invalid blocksize (%zd> %d)\n",
118 block_size, MAX_BLOCKSIZE);
119 return -EINVAL;
120 }
121
122 /* The current idal buffer is not correct. Allocate a new one. */
123 new = idal_buffer_alloc(block_size, 0);
124 if (new == NULL)
125 return -ENOMEM;
126
127 if (device->char_data.idal_buf != NULL)
128 idal_buffer_free(device->char_data.idal_buf);
129
130 device->char_data.idal_buf = new;
131
132 return 0;
133}
134
135/*
136 * Tape device read function
137 */
138ssize_t
139tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
140{
141 struct tape_device *device;
142 struct tape_request *request;
143 size_t block_size;
144 int rc;
145
146 DBF_EVENT(6, "TCHAR:read\n");
147 device = (struct tape_device *) filp->private_data;
148
149 /*
150 * If the tape isn't terminated yet, do it now. And since we then
151 * are at the end of the tape there wouldn't be anything to read
152 * anyways. So we return immediatly.
153 */
154 if(device->required_tapemarks) {
155 return tape_std_terminate_write(device);
156 }
157
158 /* Find out block size to use */
159 if (device->char_data.block_size != 0) {
160 if (count < device->char_data.block_size) {
161 DBF_EVENT(3, "TCHAR:read smaller than block "
162 "size was requested\n");
163 return -EINVAL;
164 }
165 block_size = device->char_data.block_size;
166 } else {
167 block_size = count;
168 }
169
170 rc = tapechar_check_idalbuffer(device, block_size);
171 if (rc)
172 return rc;
173
174#ifdef CONFIG_S390_TAPE_BLOCK
175 /* Changes position. */
176 device->blk_data.medium_changed = 1;
177#endif
178
179 DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
180 /* Let the discipline build the ccw chain. */
181 request = device->discipline->read_block(device, block_size);
182 if (IS_ERR(request))
183 return PTR_ERR(request);
184 /* Execute it. */
185 rc = tape_do_io(device, request);
186 if (rc == 0) {
187 rc = block_size - request->rescnt;
188 DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc);
189 filp->f_pos += rc;
190 /* Copy data from idal buffer to user space. */
191 if (idal_buffer_to_user(device->char_data.idal_buf,
192 data, rc) != 0)
193 rc = -EFAULT;
194 }
195 tape_free_request(request);
196 return rc;
197}
198
199/*
200 * Tape device write function
201 */
202ssize_t
203tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
204{
205 struct tape_device *device;
206 struct tape_request *request;
207 size_t block_size;
208 size_t written;
209 int nblocks;
210 int i, rc;
211
212 DBF_EVENT(6, "TCHAR:write\n");
213 device = (struct tape_device *) filp->private_data;
214 /* Find out block size and number of blocks */
215 if (device->char_data.block_size != 0) {
216 if (count < device->char_data.block_size) {
217 DBF_EVENT(3, "TCHAR:write smaller than block "
218 "size was requested\n");
219 return -EINVAL;
220 }
221 block_size = device->char_data.block_size;
222 nblocks = count / block_size;
223 } else {
224 block_size = count;
225 nblocks = 1;
226 }
227
228 rc = tapechar_check_idalbuffer(device, block_size);
229 if (rc)
230 return rc;
231
232#ifdef CONFIG_S390_TAPE_BLOCK
233 /* Changes position. */
234 device->blk_data.medium_changed = 1;
235#endif
236
237 DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size);
238 DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
239 /* Let the discipline build the ccw chain. */
240 request = device->discipline->write_block(device, block_size);
241 if (IS_ERR(request))
242 return PTR_ERR(request);
243 rc = 0;
244 written = 0;
245 for (i = 0; i < nblocks; i++) {
246 /* Copy data from user space to idal buffer. */
247 if (idal_buffer_from_user(device->char_data.idal_buf,
248 data, block_size)) {
249 rc = -EFAULT;
250 break;
251 }
252 rc = tape_do_io(device, request);
253 if (rc)
254 break;
255 DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
256 block_size - request->rescnt);
257 filp->f_pos += block_size - request->rescnt;
258 written += block_size - request->rescnt;
259 if (request->rescnt != 0)
260 break;
261 data += block_size;
262 }
263 tape_free_request(request);
264 if (rc == -ENOSPC) {
265 /*
266 * Ok, the device has no more space. It has NOT written
267 * the block.
268 */
269 if (device->discipline->process_eov)
270 device->discipline->process_eov(device);
271 if (written > 0)
272 rc = 0;
273
274 }
275
276 /*
277 * After doing a write we always need two tapemarks to correctly
278 * terminate the tape (one to terminate the file, the second to
279 * flag the end of recorded data.
280 * Since process_eov positions the tape in front of the written
281 * tapemark it doesn't hurt to write two marks again.
282 */
283 if (!rc)
284 device->required_tapemarks = 2;
285
286 return rc ? rc : written;
287}
288
289/*
290 * Character frontend tape device open function.
291 */
292int
293tapechar_open (struct inode *inode, struct file *filp)
294{
295 struct tape_device *device;
296 int minor, rc;
297
298 DBF_EVENT(6, "TCHAR:open: %i:%i\n",
299 imajor(filp->f_dentry->d_inode),
300 iminor(filp->f_dentry->d_inode));
301
302 if (imajor(filp->f_dentry->d_inode) != tapechar_major)
303 return -ENODEV;
304
305 minor = iminor(filp->f_dentry->d_inode);
306 device = tape_get_device(minor / TAPE_MINORS_PER_DEV);
307 if (IS_ERR(device)) {
308 DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n");
309 return PTR_ERR(device);
310 }
311
312
313 rc = tape_open(device);
314 if (rc == 0) {
315 filp->private_data = device;
316 return nonseekable_open(inode, filp);
317 }
318 tape_put_device(device);
319
320 return rc;
321}
322
323/*
324 * Character frontend tape device release function.
325 */
326
int
tapechar_release(struct inode *inode, struct file *filp)
{
	struct tape_device *device;

	DBF_EVENT(6, "TCHAR:release: %x\n", iminor(inode));
	device = (struct tape_device *) filp->private_data;

	/*
	 * If this is the rewinding tape minor then rewind. In that case we
	 * write all required tapemarks. Otherwise only one to terminate the
	 * file.
	 */
	if ((iminor(inode) & 1) != 0) {
		/* Rewinding minor (odd minor number). */
		if (device->required_tapemarks)
			tape_std_terminate_write(device);
		tape_mtop(device, MTREW, 1);
	} else {
		/* Non-rewinding minor: write at most one filemark here. */
		if (device->required_tapemarks > 1) {
			if (tape_mtop(device, MTWEOF, 1) == 0)
				device->required_tapemarks--;
		}
	}

	/* Free the per-open idal buffer used for character reads/writes. */
	if (device->char_data.idal_buf != NULL) {
		idal_buffer_free(device->char_data.idal_buf);
		device->char_data.idal_buf = NULL;
	}
	tape_release(device);
	/* tape_put_device() returns NULL, which clears private_data. */
	filp->private_data = tape_put_device(device);

	return 0;
}
360
361/*
362 * Tape device io controls.
363 */
static int
tapechar_ioctl(struct inode *inp, struct file *filp,
	       unsigned int no, unsigned long data)
{
	struct tape_device *device;
	int rc;

	DBF_EVENT(6, "TCHAR:ioct\n");

	device = (struct tape_device *) filp->private_data;

	if (no == MTIOCTOP) {
		struct mtop op;

		if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0)
			return -EFAULT;
		if (op.mt_count < 0)
			return -EINVAL;

		/*
		 * Operations that change tape position should write final
		 * tapemarks.
		 */
		switch (op.mt_op) {
		case MTFSF:
		case MTBSF:
		case MTFSR:
		case MTBSR:
		case MTREW:
		case MTOFFL:
		case MTEOM:
		case MTRETEN:
		case MTBSFM:
		case MTFSFM:
		case MTSEEK:
#ifdef CONFIG_S390_TAPE_BLOCK
			device->blk_data.medium_changed = 1;
#endif
			if (device->required_tapemarks)
				tape_std_terminate_write(device);
			/* fallthrough - other operations need no prologue */
		default:
			;
		}
		rc = tape_mtop(device, op.mt_op, op.mt_count);

		if (op.mt_op == MTWEOF && rc == 0) {
			/*
			 * Tapemarks written explicitly by the user count
			 * against the ones we still owe; never go negative.
			 */
			if (op.mt_count > device->required_tapemarks)
				device->required_tapemarks = 0;
			else
				device->required_tapemarks -= op.mt_count;
		}
		return rc;
	}
	if (no == MTIOCPOS) {
		/* MTIOCPOS: query the tape position. */
		struct mtpos pos;

		rc = tape_mtop(device, MTTELL, 1);
		if (rc < 0)
			return rc;
		pos.mt_blkno = rc;
		if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0)
			return -EFAULT;
		return 0;
	}
	if (no == MTIOCGET) {
		/* MTIOCGET: query the tape drive status. */
		struct mtget get;

		memset(&get, 0, sizeof(get));
		get.mt_type = MT_ISUNKNOWN;
		get.mt_resid = 0 /* device->devstat.rescnt */;
		get.mt_dsreg = device->tape_state;
		/* FIXME: mt_gstat, mt_erreg, mt_fileno */
		get.mt_gstat = 0;
		get.mt_erreg = 0;
		get.mt_fileno = 0;
		get.mt_gstat = device->tape_generic_status;

		if (device->medium_state == MS_LOADED) {
			rc = tape_mtop(device, MTTELL, 1);

			if (rc < 0)
				return rc;

			/* Block number 0 means beginning-of-tape. */
			if (rc == 0)
				get.mt_gstat |= GMT_BOT(~0);

			get.mt_blkno = rc;
		}

		if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0)
			return -EFAULT;

		return 0;
	}
	/* Try the discipline ioctl function. */
	if (device->discipline->ioctl_fn == NULL)
		return -EINVAL;
	return device->discipline->ioctl_fn(device, no, data);
}
465
466/*
467 * Initialize character device frontend.
468 */
469int
470tapechar_init (void)
471{
472 dev_t dev;
473
474 if (alloc_chrdev_region(&dev, 0, 256, "tape") != 0)
475 return -1;
476
477 tapechar_major = MAJOR(dev);
478 PRINT_INFO("tape gets major %d for character devices\n", MAJOR(dev));
479
480 return 0;
481}
482
483/*
484 * cleanup
485 */
486void
487tapechar_exit(void)
488{
489 PRINT_INFO("tape releases major %d for character devices\n",
490 tapechar_major);
491 unregister_chrdev_region(MKDEV(tapechar_major, 0), 256);
492}
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
new file mode 100644
index 000000000000..0f8ffd4167ca
--- /dev/null
+++ b/drivers/s390/char/tape_class.c
@@ -0,0 +1,126 @@
1/*
2 * (C) Copyright IBM Corp. 2004
3 * tape_class.c ($Revision: 1.8 $)
4 *
5 * Tape class device support
6 *
7 * Author: Stefan Bader <shbader@de.ibm.com>
8 * Based on simple class device code by Greg K-H
9 */
10#include "tape_class.h"
11
MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
MODULE_DESCRIPTION(
	"(C) Copyright IBM Corp. 2004 All Rights Reserved.\n"
	"tape_class.c ($Revision: 1.8 $)"
);
MODULE_LICENSE("GPL");

/* The single "tape390" device class; created in tape_init() below. */
struct class_simple *tape_class;
20
21/*
22 * Register a tape device and return a pointer to the cdev structure.
23 *
24 * device
25 * The pointer to the struct device of the physical (base) device.
26 * drivername
27 * The pointer to the drivers name for it's character devices.
28 * dev
29 * The intended major/minor number. The major number may be 0 to
30 * get a dynamic major number.
31 * fops
32 * The pointer to the drivers file operations for the tape device.
33 * devname
34 * The pointer to the name of the character device.
35 */
36struct tape_class_device *register_tape_dev(
37 struct device * device,
38 dev_t dev,
39 struct file_operations *fops,
40 char * device_name,
41 char * mode_name)
42{
43 struct tape_class_device * tcd;
44 int rc;
45 char * s;
46
47 tcd = kmalloc(sizeof(struct tape_class_device), GFP_KERNEL);
48 if (!tcd)
49 return ERR_PTR(-ENOMEM);
50
51 memset(tcd, 0, sizeof(struct tape_class_device));
52 strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
53 for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
54 *s = '!';
55 strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
56 for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
57 *s = '!';
58
59 tcd->char_device = cdev_alloc();
60 if (!tcd->char_device) {
61 rc = -ENOMEM;
62 goto fail_with_tcd;
63 }
64
65 tcd->char_device->owner = fops->owner;
66 tcd->char_device->ops = fops;
67 tcd->char_device->dev = dev;
68
69 rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1);
70 if (rc)
71 goto fail_with_cdev;
72
73 tcd->class_device = class_simple_device_add(
74 tape_class,
75 tcd->char_device->dev,
76 device,
77 "%s", tcd->device_name
78 );
79 sysfs_create_link(
80 &device->kobj,
81 &tcd->class_device->kobj,
82 tcd->mode_name
83 );
84
85 return tcd;
86
87fail_with_cdev:
88 cdev_del(tcd->char_device);
89
90fail_with_tcd:
91 kfree(tcd);
92
93 return ERR_PTR(rc);
94}
95EXPORT_SYMBOL(register_tape_dev);
96
97void unregister_tape_dev(struct tape_class_device *tcd)
98{
99 if (tcd != NULL && !IS_ERR(tcd)) {
100 sysfs_remove_link(
101 &tcd->class_device->dev->kobj,
102 tcd->mode_name
103 );
104 class_simple_device_remove(tcd->char_device->dev);
105 cdev_del(tcd->char_device);
106 kfree(tcd);
107 }
108}
109EXPORT_SYMBOL(unregister_tape_dev);
110
111
112static int __init tape_init(void)
113{
114 tape_class = class_simple_create(THIS_MODULE, "tape390");
115
116 return 0;
117}
118
static void __exit tape_exit(void)
{
	/* Tear down the "tape390" class created in tape_init(). */
	class_simple_destroy(tape_class);
	tape_class = NULL;
}
124
125postcore_initcall(tape_init);
126module_exit(tape_exit);
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
new file mode 100644
index 000000000000..33133ad00ba2
--- /dev/null
+++ b/drivers/s390/char/tape_class.h
@@ -0,0 +1,61 @@
1/*
2 * (C) Copyright IBM Corp. 2004 All Rights Reserved.
3 * tape_class.h ($Revision: 1.4 $)
4 *
5 * Tape class device support
6 *
7 * Author: Stefan Bader <shbader@de.ibm.com>
8 * Based on simple class device code by Greg K-H
9 */
10#ifndef __TAPE_CLASS_H__
11#define __TAPE_CLASS_H__
12
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/fs.h>
16#include <linux/major.h>
17#include <linux/kobject.h>
18#include <linux/kobj_map.h>
19#include <linux/cdev.h>
20
21#include <linux/device.h>
22#include <linux/kdev_t.h>
23
24#define TAPECLASS_NAME_LEN 32
25
/* Bookkeeping for one registered tape character device. */
struct tape_class_device {
	struct cdev * char_device;	/* character device node */
	struct class_device * class_device;	/* sysfs class entry */
	char device_name[TAPECLASS_NAME_LEN];	/* '/'-sanitized cdev name */
	char mode_name[TAPECLASS_NAME_LEN];	/* name of the sysfs link */
};
32
33/*
34 * Register a tape device and return a pointer to the tape class device
35 * created by the call.
36 *
37 * device
38 * The pointer to the struct device of the physical (base) device.
39 * dev
40 * The intended major/minor number. The major number may be 0 to
41 * get a dynamic major number.
42 * fops
43 * The pointer to the drivers file operations for the tape device.
44 * device_name
45 * Pointer to the logical device name (will also be used as kobject name
46 * of the cdev). This can also be called the name of the tape class
47 * device.
48 * mode_name
49 * Points to the name of the tape mode. This creates a link with that
50 * name from the physical device to the logical device (class).
51 */
52struct tape_class_device *register_tape_dev(
53 struct device * device,
54 dev_t dev,
55 struct file_operations *fops,
56 char * device_name,
57 char * node_name
58);
59void unregister_tape_dev(struct tape_class_device *tcd);
60
61#endif /* __TAPE_CLASS_H__ */
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
new file mode 100644
index 000000000000..e51046ab8adc
--- /dev/null
+++ b/drivers/s390/char/tape_core.c
@@ -0,0 +1,1242 @@
1/*
2 * drivers/s390/char/tape_core.c
3 * basic function of the tape device driver
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
10 * Martin Schwidefsky <schwidefsky@de.ibm.com>
11 */
12
13#include <linux/config.h>
14#include <linux/module.h>
15#include <linux/init.h> // for kernel parameters
16#include <linux/kmod.h> // for requesting modules
17#include <linux/spinlock.h> // for locks
18#include <linux/vmalloc.h>
19#include <linux/list.h>
20
21#include <asm/types.h> // for variable types
22
23#define TAPE_DBF_AREA tape_core_dbf
24
25#include "tape.h"
26#include "tape_std.h"
27
28#define PRINTK_HEADER "TAPE_CORE: "
29
30static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
31static void __tape_remove_request(struct tape_device *, struct tape_request *);
32
33/*
34 * One list to contain all tape devices of all disciplines, so
35 * we can assign the devices to minor numbers of the same major
36 * The list is protected by the rwlock
37 */
38static struct list_head tape_device_list = LIST_HEAD_INIT(tape_device_list);
39static DEFINE_RWLOCK(tape_device_lock);
40
41/*
42 * Pointer to debug area.
43 */
44debug_info_t *TAPE_DBF_AREA = NULL;
45EXPORT_SYMBOL(TAPE_DBF_AREA);
46
47/*
48 * Printable strings for tape enumerations.
49 */
/* Human-readable names for enum tape_state, indexed by state value. */
const char *tape_state_verbose[TS_SIZE] =
{
	[TS_UNUSED] = "UNUSED",
	[TS_IN_USE] = "IN_USE",
	[TS_BLKUSE] = "BLKUSE",
	[TS_INIT] = "INIT ",
	[TS_NOT_OPER] = "NOT_OP"
};

/* Three-letter mnemonics for enum tape_op, indexed by operation value. */
const char *tape_op_verbose[TO_SIZE] =
{
	[TO_BLOCK] = "BLK", [TO_BSB] = "BSB",
	[TO_BSF] = "BSF", [TO_DSE] = "DSE",
	[TO_FSB] = "FSB", [TO_FSF] = "FSF",
	[TO_LBL] = "LBL", [TO_NOP] = "NOP",
	[TO_RBA] = "RBA", [TO_RBI] = "RBI",
	[TO_RFO] = "RFO", [TO_REW] = "REW",
	[TO_RUN] = "RUN", [TO_WRI] = "WRI",
	[TO_WTM] = "WTM", [TO_MSEN] = "MSN",
	[TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF",
	[TO_READ_ATTMSG] = "RAT",
	[TO_DIS] = "DIS", [TO_ASSIGN] = "ASS",
	[TO_UNASSIGN] = "UAS"
};
74
/*
 * Convert a ccw bus id of the form "a.b.cccc" (two decimal fields and a
 * trailing hexadecimal device number) into a single integer:
 * (a << 24) | (b << 16) | 0xcccc for the usual four-digit device part.
 */
static inline int
busid_to_int(char *bus_id)
{
	int value;
	int digit;
	char *p;

	/* First decimal component, up to the first '.'. */
	value = 0;
	for (p = bus_id; *p != '\0' && *p != '.'; p++)
		value = value * 10 + (*p - '0');

	/* Second decimal component, up to the next '.'. */
	digit = 0;
	for (p++; *p != '\0' && *p != '.'; p++)
		digit = digit * 10 + (*p - '0');
	value = (value << 8) + digit;

	/* Trailing hexadecimal device number (lower or upper case). */
	for (p++; *p != '\0'; p++) {
		if (*p >= '0' && *p <= '9')
			digit = *p - '0';
		else if (*p >= 'a' && *p <= 'f')
			digit = *p - 'a' + 10;
		else
			digit = *p - 'A' + 10;
		value = (value << 4) + digit;
	}

	return value;
}
102
103/*
104 * Some channel attached tape specific attributes.
105 *
106 * FIXME: In the future the first_minor and blocksize attribute should be
107 * replaced by a link to the cdev tree.
108 */
109static ssize_t
110tape_medium_state_show(struct device *dev, char *buf)
111{
112 struct tape_device *tdev;
113
114 tdev = (struct tape_device *) dev->driver_data;
115 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
116}
117
118static
119DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);
120
121static ssize_t
122tape_first_minor_show(struct device *dev, char *buf)
123{
124 struct tape_device *tdev;
125
126 tdev = (struct tape_device *) dev->driver_data;
127 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
128}
129
130static
131DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);
132
133static ssize_t
134tape_state_show(struct device *dev, char *buf)
135{
136 struct tape_device *tdev;
137
138 tdev = (struct tape_device *) dev->driver_data;
139 return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
140 "OFFLINE" : tape_state_verbose[tdev->tape_state]);
141}
142
143static
144DEVICE_ATTR(state, 0444, tape_state_show, NULL);
145
static ssize_t
tape_operation_show(struct device *dev, char *buf)
{
	struct tape_device *tdev;
	ssize_t rc;

	tdev = (struct tape_device *) dev->driver_data;
	/* An offline device has no request queue to inspect. */
	if (tdev->first_minor < 0)
		return scnprintf(buf, PAGE_SIZE, "N/A\n");

	/* The request queue is protected by the ccw device lock. */
	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
	if (list_empty(&tdev->req_queue))
		rc = scnprintf(buf, PAGE_SIZE, "---\n");
	else {
		struct tape_request *req;

		/* Report the mnemonic of the request at the queue head. */
		req = list_entry(tdev->req_queue.next, struct tape_request,
			list);
		rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
	}
	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
	return rc;
}

static
DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);
172
173static ssize_t
174tape_blocksize_show(struct device *dev, char *buf)
175{
176 struct tape_device *tdev;
177
178 tdev = (struct tape_device *) dev->driver_data;
179
180 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
181}
182
183static
184DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);
185
/* Attributes exported under each tape device's sysfs directory. */
static struct attribute *tape_attrs[] = {
	&dev_attr_medium_state.attr,
	&dev_attr_first_minor.attr,
	&dev_attr_state.attr,
	&dev_attr_operation.attr,
	&dev_attr_blocksize.attr,
	NULL
};

static struct attribute_group tape_attr_group = {
	.attrs = tape_attrs,
};
198
199/*
200 * Tape state functions
201 */
202void
203tape_state_set(struct tape_device *device, enum tape_state newstate)
204{
205 const char *str;
206
207 if (device->tape_state == TS_NOT_OPER) {
208 DBF_EVENT(3, "ts_set err: not oper\n");
209 return;
210 }
211 DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
212 if (device->tape_state < TO_SIZE && device->tape_state >= 0)
213 str = tape_state_verbose[device->tape_state];
214 else
215 str = "UNKNOWN TS";
216 DBF_EVENT(4, "old ts: %s\n", str);
217 if (device->tape_state < TO_SIZE && device->tape_state >=0 )
218 str = tape_state_verbose[device->tape_state];
219 else
220 str = "UNKNOWN TS";
221 DBF_EVENT(4, "%s\n", str);
222 DBF_EVENT(4, "new ts:\t\n");
223 if (newstate < TO_SIZE && newstate >= 0)
224 str = tape_state_verbose[newstate];
225 else
226 str = "UNKNOWN TS";
227 DBF_EVENT(4, "%s\n", str);
228 device->tape_state = newstate;
229 wake_up(&device->state_change_wq);
230}
231
232void
233tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
234{
235 if (device->medium_state == newstate)
236 return;
237 switch(newstate){
238 case MS_UNLOADED:
239 device->tape_generic_status |= GMT_DR_OPEN(~0);
240 PRINT_INFO("(%s): Tape is unloaded\n",
241 device->cdev->dev.bus_id);
242 break;
243 case MS_LOADED:
244 device->tape_generic_status &= ~GMT_DR_OPEN(~0);
245 PRINT_INFO("(%s): Tape has been mounted\n",
246 device->cdev->dev.bus_id);
247 break;
248 default:
249 // print nothing
250 break;
251 }
252 device->medium_state = newstate;
253 wake_up(&device->state_change_wq);
254}
255
256/*
257 * Stop running ccw. Has to be called with the device lock held.
258 */
static inline int
__tape_halt_io(struct tape_device *device, struct tape_request *request)
{
	int retries;
	int rc;

	/* Check if interrupt has already been processed */
	if (request->callback == NULL)
		return 0;

	rc = 0;
	/* Retry the clear a few times; -EBUSY/-EIO/-ENODEV may be transient. */
	for (retries = 0; retries < 5; retries++) {
		rc = ccw_device_clear(device->cdev, (long) request);

		if (rc == 0) { /* Termination successful */
			request->rc = -EIO;
			request->status = TAPE_REQUEST_DONE;
			return 0;
		}

		if (rc == -ENODEV)
			DBF_EXCEPTION(2, "device gone, retry\n");
		else if (rc == -EIO)
			DBF_EXCEPTION(2, "I/O error, retry\n");
		else if (rc == -EBUSY)
			DBF_EXCEPTION(2, "device busy, retry late\n");
		else
			BUG();
	}

	/* All retries failed; hand the last error code to the caller. */
	return rc;
}
291
292/*
293 * Add device into the sorted list, giving it the first
294 * available minor number.
295 */
static int
tape_assign_minor(struct tape_device *device)
{
	struct tape_device *tmp;
	int minor;

	minor = 0;
	write_lock(&tape_device_lock);
	/* Find the first free gap of TAPE_MINORS_PER_DEV in the sorted list. */
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (minor < tmp->first_minor)
			break;
		minor += TAPE_MINORS_PER_DEV;
	}
	/* Only 256 minors exist under the tape character major. */
	if (minor >= 256) {
		write_unlock(&tape_device_lock);
		return -ENODEV;
	}
	device->first_minor = minor;
	/* Insert before 'tmp' (or at the tail if the loop ran through). */
	list_add_tail(&device->node, &tmp->node);
	write_unlock(&tape_device_lock);
	return 0;
}
318
319/* remove device from the list */
static void
tape_remove_minor(struct tape_device *device)
{
	write_lock(&tape_device_lock);
	list_del_init(&device->node);
	/* -1 marks the device as having no minor (offline). */
	device->first_minor = -1;
	write_unlock(&tape_device_lock);
}
328
329/*
330 * Set a device online.
331 *
332 * This function is called by the common I/O layer to move a device from the
333 * detected but offline into the online state.
334 * If we return an error (RC < 0) the device remains in the offline state. This
335 * can happen if the device is assigned somewhere else, for example.
336 */
337int
338tape_generic_online(struct tape_device *device,
339 struct tape_discipline *discipline)
340{
341 int rc;
342
343 DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);
344
345 if (device->tape_state != TS_INIT) {
346 DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
347 return -EINVAL;
348 }
349
350 /* Let the discipline have a go at the device. */
351 device->discipline = discipline;
352 if (!try_module_get(discipline->owner)) {
353 PRINT_ERR("Cannot get module. Module gone.\n");
354 return -EINVAL;
355 }
356
357 rc = discipline->setup_device(device);
358 if (rc)
359 goto out;
360 rc = tape_assign_minor(device);
361 if (rc)
362 goto out_discipline;
363
364 rc = tapechar_setup_device(device);
365 if (rc)
366 goto out_minor;
367 rc = tapeblock_setup_device(device);
368 if (rc)
369 goto out_char;
370
371 tape_state_set(device, TS_UNUSED);
372
373 DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);
374
375 return 0;
376
377out_char:
378 tapechar_cleanup_device(device);
379out_discipline:
380 device->discipline->cleanup_device(device);
381 device->discipline = NULL;
382out_minor:
383 tape_remove_minor(device);
384out:
385 module_put(discipline->owner);
386 return rc;
387}
388
static inline void
tape_cleanup_device(struct tape_device *device)
{
	/* Tear down in reverse order of tape_generic_online(). */
	tapeblock_cleanup_device(device);
	tapechar_cleanup_device(device);
	device->discipline->cleanup_device(device);
	module_put(device->discipline->owner);
	tape_remove_minor(device);
	tape_med_state_set(device, MS_UNKNOWN);
}
399
400/*
401 * Set device offline.
402 *
403 * Called by the common I/O layer if the drive should set offline on user
404 * request. We may prevent this by returning an error.
405 * Manual offline is only allowed while the drive is not in use.
406 */
int
tape_generic_offline(struct tape_device *device)
{
	if (!device) {
		PRINT_ERR("tape_generic_offline: no such device\n");
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
		device->cdev_id, device);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
		case TS_INIT:
		case TS_NOT_OPER:
			/* Not (yet) operational - nothing to tear down. */
			spin_unlock_irq(get_ccwdev_lock(device->cdev));
			break;
		case TS_UNUSED:
			/* Drop the lock before cleanup, which may sleep. */
			tape_state_set(device, TS_INIT);
			spin_unlock_irq(get_ccwdev_lock(device->cdev));
			tape_cleanup_device(device);
			break;
		default:
			/* Refuse offline while the drive is open/in use. */
			DBF_EVENT(3, "(%08x): Set offline failed "
				"- drive in use.\n",
				device->cdev_id);
			PRINT_WARN("(%s): Set offline failed "
				"- drive in use.\n",
				device->cdev->dev.bus_id);
			spin_unlock_irq(get_ccwdev_lock(device->cdev));
			return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
	return 0;
}
443
444/*
445 * Allocate memory for a new device structure.
446 */
447static struct tape_device *
448tape_alloc_device(void)
449{
450 struct tape_device *device;
451
452 device = (struct tape_device *)
453 kmalloc(sizeof(struct tape_device), GFP_KERNEL);
454 if (device == NULL) {
455 DBF_EXCEPTION(2, "ti:no mem\n");
456 PRINT_INFO ("can't allocate memory for "
457 "tape info structure\n");
458 return ERR_PTR(-ENOMEM);
459 }
460 memset(device, 0, sizeof(struct tape_device));
461 device->modeset_byte = (char *) kmalloc(1, GFP_KERNEL | GFP_DMA);
462 if (device->modeset_byte == NULL) {
463 DBF_EXCEPTION(2, "ti:no mem\n");
464 PRINT_INFO("can't allocate memory for modeset byte\n");
465 kfree(device);
466 return ERR_PTR(-ENOMEM);
467 }
468 INIT_LIST_HEAD(&device->req_queue);
469 INIT_LIST_HEAD(&device->node);
470 init_waitqueue_head(&device->state_change_wq);
471 device->tape_state = TS_INIT;
472 device->medium_state = MS_UNKNOWN;
473 *device->modeset_byte = 0;
474 device->first_minor = -1;
475 atomic_set(&device->ref_count, 1);
476
477 return device;
478}
479
480/*
481 * Get a reference to an existing device structure. This will automatically
482 * increment the reference count.
483 */
484struct tape_device *
485tape_get_device_reference(struct tape_device *device)
486{
487 DBF_EVENT(4, "tape_get_device_reference(%p) = %i\n", device,
488 atomic_inc_return(&device->ref_count));
489
490 return device;
491}
492
493/*
494 * Decrease the reference counter of a devices structure. If the
495 * reference counter reaches zero free the device structure.
496 * The function returns a NULL pointer to be used by the caller
497 * for clearing reference pointers.
498 */
499struct tape_device *
500tape_put_device(struct tape_device *device)
501{
502 int remain;
503
504 remain = atomic_dec_return(&device->ref_count);
505 if (remain > 0) {
506 DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remain);
507 } else {
508 if (remain < 0) {
509 DBF_EVENT(4, "put device without reference\n");
510 PRINT_ERR("put device without reference\n");
511 } else {
512 DBF_EVENT(4, "tape_free_device(%p)\n", device);
513 kfree(device->modeset_byte);
514 kfree(device);
515 }
516 }
517
518 return NULL;
519}
520
521/*
522 * Find tape device by a device index.
523 */
524struct tape_device *
525tape_get_device(int devindex)
526{
527 struct tape_device *device, *tmp;
528
529 device = ERR_PTR(-ENODEV);
530 read_lock(&tape_device_lock);
531 list_for_each_entry(tmp, &tape_device_list, node) {
532 if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
533 device = tape_get_device_reference(tmp);
534 break;
535 }
536 }
537 read_unlock(&tape_device_lock);
538 return device;
539}
540
541/*
542 * Driverfs tape probe function.
543 */
544int
545tape_generic_probe(struct ccw_device *cdev)
546{
547 struct tape_device *device;
548
549 device = tape_alloc_device();
550 if (IS_ERR(device))
551 return -ENODEV;
552 PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
553 cdev->dev.driver_data = device;
554 device->cdev = cdev;
555 device->cdev_id = busid_to_int(cdev->dev.bus_id);
556 cdev->handler = __tape_do_irq;
557
558 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
559 sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
560
561 return 0;
562}
563
static inline void
__tape_discard_requests(struct tape_device *device)
{
	struct tape_request * request;
	struct list_head * l, *n;

	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);
		/* No interrupt will ever arrive for a vanished device. */
		if (request->status == TAPE_REQUEST_IN_IO)
			request->status = TAPE_REQUEST_DONE;
		list_del(&request->list);

		/* Decrease ref_count for removed request. */
		request->device = tape_put_device(device);
		request->rc = -EIO;
		/* Notify the owner that the request failed. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}
583
584/*
585 * Driverfs tape remove function.
586 *
587 * This function is called whenever the common I/O layer detects the device
588 * gone. This can happen at any time and we cannot refuse.
589 */
void
tape_generic_remove(struct ccw_device *cdev)
{
	struct tape_device * device;

	device = cdev->dev.driver_data;
	if (!device) {
		PRINT_ERR("No device pointer in tape_generic_remove!\n");
		return;
	}
	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
		case TS_INIT:
			tape_state_set(device, TS_NOT_OPER);
			/* fallthrough */
		case TS_NOT_OPER:
			/*
			 * Nothing to do.
			 */
			spin_unlock_irq(get_ccwdev_lock(device->cdev));
			break;
		case TS_UNUSED:
			/*
			 * Need only to release the device.
			 */
			tape_state_set(device, TS_NOT_OPER);
			spin_unlock_irq(get_ccwdev_lock(device->cdev));
			tape_cleanup_device(device);
			break;
		default:
			/*
			 * There may be requests on the queue. We will not get
			 * an interrupt for a request that was running. So we
			 * just post them all as I/O errors.
			 */
			DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
				device->cdev_id);
			PRINT_WARN("(%s): Drive in use vanished - "
				"expect trouble!\n",
				device->cdev->dev.bus_id);
			PRINT_WARN("State was %i\n", device->tape_state);
			tape_state_set(device, TS_NOT_OPER);
			__tape_discard_requests(device);
			spin_unlock_irq(get_ccwdev_lock(device->cdev));
			tape_cleanup_device(device);
	}

	/* Remove the sysfs attributes and drop the probe reference. */
	if (cdev->dev.driver_data != NULL) {
		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
		cdev->dev.driver_data = tape_put_device(cdev->dev.driver_data);
	}
}
643
644/*
645 * Allocate a new tape ccw request
646 */
647struct tape_request *
648tape_alloc_request(int cplength, int datasize)
649{
650 struct tape_request *request;
651
652 if (datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
653 BUG();
654
655 DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);
656
657 request = (struct tape_request *) kmalloc(sizeof(struct tape_request),
658 GFP_KERNEL);
659 if (request == NULL) {
660 DBF_EXCEPTION(1, "cqra nomem\n");
661 return ERR_PTR(-ENOMEM);
662 }
663 memset(request, 0, sizeof(struct tape_request));
664 /* allocate channel program */
665 if (cplength > 0) {
666 request->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
667 GFP_ATOMIC | GFP_DMA);
668 if (request->cpaddr == NULL) {
669 DBF_EXCEPTION(1, "cqra nomem\n");
670 kfree(request);
671 return ERR_PTR(-ENOMEM);
672 }
673 memset(request->cpaddr, 0, cplength*sizeof(struct ccw1));
674 }
675 /* alloc small kernel buffer */
676 if (datasize > 0) {
677 request->cpdata = kmalloc(datasize, GFP_KERNEL | GFP_DMA);
678 if (request->cpdata == NULL) {
679 DBF_EXCEPTION(1, "cqra nomem\n");
680 if (request->cpaddr != NULL)
681 kfree(request->cpaddr);
682 kfree(request);
683 return ERR_PTR(-ENOMEM);
684 }
685 memset(request->cpdata, 0, datasize);
686 }
687 DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
688 request->cpdata);
689
690 return request;
691}
692
693/*
694 * Free tape ccw request
695 */
696void
697tape_free_request (struct tape_request * request)
698{
699 DBF_LH(6, "Free request %p\n", request);
700
701 if (request->device != NULL) {
702 request->device = tape_put_device(request->device);
703 }
704 if (request->cpdata != NULL)
705 kfree(request->cpdata);
706 if (request->cpaddr != NULL)
707 kfree(request->cpaddr);
708 kfree(request);
709}
710
static inline void
__tape_do_io_list(struct tape_device *device)
{
	struct list_head *l, *n;
	struct tape_request *request;
	int rc;

	DBF_LH(6, "__tape_do_io_list(%p)\n", device);
	/*
	 * Try to start each request on request queue until one is
	 * started successful.
	 */
	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);
#ifdef CONFIG_S390_TAPE_BLOCK
		if (request->op == TO_BLOCK)
			device->discipline->check_locate(device, request);
#endif
		rc = ccw_device_start(device->cdev, request->cpaddr,
				      (unsigned long) request, 0x00,
				      request->options);
		if (rc == 0) {
			request->status = TAPE_REQUEST_IN_IO;
			break;
		}
		/* Start failed. Remove request and indicate failure. */
		DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc);

		/* Set ending status and do callback. */
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;
		/*
		 * NOTE(review): __tape_remove_request() calls back into
		 * __tape_do_io_list() when the queue is non-empty, so a
		 * long run of failing starts recurses once per failure.
		 */
		__tape_remove_request(device, request);
	}
}
745
/*
 * Dequeue a finished request, run its completion callback and try to
 * start the next queued request. Presumably runs with the ccw device
 * lock held, matching __tape_do_io()'s contract - TODO confirm.
 */
static void
__tape_remove_request(struct tape_device *device, struct tape_request *request)
{
	/* Remove from request queue. */
	list_del(&request->list);

	/* Do callback. */
	if (request->callback != NULL)
		request->callback(request, request->callback_data);

	/* Start next request. */
	if (!list_empty(&device->req_queue))
		__tape_do_io_list(device);
}
760
761/*
762 * Write sense data to console/dbf
763 */
void
tape_dump_sense(struct tape_device* device, struct tape_request *request,
		struct irb *irb)
{
	unsigned int *sptr;

	/* Log device status, current operation and the 32-byte sense area. */
	PRINT_INFO("-------------------------------------------------\n");
	PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n",
		irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa);
	PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id);
	if (request != NULL)
		PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]);

	/* The extended control word holds the sense bytes. */
	sptr = (unsigned int *) irb->ecw;
	PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
		sptr[0], sptr[1], sptr[2], sptr[3]);
	PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
		sptr[4], sptr[5], sptr[6], sptr[7]);
	PRINT_INFO("--------------------------------------------------\n");
}
784
785/*
786 * Write sense data to dbf
787 */
788void
789tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
790 struct irb *irb)
791{
792 unsigned int *sptr;
793 const char* op;
794
795 if (request != NULL)
796 op = tape_op_verbose[request->op];
797 else
798 op = "---";
799 DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
800 irb->scsw.dstat,irb->scsw.cstat);
801 DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
802 sptr = (unsigned int *) irb->ecw;
803 DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
804 DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
805 DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
806 DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
807}
808
/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 *
 * Returns 0 on success (request queued or started), -ENODEV if the
 * device state does not allow the operation, or the rc of
 * ccw_device_start if starting the channel program failed.
 */
static inline int
__tape_do_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	/* Validate the device state against the requested operation. */
	switch (request->op) {
		case TO_MSEN:
		case TO_ASSIGN:
		case TO_UNASSIGN:
		case TO_READ_ATTMSG:
			/* These ops are also allowed before the device is opened. */
			if (device->tape_state == TS_INIT)
				break;
			if (device->tape_state == TS_UNUSED)
				break;
			/* fallthrough - otherwise checked like any other op */
		default:
			if (device->tape_state == TS_BLKUSE)
				break;
			if (device->tape_state != TS_IN_USE)
				return -ENODEV;
	}

	/* Increase use count of device for the added request. */
	request->device = tape_get_device_reference(device);

	if (list_empty(&device->req_queue)) {
		/* No other requests are on the queue. Start this one. */
#ifdef CONFIG_S390_TAPE_BLOCK
		if (request->op == TO_BLOCK)
			/* Block requests get their locate CCW patched first. */
			device->discipline->check_locate(device, request);
#endif
		rc = ccw_device_start(device->cdev, request->cpaddr,
				      (unsigned long) request, 0x00,
				      request->options);
		if (rc) {
			DBF_EVENT(1, "tape: DOIO failed with rc = %i\n", rc);
			return rc;
		}
		DBF_LH(5, "Request %p added for execution.\n", request);
		list_add(&request->list, &device->req_queue);
		request->status = TAPE_REQUEST_IN_IO;
	} else {
		/* Device busy: queue the request behind the running one. */
		DBF_LH(5, "Request %p add to queue.\n", request);
		list_add_tail(&request->list, &device->req_queue);
		request->status = TAPE_REQUEST_QUEUED;
	}
	return 0;
}
861
/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of i/o; completion
 * is signalled through the request's callback.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
	int rc;

	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Add request to request queue and try to start it. */
	rc = __tape_do_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}
879
/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptible for its completion.
 */
static void
__tape_wake_up(struct tape_request *request, void *data)
{
	/* Clearing the callback is the wake-up condition tape_do_io
	 * waits on, so it must happen before the wake_up call. */
	request->callback = NULL;
	wake_up((wait_queue_head_t *) data);
}
891
/*
 * Execute a request synchronously: queue it, start it if the tape is
 * idle and sleep uninterruptibly until the interrupt handler signals
 * completion by clearing request->callback. Returns the rc stored in
 * the request, or the error from queueing/starting it.
 */
int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
	wait_queue_head_t wq;
	int rc;

	init_waitqueue_head(&wq);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up;
	request->callback_data = &wq;
	/* Add request to request queue and try to start it. */
	rc = __tape_do_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	wait_event(wq, (request->callback == NULL));
	/* Get rc from request */
	return request->rc;
}
913
/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptible for its completion.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
	/* Clear the callback first; it is the condition the interruptible
	 * waiter in tape_do_io_interruptible checks. */
	request->callback = NULL;
	wake_up_interruptible((wait_queue_head_t *) data);
}
925
/*
 * Execute a request synchronously but allow signals to interrupt the
 * wait. If a signal arrives the running channel program is halted; on
 * successful halt -ERESTARTSYS is returned, otherwise the rc of
 * __tape_halt_io (semantics defined elsewhere - see tape.h) is passed on.
 */
int
tape_do_io_interruptible(struct tape_device *device,
			 struct tape_request *request)
{
	wait_queue_head_t wq;
	int rc;

	init_waitqueue_head(&wq);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up_interruptible;
	request->callback_data = &wq;
	rc = __tape_do_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	rc = wait_event_interruptible(wq, (request->callback == NULL));
	if (rc != -ERESTARTSYS)
		/* Request finished normally. */
		return request->rc;
	/* Interrupted by a signal. We have to stop the current request. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_halt_io(device, request);
	if (rc == 0) {
		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
		rc = -ERESTARTSYS;
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}
957
958/*
959 * Handle requests that return an i/o error in the irb.
960 */
961static inline void
962tape_handle_killed_request(
963 struct tape_device *device,
964 struct tape_request *request)
965{
966 if(request != NULL) {
967 /* Set ending status. FIXME: Should the request be retried? */
968 request->rc = -EIO;
969 request->status = TAPE_REQUEST_DONE;
970 __tape_remove_request(device, request);
971 } else {
972 __tape_do_io_list(device);
973 }
974}
975
/*
 * Tape interrupt routine, called from the ccw_device layer.
 * Dispatches the interrupt to the discipline's irq handler and finishes,
 * requeues or restarts the request depending on the handler's verdict.
 */
static void
__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct tape_device *device;
	struct tape_request *request;
	int final;
	int rc;

	device = (struct tape_device *) cdev->dev.driver_data;
	if (device == NULL) {
		PRINT_ERR("could not get device structure for %s "
			"in interrupt\n", cdev->dev.bus_id);
		return;
	}
	/* intparm is the value passed to ccw_device_start in __tape_do_io,
	 * i.e. the request; NULL for unsolicited interrupts. */
	request = (struct tape_request *) intparm;

	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
			case -ETIMEDOUT:
				PRINT_WARN("(%s): Request timed out\n",
					cdev->dev.bus_id);
				/* fallthrough - a timed-out request is
				 * finished like a killed one */
			case -EIO:
				tape_handle_killed_request(device, request);
				break;
			default:
				PRINT_ERR("(%s): Unexpected i/o error %li\n",
					cdev->dev.bus_id,
					PTR_ERR(irb));
		}
		return;
	}

	/* May be an unsolicited irq */
	if(request != NULL)
		request->rescnt = irb->scsw.count;

	/* 0x0c == channel end + device end, the normal completion status. */
	if (irb->scsw.dstat != 0x0c) {
		/* Set the 'ONLINE' flag depending on sense byte 1 */
		if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
			device->tape_generic_status |= GMT_ONLINE(~0);
		else
			device->tape_generic_status &= ~GMT_ONLINE(~0);

		/*
		 * Any request that does not come back with channel end
		 * and device end is unusual. Log the sense data.
		 */
		DBF_EVENT(3,"-- Tape Interrupthandler --\n");
		tape_dump_sense_dbf(device, request, irb);
	} else {
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
	}
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "tape:device is not operational\n");
		return;
	}

	/*
	 * Request that were canceled still come back with an interrupt.
	 * To detect these request the state will be set to TAPE_REQUEST_DONE.
	 */
	if(request != NULL && request->status == TAPE_REQUEST_DONE) {
		__tape_remove_request(device, request);
		return;
	}

	rc = device->discipline->irq(device, request, irb);
	/*
	 * rc < 0 : request finished unsuccessfully.
	 * rc == TAPE_IO_SUCCESS: request finished successfully.
	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
	 * rc == TAPE_IO_RETRY: request finished but needs another go.
	 * rc == TAPE_IO_STOP: request needs to get terminated.
	 */
	final = 0;
	switch (rc) {
	case TAPE_IO_SUCCESS:
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
		final = 1;
		break;
	case TAPE_IO_PENDING:
		break;
	case TAPE_IO_RETRY:
		/* NOTE(review): this path dereferences request, so the
		 * discipline presumably never returns TAPE_IO_RETRY for an
		 * unsolicited interrupt (request == NULL) - confirm. */
#ifdef CONFIG_S390_TAPE_BLOCK
		if (request->op == TO_BLOCK)
			device->discipline->check_locate(device, request);
#endif
		rc = ccw_device_start(cdev, request->cpaddr,
				      (unsigned long) request, 0x00,
				      request->options);
		if (rc) {
			DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc);
			final = 1;
		}
		break;
	case TAPE_IO_STOP:
		__tape_halt_io(device, request);
		break;
	default:
		/* Any other positive rc is a discipline bug; map it to -EIO. */
		if (rc > 0) {
			DBF_EVENT(6, "xunknownrc\n");
			PRINT_ERR("Invalid return code from discipline "
				  "interrupt function.\n");
			rc = -EIO;
		}
		final = 1;
		break;
	}
	if (final) {
		/* May be an unsolicited irq */
		if(request != NULL) {
			/* Set ending status. */
			request->rc = rc;
			request->status = TAPE_REQUEST_DONE;
			__tape_remove_request(device, request);
		} else {
			__tape_do_io_list(device);
		}
	}
}
1104
1105/*
1106 * Tape device open function used by tape_char & tape_block frontends.
1107 */
1108int
1109tape_open(struct tape_device *device)
1110{
1111 int rc;
1112
1113 spin_lock(get_ccwdev_lock(device->cdev));
1114 if (device->tape_state == TS_NOT_OPER) {
1115 DBF_EVENT(6, "TAPE:nodev\n");
1116 rc = -ENODEV;
1117 } else if (device->tape_state == TS_IN_USE) {
1118 DBF_EVENT(6, "TAPE:dbusy\n");
1119 rc = -EBUSY;
1120 } else if (device->tape_state == TS_BLKUSE) {
1121 DBF_EVENT(6, "TAPE:dbusy\n");
1122 rc = -EBUSY;
1123 } else if (device->discipline != NULL &&
1124 !try_module_get(device->discipline->owner)) {
1125 DBF_EVENT(6, "TAPE:nodisc\n");
1126 rc = -ENODEV;
1127 } else {
1128 tape_state_set(device, TS_IN_USE);
1129 rc = 0;
1130 }
1131 spin_unlock(get_ccwdev_lock(device->cdev));
1132 return rc;
1133}
1134
/*
 * Tape device release function used by tape_char & tape_block frontends.
 * Drops the exclusive-use state and the discipline module reference.
 * Always returns 0.
 */
int
tape_release(struct tape_device *device)
{
	spin_lock(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_IN_USE)
		tape_state_set(device, TS_UNUSED);
	/* NOTE(review): module_put dereferences device->discipline
	 * unconditionally, while tape_open tolerates a NULL discipline -
	 * presumably release is only reachable after a successful open
	 * with a discipline set; confirm. */
	module_put(device->discipline->owner);
	spin_unlock(get_ccwdev_lock(device->cdev));
	return 0;
}
1148
/*
 * Execute a magnetic tape command a number of times.
 * Looks up the discipline handler for mt_op and calls it; space
 * operations with large counts are split into chunks of 500 because
 * each repetition needs its own CCW in the channel program.
 * Returns -EINVAL for unknown/unsupported operations, otherwise the
 * rc of the discipline handler.
 */
int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
	tape_mtop_fn fn;
	int rc;

	DBF_EVENT(6, "TAPE:mtio\n");
	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
	DBF_EVENT(6, "TAPE:arg:	 %x\n", mt_count);

	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
		return -EINVAL;
	fn = device->discipline->mtop_array[mt_op];
	if (fn == NULL)
		return -EINVAL;

	/* We assume that the backends can handle count up to 500. */
	if (mt_op == MTBSR  || mt_op == MTFSR  || mt_op == MTFSF  ||
	    mt_op == MTBSF  || mt_op == MTFSFM || mt_op == MTBSFM) {
		rc = 0;
		for (; mt_count > 500; mt_count -= 500)
			if ((rc = fn(device, 500)) != 0)
				break;
		/* Remaining count (<= 500) in one final call. */
		if (rc == 0)
			rc = fn(device, mt_count);
	} else
		rc = fn(device, mt_count);
	return rc;

}
1182
1183/*
1184 * Tape init function.
1185 */
1186static int
1187tape_init (void)
1188{
1189 TAPE_DBF_AREA = debug_register ( "tape", 1, 2, 4*sizeof(long));
1190 debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
1191#ifdef DBF_LIKE_HELL
1192 debug_set_level(TAPE_DBF_AREA, 6);
1193#endif
1194 DBF_EVENT(3, "tape init: ($Revision: 1.51 $)\n");
1195 tape_proc_init();
1196 tapechar_init ();
1197 tapeblock_init ();
1198 return 0;
1199}
1200
/*
 * Tape exit function. Tears down the frontends, the proc entry and
 * the debug feature area in reverse order of registration.
 */
static void
tape_exit(void)
{
	DBF_EVENT(6, "tape exit\n");

	/* Get rid of the frontends */
	tapechar_exit();
	tapeblock_exit();
	tape_proc_cleanup();
	debug_unregister (TAPE_DBF_AREA);
}
1215
/* Module metadata. */
MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached "
		   "tape device driver ($Revision: 1.51 $)");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);

/* Interface used by the tape disciplines and the char/block frontends. */
EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device_reference);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_mtop);
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
new file mode 100644
index 000000000000..801d17cca34e
--- /dev/null
+++ b/drivers/s390/char/tape_proc.c
@@ -0,0 +1,145 @@
1/*
 * drivers/s390/char/tape_proc.c
3 * tape device driver for S/390 and zSeries tapes.
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001 IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
10 *
11 * PROCFS Functions
12 */
13
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/seq_file.h>
18
19#define TAPE_DBF_AREA tape_core_dbf
20
21#include "tape.h"
22
23#define PRINTK_HEADER "TAPE_PROC: "
24
/* Human-readable names for the medium states, indexed by enum value. */
static const char *tape_med_st_verbose[MS_SIZE] =
{
	[MS_UNKNOWN] = "UNKNOWN ",
	[MS_LOADED] = "LOADED  ",
	[MS_UNLOADED] = "UNLOADED"
};

/* our proc tapedevices entry */
static struct proc_dir_entry *tape_proc_devices;
34
/*
 * Show function for /proc/tapedevices.
 * Prints one line per tape device: minor, bus id, control unit and
 * device type/model, block size, device state, current operation and
 * medium state. Emits the header before the first device.
 */
static int tape_proc_show(struct seq_file *m, void *v)
{
	struct tape_device *device;
	struct tape_request *request;
	const char *str;
	unsigned long n;

	/* The iterator stores position+1 in v (see tape_proc_start). */
	n = (unsigned long) v - 1;
	if (!n) {
		seq_printf(m, "TapeNo\tBusID      CuType/Model\t"
			"DevType/Model\tBlkSize\tState\tOp\tMedState\n");
	}
	device = tape_get_device(n);
	if (IS_ERR(device))
		/* No device at this index; print nothing for this slot. */
		return 0;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	seq_printf(m, "%d\t", (int) n);
	seq_printf(m, "%-10.10s ", device->cdev->dev.bus_id);
	seq_printf(m, "%04X/", device->cdev->id.cu_type);
	seq_printf(m, "%02X\t", device->cdev->id.cu_model);
	seq_printf(m, "%04X/", device->cdev->id.dev_type);
	seq_printf(m, "%02X\t\t", device->cdev->id.dev_model);
	/* Block size 0 means "auto": buffers are sized per request. */
	if (device->char_data.block_size == 0)
		seq_printf(m, "auto\t");
	else
		seq_printf(m, "%i\t", device->char_data.block_size);
	if (device->tape_state >= 0 &&
	    device->tape_state < TS_SIZE)
		str = tape_state_verbose[device->tape_state];
	else
		str = "UNKNOWN";
	seq_printf(m, "%s\t", str);
	/* Show the operation of the request at the head of the queue. */
	if (!list_empty(&device->req_queue)) {
		request = list_entry(device->req_queue.next,
			struct tape_request, list);
		str = tape_op_verbose[request->op];
	} else
		str = "---";
	seq_printf(m, "%s\t", str);
	seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	tape_put_device(device);
	return 0;
}
82
/*
 * seq_file start: map the position to an iterator token.
 * Returns *pos + 1 so that position 0 is distinguishable from the
 * NULL end-of-sequence marker. The bound 256 / TAPE_MINORS_PER_DEV is
 * presumably the maximum number of tape devices - confirm in tape.h.
 */
static void *tape_proc_start(struct seq_file *m, loff_t *pos)
{
	if (*pos >= 256 / TAPE_MINORS_PER_DEV)
		return NULL;
	return (void *)((unsigned long) *pos + 1);
}
89
90static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos)
91{
92 ++*pos;
93 return tape_proc_start(m, pos);
94}
95
/* seq_file stop: nothing was acquired in start, so nothing to release. */
static void tape_proc_stop(struct seq_file *m, void *v)
{
}
99
/* seq_file iterator operations for /proc/tapedevices. */
static struct seq_operations tape_proc_seq = {
	.start		= tape_proc_start,
	.next		= tape_proc_next,
	.stop		= tape_proc_stop,
	.show		= tape_proc_show,
};
106
/* open handler: attach the seq_file iterator to the proc file. */
static int tape_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &tape_proc_seq);
}
111
/* file operations for /proc/tapedevices; read side handled by seq_file. */
static struct file_operations tape_proc_ops =
{
	.open		= tape_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
119
/*
 * Initialize procfs stuff on startup.
 * Creates /proc/tapedevices; failure is logged but non-fatal (the
 * driver works without the proc entry).
 */
void
tape_proc_init(void)
{
	tape_proc_devices =
		create_proc_entry ("tapedevices", S_IFREG | S_IRUGO | S_IWUSR,
				   &proc_root);
	if (tape_proc_devices == NULL) {
		PRINT_WARN("tape: Cannot register procfs entry tapedevices\n");
		return;
	}
	tape_proc_devices->proc_fops = &tape_proc_ops;
	tape_proc_devices->owner = THIS_MODULE;
}
136
137/*
138 * Cleanup all stuff registered to the procfs
139 */
140void
141tape_proc_cleanup(void)
142{
143 if (tape_proc_devices != NULL)
144 remove_proc_entry ("tapedevices", &proc_root);
145}
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
new file mode 100644
index 000000000000..2f9fe30989a7
--- /dev/null
+++ b/drivers/s390/char/tape_std.c
@@ -0,0 +1,765 @@
1/*
2 * drivers/s390/char/tape_std.c
3 * standard tape device functions for ibm tapes.
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
10 * Martin Schwidefsky <schwidefsky@de.ibm.com>
11 * Stefan Bader <shbader@de.ibm.com>
12 */
13
14#include <linux/config.h>
15#include <linux/stddef.h>
16#include <linux/kernel.h>
17#include <linux/bio.h>
18#include <linux/timer.h>
19
20#include <asm/types.h>
21#include <asm/idals.h>
22#include <asm/ebcdic.h>
23#include <asm/tape390.h>
24
25#define TAPE_DBF_AREA tape_core_dbf
26
27#include "tape.h"
28#include "tape_std.h"
29
30#define PRINTK_HEADER "TAPE_STD: "
31
/*
 * tape_std_assign
 * Timer callback for tape_std_assign: if the assign request is still
 * pending (callback not yet cleared) after the timeout, terminate the
 * channel program with a clear subchannel.
 */
static void
tape_std_assign_timeout(unsigned long data)
{
	struct tape_request *	request;
	struct tape_device *	device;

	/* data is the request pointer set up in tape_std_assign. */
	request = (struct tape_request *) data;
	if ((device = request->device) == NULL)
		BUG();

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* A non-NULL callback means the request has not completed yet. */
	if (request->callback != NULL) {
		DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
			device->cdev_id);
		PRINT_ERR("%s: Assignment timeout. Device busy.\n",
			device->cdev->dev.bus_id);
		ccw_device_clear(device->cdev, (long) request);
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
55
/*
 * Assign the tape drive to this host.
 * Runs an ASSIGN channel program guarded by a 2 second timeout because
 * the command can block when the drive is assigned to another host.
 * Returns 0 on success or the rc of the interruptible i/o.
 */
int
tape_std_assign(struct tape_device *device)
{
	int                  rc;
	struct timer_list    timeout;
	struct tape_request *request;

	request = tape_alloc_request(2, 11);
	if (IS_ERR(request))
		return PTR_ERR(request);

	request->op = TO_ASSIGN;
	tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);
	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);

	/*
	 * The assign command sometimes blocks if the device is assigned
	 * to another host (actually this shouldn't happen but it does).
	 * So we set up a timeout for this call.
	 */
	init_timer(&timeout);
	timeout.function = tape_std_assign_timeout;
	timeout.data     = (unsigned long) request;
	timeout.expires  = jiffies + 2 * HZ;
	add_timer(&timeout);

	rc = tape_do_io_interruptible(device, request);

	/* NOTE(review): plain del_timer - a timeout handler already running
	 * on another CPU may still touch the request; confirm whether
	 * del_timer_sync would be needed here. */
	del_timer(&timeout);

	if (rc != 0) {
		PRINT_WARN("%s: assign failed - device might be busy\n",
			device->cdev->dev.bus_id);
		DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
			device->cdev_id);
	} else {
		DBF_EVENT(3, "%08x: Tape assigned\n", device->cdev_id);
	}
	tape_free_request(request);
	return rc;
}
97
/*
 * tape_std_unassign
 * Release the drive assignment taken by tape_std_assign.
 * Refuses with -EIO if the device is no longer operational.
 */
int
tape_std_unassign (struct tape_device *device)
{
	int                  rc;
	struct tape_request *request;

	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(3, "(%08x): Can't unassign device\n",
			device->cdev_id);
		PRINT_WARN("(%s): Can't unassign device - device gone\n",
			device->cdev->dev.bus_id);
		return -EIO;
	}

	request = tape_alloc_request(2, 11);
	if (IS_ERR(request))
		return PTR_ERR(request);

	request->op = TO_UNASSIGN;
	tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata);
	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);

	if ((rc = tape_do_io(device, request)) != 0) {
		DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id);
		PRINT_WARN("%s: Unassign failed\n", device->cdev->dev.bus_id);
	} else {
		DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id);
	}
	tape_free_request(request);
	return rc;
}
132
/*
 * TAPE390_DISPLAY: Show a string on the tape display.
 * Builds a 17 byte LOAD_DISPLAY payload: one control byte followed by
 * two 8 character messages which are converted from ASCII to EBCDIC.
 */
int
tape_std_display(struct tape_device *device, struct display_struct *disp)
{
	struct tape_request *request;
	int rc;

	request = tape_alloc_request(2, 17);
	if (IS_ERR(request)) {
		DBF_EVENT(3, "TAPE: load display failed\n");
		return PTR_ERR(request);
	}
	request->op = TO_DIS;

	*(unsigned char *) request->cpdata = disp->cntrl;
	DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl);
	memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8);
	memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8);
	/* The device expects EBCDIC text. */
	ASCEBC(((unsigned char*) request->cpdata) + 1, 16);

	tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata);
	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);

	rc = tape_do_io_interruptible(device, request);
	tape_free_request(request);
	return rc;
}
162
/*
 * Read block id.
 * Issues READ_BLOCK_ID and on success stores the 8 byte result from
 * the channel program data area into *id.
 */
int
tape_std_read_block_id(struct tape_device *device, __u64 *id)
{
	struct tape_request *request;
	int rc;

	request = tape_alloc_request(3, 8);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_RBI;
	/* setup ccws */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata);
	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
	/* execute it */
	rc = tape_do_io(device, request);
	if (rc == 0)
		/* Get result from read buffer. */
		*id = *(__u64 *) request->cpdata;
	tape_free_request(request);
	return rc;
}
188
/*
 * Finish a write phase: write the outstanding tapemarks and back up
 * over the last one so subsequent writes append correctly.
 * No-op (returns 0) if no tapemarks are pending.
 */
int
tape_std_terminate_write(struct tape_device *device)
{
	int rc;

	if(device->required_tapemarks == 0)
		return 0;

	DBF_LH(5, "tape%d: terminate write %dxEOF\n", device->first_minor,
		device->required_tapemarks);

	rc = tape_mtop(device, MTWEOF, device->required_tapemarks);
	if (rc)
		return rc;

	device->required_tapemarks = 0;
	/* Position before the last written tapemark. */
	return tape_mtop(device, MTBSR, 1);
}
207
/*
 * MTLOAD: Loads the tape.
 * The default implementation just waits (interruptibly) until the tape
 * medium state changes to MS_LOADED; count is ignored.
 */
int
tape_std_mtload(struct tape_device *device, int count)
{
	return wait_event_interruptible(device->state_change_wq,
		(device->medium_state == MS_LOADED));
}
219
/*
 * MTSETBLK: Set block size.
 * count <= 0 selects "auto" mode (block_size 0); otherwise an idal
 * buffer of the requested size is (re)allocated, bounded by
 * MAX_BLOCKSIZE. Returns 0, -EINVAL or -ENOMEM.
 */
int
tape_std_mtsetblk(struct tape_device *device, int count)
{
	struct idal_buffer *new;

	DBF_LH(6, "tape_std_mtsetblk(%d)\n", count);
	if (count <= 0) {
		/*
		 * Just set block_size to 0. tapechar_read/tapechar_write
		 * will realloc the idal buffer if a bigger one than the
		 * current is needed.
		 */
		device->char_data.block_size = 0;
		return 0;
	}
	if (device->char_data.idal_buf != NULL &&
	    device->char_data.idal_buf->size == count)
		/* We already have a idal buffer of that size. */
		return 0;

	if (count > MAX_BLOCKSIZE) {
		DBF_EVENT(3, "Invalid block size (%d > %d) given.\n",
			count, MAX_BLOCKSIZE);
		PRINT_ERR("Invalid block size (%d > %d) given.\n",
			count, MAX_BLOCKSIZE);
		return -EINVAL;
	}

	/* Allocate a new idal buffer. */
	new = idal_buffer_alloc(count, 0);
	if (new == NULL)
		return -ENOMEM;
	/* Replace (and free) the old buffer only after the new one exists. */
	if (device->char_data.idal_buf != NULL)
		idal_buffer_free(device->char_data.idal_buf);
	device->char_data.idal_buf = new;
	device->char_data.block_size = count;

	DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size);

	return 0;
}
264
/*
 * MTRESET: Set block size to 0 ("auto" mode); count is ignored.
 */
int
tape_std_mtreset(struct tape_device *device, int count)
{
	DBF_EVENT(6, "TCHAR:devreset:\n");
	device->char_data.block_size = 0;
	return 0;
}
275
/*
 * MTFSF: Forward space over 'count' file marks. The tape is positioned
 * at the EOT (End of Tape) side of the file mark.
 * Channel program: mode set + mt_count FORSPACEFILE CCWs + NOP;
 * request is freed by tape_do_io_free.
 */
int
tape_std_mtfsf(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	struct ccw1 *ccw;

	request = tape_alloc_request(mt_count + 2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_FSF;
	/* setup ccws */
	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
			  device->modeset_byte);
	ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
	ccw = tape_ccw_end(ccw, NOP, 0, NULL);

	/* execute it */
	return tape_do_io_free(device, request);
}
299
/*
 * MTFSR: Forward space over 'count' tape blocks (blocksize is set
 * via MTSETBLK).
 * Returns 1 if a tapemark was encountered (residual count > 0),
 * 0 on success, negative error otherwise.
 */
int
tape_std_mtfsr(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	struct ccw1 *ccw;
	int rc;

	request = tape_alloc_request(mt_count + 2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_FSB;
	/* setup ccws */
	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
			  device->modeset_byte);
	ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count);
	ccw = tape_ccw_end(ccw, NOP, 0, NULL);

	/* execute it */
	rc = tape_do_io(device, request);
	if (rc == 0 && request->rescnt > 0) {
		/* Not all blocks were skipped: a tapemark stopped us. */
		DBF_LH(3, "FSR over tapemark\n");
		rc = 1;
	}
	tape_free_request(request);

	return rc;
}
331
/*
 * MTBSR: Backward space over 'count' tape blocks.
 * (blocksize is set via MTSETBLK.)
 * Returns 1 if a tapemark was encountered, 0 on success,
 * negative error otherwise.
 */
int
tape_std_mtbsr(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	struct ccw1 *ccw;
	int rc;

	request = tape_alloc_request(mt_count + 2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_BSB;
	/* setup ccws */
	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
			  device->modeset_byte);
	ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count);
	ccw = tape_ccw_end(ccw, NOP, 0, NULL);

	/* execute it */
	rc = tape_do_io(device, request);
	if (rc == 0 && request->rescnt > 0) {
		/* Not all blocks were skipped: a tapemark stopped us. */
		DBF_LH(3, "BSR over tapemark\n");
		rc = 1;
	}
	tape_free_request(request);

	return rc;
}
363
/*
 * MTWEOF: Write 'count' file marks at the current position.
 * Channel program: mode set + mt_count WRITETAPEMARK CCWs + NOP;
 * request is freed by tape_do_io_free.
 */
int
tape_std_mtweof(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	struct ccw1 *ccw;

	request = tape_alloc_request(mt_count + 2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_WTM;
	/* setup ccws */
	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
			  device->modeset_byte);
	ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count);
	ccw = tape_ccw_end(ccw, NOP, 0, NULL);

	/* execute it */
	return tape_do_io_free(device, request);
}
386
/*
 * MTBSFM: Backward space over 'count' file marks.
 * The tape is positioned at the BOT (Begin Of Tape) side of the
 * last skipped file mark.
 */
int
tape_std_mtbsfm(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	struct ccw1 *ccw;

	request = tape_alloc_request(mt_count + 2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_BSF;
	/* setup ccws */
	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
			  device->modeset_byte);
	ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
	ccw = tape_ccw_end(ccw, NOP, 0, NULL);

	/* execute it */
	return tape_do_io_free(device, request);
}
411
/*
 * MTBSF: Backward space over 'count' file marks. The tape is positioned at
 * the EOT (End of Tape) side of the last skipped file mark.
 * Implemented as BSF followed by one MTFSR to cross back over the mark;
 * the FSR's "hit tapemark" rc (1) is mapped to success.
 */
int
tape_std_mtbsf(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	struct ccw1 *ccw;
	int rc;

	request = tape_alloc_request(mt_count + 2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_BSF;
	/* setup ccws */
	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
			  device->modeset_byte);
	ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
	/* execute it */
	rc = tape_do_io_free(device, request);
	if (rc == 0) {
		rc = tape_mtop(device, MTFSR, 1);
		if (rc > 0)
			rc = 0;
	}
	return rc;
}
441
/*
 * MTFSFM: Forward space over 'count' file marks.
 * The tape is positioned at the BOT (Begin Of Tape) side
 * of the last skipped file mark.
 * Implemented as FSF followed by one MTBSR to step back over the mark;
 * the BSR's "hit tapemark" rc (1) is mapped to success.
 */
int
tape_std_mtfsfm(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	struct ccw1 *ccw;
	int rc;

	request = tape_alloc_request(mt_count + 2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_FSF;
	/* setup ccws */
	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
			  device->modeset_byte);
	ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
	/* execute it */
	rc = tape_do_io_free(device, request);
	if (rc == 0) {
		rc = tape_mtop(device, MTBSR, 1);
		if (rc > 0)
			rc = 0;
	}

	return rc;
}
473
/*
 * MTREW: Rewind the tape; count is ignored.
 */
int
tape_std_mtrew(struct tape_device *device, int mt_count)
{
	struct tape_request *request;

	request = tape_alloc_request(3, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_REW;
	/* setup ccws */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
		    device->modeset_byte);
	tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);

	/* execute it */
	return tape_do_io_free(device, request);
}
495
/*
 * MTOFFL: Rewind the tape and put the drive off-line.
 * Implement 'rewind unload'; count is ignored.
 */
int
tape_std_mtoffl(struct tape_device *device, int mt_count)
{
	struct tape_request *request;

	request = tape_alloc_request(3, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_RUN;
	/* setup ccws */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL);
	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);

	/* execute it */
	return tape_do_io_free(device, request);
}
517
/*
 * MTNOP: 'No operation'.
 * Still issues a channel program (mode set + NOP) so errors on the
 * path/device are reported; count is ignored.
 */
int
tape_std_mtnop(struct tape_device *device, int mt_count)
{
	struct tape_request *request;

	request = tape_alloc_request(2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_NOP;
	/* setup ccws */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
	/* execute it */
	return tape_do_io_free(device, request);
}
536
/*
 * MTEOM: positions at the end of the portion of the tape already used
 * for recording data. MTEOM positions after the last file mark, ready for
 * appending another file; count is ignored.
 */
int
tape_std_mteom(struct tape_device *device, int mt_count)
{
	int rc;

	/*
	 * Seek from the beginning of tape (rewind).
	 */
	if ((rc = tape_mtop(device, MTREW, 1)) < 0)
		return rc;

	/*
	 * The logical end of volume is given by two sequential tapemarks.
	 * Look for this by skipping to the next file (over one tapemark)
	 * and then test for another one (fsr returns 1 if a tapemark was
	 * encountered).
	 */
	do {
		if ((rc = tape_mtop(device, MTFSF, 1)) < 0)
			return rc;
		if ((rc = tape_mtop(device, MTFSR, 1)) < 0)
			return rc;
	} while (rc == 0);

	/* Step back before the second (final) tapemark. */
	return tape_mtop(device, MTBSR, 1);
}
568
/*
 * MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind.
 * The channel program loops via a TIC back to its start, forward spacing
 * file by file - presumably until the drive terminates it at the end of
 * the recorded data (confirm against the hardware command reference).
 * The rc of the spacing phase is deliberately ignored; count is ignored.
 */
int
tape_std_mtreten(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	int rc;

	request = tape_alloc_request(4, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_FSF;
	/* setup ccws */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_cc(request->cpaddr + 1,FORSPACEFILE, 0, NULL);
	tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
	tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
	/* execute it, MTRETEN rc gets ignored */
	rc = tape_do_io_interruptible(device, request);
	tape_free_request(request);
	return tape_mtop(device, MTREW, 1);
}
592
593/*
594 * MTERASE: erases the tape.
595 */
596int
597tape_std_mterase(struct tape_device *device, int mt_count)
598{
599 struct tape_request *request;
600
601 request = tape_alloc_request(6, 0);
602 if (IS_ERR(request))
603 return PTR_ERR(request);
604 request->op = TO_DSE;
605 /* setup ccws */
606 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
607 tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
608 tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL);
609 tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL);
610 tape_ccw_cc(request->cpaddr + 4, REWIND, 0, NULL);
611 tape_ccw_end(request->cpaddr + 5, NOP, 0, NULL);
612
613 /* execute it */
614 return tape_do_io_free(device, request);
615}
616
617/*
618 * MTUNLOAD: Rewind the tape and unload it.
619 */
int
tape_std_mtunload(struct tape_device *device, int mt_count)
{
	/* Unload is implemented as "set offline", which rewinds and unloads. */
	return tape_mtop(device, MTOFFL, mt_count);
}
625
626/*
627 * MTCOMPRESSION: used to enable compression.
628 * Sets the IDRC on/off.
629 */
630int
631tape_std_mtcompression(struct tape_device *device, int mt_count)
632{
633 struct tape_request *request;
634
635 if (mt_count < 0 || mt_count > 1) {
636 DBF_EXCEPTION(6, "xcom parm\n");
637 if (*device->modeset_byte & 0x08)
638 PRINT_INFO("(%s) Compression is currently on\n",
639 device->cdev->dev.bus_id);
640 else
641 PRINT_INFO("(%s) Compression is currently off\n",
642 device->cdev->dev.bus_id);
643 PRINT_INFO("Use 1 to switch compression on, 0 to "
644 "switch it off\n");
645 return -EINVAL;
646 }
647 request = tape_alloc_request(2, 0);
648 if (IS_ERR(request))
649 return PTR_ERR(request);
650 request->op = TO_NOP;
651 /* setup ccws */
652 *device->modeset_byte = (mt_count == 0) ? 0x00 : 0x08;
653 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
654 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
655 /* execute it */
656 return tape_do_io_free(device, request);
657}
658
659/*
660 * Read Block
661 */
struct tape_request *
tape_std_read_block(struct tape_device *device, size_t count)
{
	struct tape_request *request;

	/*
	 * We have to alloc 4 ccws in order to be able to transform request
	 * into a read backward request in error case.
	 */
	request = tape_alloc_request(4, 0);
	if (IS_ERR(request)) {
		DBF_EXCEPTION(6, "xrbl fail");
		return request;
	}
	request->op = TO_RFO;
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	/* The data is transferred through the device's idal buffer. */
	tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD,
			  device->char_data.idal_buf);
	DBF_EVENT(6, "xrbl ccwg\n");
	return request;
}
683
684/*
685 * Read Block backward transformation function.
686 */
687void
688tape_std_read_backward(struct tape_device *device, struct tape_request *request)
689{
690 /*
691 * We have allocated 4 ccws in tape_std_read, so we can now
692 * transform the request to a read backward, followed by a
693 * forward space block.
694 */
695 request->op = TO_RBA;
696 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
697 tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD,
698 device->char_data.idal_buf);
699 tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
700 tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
701 DBF_EVENT(6, "xrop ccwg");}
702
703/*
704 * Write Block
705 */
706struct tape_request *
707tape_std_write_block(struct tape_device *device, size_t count)
708{
709 struct tape_request *request;
710
711 request = tape_alloc_request(2, 0);
712 if (IS_ERR(request)) {
713 DBF_EXCEPTION(6, "xwbl fail\n");
714 return request;
715 }
716 request->op = TO_WRI;
717 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
718 tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD,
719 device->char_data.idal_buf);
720 DBF_EVENT(6, "xwbl ccwg\n");
721 return request;
722}
723
724/*
725 * This routine is called by frontend after an ENOSP on write
726 */
void
tape_std_process_eov(struct tape_device *device)
{
	/*
	 * End of volume: We have to backspace the last written record, then
	 * we TRY to write a tapemark and then backspace over the written TM.
	 * This is best-effort; the return value of the final backspace is
	 * deliberately ignored.
	 */
	if (tape_mtop(device, MTBSR, 1) == 0 &&
	    tape_mtop(device, MTWEOF, 1) == 0) {
		tape_mtop(device, MTBSR, 1);
	}
}
739
/* Interface exported to the tape disciplines. */
EXPORT_SYMBOL(tape_std_assign);
EXPORT_SYMBOL(tape_std_unassign);
EXPORT_SYMBOL(tape_std_display);
EXPORT_SYMBOL(tape_std_read_block_id);
EXPORT_SYMBOL(tape_std_mtload);
EXPORT_SYMBOL(tape_std_mtsetblk);
EXPORT_SYMBOL(tape_std_mtreset);
EXPORT_SYMBOL(tape_std_mtfsf);
EXPORT_SYMBOL(tape_std_mtfsr);
EXPORT_SYMBOL(tape_std_mtbsr);
EXPORT_SYMBOL(tape_std_mtweof);
EXPORT_SYMBOL(tape_std_mtbsfm);
EXPORT_SYMBOL(tape_std_mtbsf);
EXPORT_SYMBOL(tape_std_mtfsfm);
EXPORT_SYMBOL(tape_std_mtrew);
EXPORT_SYMBOL(tape_std_mtoffl);
EXPORT_SYMBOL(tape_std_mtnop);
EXPORT_SYMBOL(tape_std_mteom);
EXPORT_SYMBOL(tape_std_mtreten);
EXPORT_SYMBOL(tape_std_mterase);
EXPORT_SYMBOL(tape_std_mtunload);
EXPORT_SYMBOL(tape_std_mtcompression);
EXPORT_SYMBOL(tape_std_read_block);
EXPORT_SYMBOL(tape_std_read_backward);
EXPORT_SYMBOL(tape_std_write_block);
EXPORT_SYMBOL(tape_std_process_eov);
diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h
new file mode 100644
index 000000000000..3ab6aafb7343
--- /dev/null
+++ b/drivers/s390/char/tape_std.h
@@ -0,0 +1,152 @@
1/*
 * drivers/s390/char/tape_std.h
3 * standard tape device functions for ibm tapes.
4 *
5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
10 */
11
#ifndef _TAPE_STD_H
#define _TAPE_STD_H

#include <asm/tape390.h>

/*
 * Biggest block size to handle. Currently 64K because we only build
 * channel programs without data chaining.
 */
#define MAX_BLOCKSIZE 65535

/*
 * The CCW commands for the Tape type of command.
 */
#define INVALID_00 0x00 /* Invalid cmd */
#define BACKSPACEBLOCK 0x27 /* Back Space block */
#define BACKSPACEFILE 0x2f /* Back Space file */
#define DATA_SEC_ERASE 0x97 /* Data security erase */
#define ERASE_GAP 0x17 /* Erase Gap */
#define FORSPACEBLOCK 0x37 /* Forward space block */
#define FORSPACEFILE 0x3F /* Forward Space file */
#define FORCE_STREAM_CNT 0xEB /* Forced streaming count # */
#define NOP 0x03 /* No operation */
#define READ_FORWARD 0x02 /* Read forward */
#define REWIND 0x07 /* Rewind */
#define REWIND_UNLOAD 0x0F /* Rewind and Unload */
#define SENSE 0x04 /* Sense */
#define NEW_MODE_SET 0xEB /* Guess it is Mode set */
#define WRITE_CMD 0x01 /* Write */
#define WRITETAPEMARK 0x1F /* Write Tape Mark */

/* Commands with model-dependent behaviour (3420 vs. 3480/3490). */
#define ASSIGN 0xB7 /* 3420 REJECT,3480 OK */
#define CONTROL_ACCESS 0xE3 /* Set high speed */
#define DIAG_MODE_SET 0x0B /* 3420 NOP, 3480 REJECT */
#define LOAD_DISPLAY 0x9F /* 3420 REJECT,3480 OK */
#define LOCATE 0x4F /* 3420 REJ, 3480 NOP */
#define LOOP_WRITE_TO_READ 0x8B /* 3480 REJECT */
#define MODE_SET_DB 0xDB /* 3420 REJECT,3480 OK */
#define MODE_SET_C3 0xC3 /* for 3420 */
#define MODE_SET_CB 0xCB /* for 3420 */
#define MODE_SET_D3 0xD3 /* for 3420 */
#define READ_BACKWARD 0x0C /* */
#define READ_BLOCK_ID 0x22 /* 3420 REJECT,3480 OK */
#define READ_BUFFER 0x12 /* 3420 REJECT,3480 OK */
#define READ_BUFF_LOG 0x24 /* 3420 REJECT,3480 OK */
#define RELEASE 0xD4 /* 3420 NOP, 3480 REJECT */
#define REQ_TRK_IN_ERROR 0x1B /* 3420 NOP, 3480 REJECT */
#define RESERVE 0xF4 /* 3420 NOP, 3480 REJECT */
#define SENSE_GROUP_ID 0x34 /* 3420 REJECT,3480 OK */
#define SENSE_ID 0xE4 /* 3420 REJECT,3480 OK */
#define READ_DEV_CHAR 0x64 /* Read device characteristics */
#define SET_DIAGNOSE 0x4B /* 3420 NOP, 3480 REJECT */
#define SET_GROUP_ID 0xAF /* 3420 REJECT,3480 OK */
#define SET_TAPE_WRITE_IMMED 0xC3 /* for 3480 */
#define SUSPEND 0x5B /* 3420 REJ, 3480 NOP */
#define SYNC 0x43 /* Synchronize (flush buffer) */
#define UNASSIGN 0xC7 /* 3420 REJECT,3480 OK */
#define PERF_SUBSYS_FUNC 0x77 /* 3490 CMD */
#define READ_CONFIG_DATA 0xFA /* 3490 CMD */
#define READ_MESSAGE_ID 0x4E /* 3490 CMD */
#define READ_SUBSYS_DATA 0x3E /* 3490 CMD */
#define SET_INTERFACE_ID 0x73 /* 3490 CMD */

/* Bits of sense byte 0. */
#define SENSE_COMMAND_REJECT 0x80
#define SENSE_INTERVENTION_REQUIRED 0x40
#define SENSE_BUS_OUT_CHECK 0x20
#define SENSE_EQUIPMENT_CHECK 0x10
#define SENSE_DATA_CHECK 0x08
#define SENSE_OVERRUN 0x04
#define SENSE_DEFERRED_UNIT_CHECK 0x02
#define SENSE_ASSIGNED_ELSEWHERE 0x01

/* Bits of sense byte 1. */
#define SENSE_LOCATE_FAILURE 0x80
#define SENSE_DRIVE_ONLINE 0x40
#define SENSE_RESERVED 0x20
#define SENSE_RECORD_SEQUENCE_ERR 0x10
#define SENSE_BEGINNING_OF_TAPE 0x08
#define SENSE_WRITE_MODE 0x04
#define SENSE_WRITE_PROTECT 0x02
#define SENSE_NOT_CAPABLE 0x01

/* Bits of sense byte 2. */
#define SENSE_CHANNEL_ADAPTER_CODE 0xE0
#define SENSE_CHANNEL_ADAPTER_LOC 0x10
#define SENSE_REPORTING_CU 0x08
#define SENSE_AUTOMATIC_LOADER 0x04
#define SENSE_TAPE_SYNC_MODE 0x02
#define SENSE_TAPE_POSITIONING 0x01

/* discipline functions */
struct tape_request *tape_std_read_block(struct tape_device *, size_t);
void tape_std_read_backward(struct tape_device *device,
			    struct tape_request *request);
struct tape_request *tape_std_write_block(struct tape_device *, size_t);
struct tape_request *tape_std_bread(struct tape_device *, struct request *);
void tape_std_free_bread(struct tape_request *);
void tape_std_check_locate(struct tape_device *, struct tape_request *);
struct tape_request *tape_std_bwrite(struct request *,
				     struct tape_device *, int);

/* Some non-mtop commands. */
int tape_std_assign(struct tape_device *);
int tape_std_unassign(struct tape_device *);
int tape_std_read_block_id(struct tape_device *device, __u64 *id);
int tape_std_display(struct tape_device *, struct display_struct *disp);
int tape_std_terminate_write(struct tape_device *);

/* Standard magnetic tape commands. */
int tape_std_mtbsf(struct tape_device *, int);
int tape_std_mtbsfm(struct tape_device *, int);
int tape_std_mtbsr(struct tape_device *, int);
int tape_std_mtcompression(struct tape_device *, int);
int tape_std_mteom(struct tape_device *, int);
int tape_std_mterase(struct tape_device *, int);
int tape_std_mtfsf(struct tape_device *, int);
int tape_std_mtfsfm(struct tape_device *, int);
int tape_std_mtfsr(struct tape_device *, int);
int tape_std_mtload(struct tape_device *, int);
int tape_std_mtnop(struct tape_device *, int);
int tape_std_mtoffl(struct tape_device *, int);
int tape_std_mtreset(struct tape_device *, int);
int tape_std_mtreten(struct tape_device *, int);
int tape_std_mtrew(struct tape_device *, int);
int tape_std_mtsetblk(struct tape_device *, int);
int tape_std_mtunload(struct tape_device *, int);
int tape_std_mtweof(struct tape_device *, int);

/* Event handlers */
void tape_std_default_handler(struct tape_device *);
void tape_std_unexpect_uchk_handler(struct tape_device *);
void tape_std_irq(struct tape_device *);
void tape_std_process_eov(struct tape_device *);

/* The error recovery stuff: */
void tape_std_error_recovery(struct tape_device *);
void tape_std_error_recovery_has_failed(struct tape_device *,int error_id);
void tape_std_error_recovery_succeded(struct tape_device *);
void tape_std_error_recovery_do_retry(struct tape_device *);
void tape_std_error_recovery_read_opposite(struct tape_device *);
void tape_std_error_recovery_HWBUG(struct tape_device *, int condno);

#endif /* _TAPE_STD_H */
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
new file mode 100644
index 000000000000..7db5ebce7f0f
--- /dev/null
+++ b/drivers/s390/char/tty3270.c
@@ -0,0 +1,1836 @@
1/*
2 * drivers/s390/char/tty3270.c
3 * IBM/3270 Driver - tty functions.
4 *
5 * Author(s):
6 * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
7 * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
8 * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
9 */
10
11#include <linux/config.h>
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/kdev_t.h>
15#include <linux/tty.h>
16#include <linux/vt_kern.h>
17#include <linux/init.h>
18#include <linux/console.h>
19#include <linux/interrupt.h>
20
21#include <linux/slab.h>
22#include <linux/bootmem.h>
23
24#include <asm/ccwdev.h>
25#include <asm/cio.h>
26#include <asm/ebcdic.h>
27#include <asm/uaccess.h>
28
29
30#include "raw3270.h"
31#include "keyboard.h"
32
/* Buffer sizes and the number of pages feeding the string allocator. */
#define TTY3270_CHAR_BUF_SIZE 256
#define TTY3270_OUTPUT_BUFFER_SIZE 1024
#define TTY3270_STRING_PAGES 5

struct tty_driver *tty3270_driver;
static int tty3270_max_index;

struct raw3270_fn tty3270_fn;

/* One character cell of the emulated screen. */
struct tty3270_cell {
	unsigned char character;
	unsigned char highlight;
	unsigned char f_color;
};

/* One line of the emulated screen. */
struct tty3270_line {
	struct tty3270_cell *cells;
	int len;
};

/* Maximum number of parameters of an escape sequence. */
#define ESCAPE_NPAR 8

/*
 * The main tty view data structure.
 * FIXME:
 * 1) describe line orientation & lines list concept against screen
 * 2) describe conversion of screen to lines
 * 3) describe line format.
 */
struct tty3270 {
	struct raw3270_view view;
	struct tty_struct *tty;		/* Pointer to tty structure */
	void **freemem_pages;		/* Array of pages used for freemem. */
	struct list_head freemem;	/* List of free memory for strings. */

	/* Output stuff. */
	struct list_head lines;		/* List of lines. */
	struct list_head update;	/* List of lines to update. */
	unsigned char wcc;		/* Write control character. */
	int nr_lines;			/* # lines in list. */
	int nr_up;			/* # lines up in history. */
	unsigned long update_flags;	/* Update indication bits. */
	struct string *status;		/* Lower right of display. */
	struct raw3270_request *write;	/* Single write request. */
	struct timer_list timer;	/* Output delay timer. */

	/* Current tty screen. */
	unsigned int cx, cy;		/* Current output position. */
	unsigned int highlight;		/* Blink/reverse/underscore */
	unsigned int f_color;		/* Foreground color */
	struct tty3270_line *screen;

	/* Input stuff. */
	struct string *prompt;		/* Output string for input area. */
	struct string *input;		/* Input string for read request. */
	struct raw3270_request *read;	/* Single read request. */
	struct raw3270_request *kreset;	/* Single keyboard reset request. */
	unsigned char inattr;		/* Visible/invisible input. */
	int throttle, attn;		/* tty throttle/unthrottle. */
	struct tasklet_struct readlet;	/* Tasklet to issue read request. */
	struct kbd_data *kbd;		/* key_maps stuff. */

	/* Escape sequence parsing. */
	int esc_state, esc_ques, esc_npar;
	int esc_par[ESCAPE_NPAR];
	unsigned int saved_cx, saved_cy;
	unsigned int saved_highlight, saved_f_color;

	/* Command recalling. */
	struct list_head rcl_lines;	/* List of recallable lines. */
	struct list_head *rcl_walk;	/* Point in rcl_lines list. */
	int rcl_nr, rcl_max;		/* Number/max number of rcl_lines. */

	/* Character array for put_char/flush_chars. */
	unsigned int char_count;
	char char_buf[TTY3270_CHAR_BUF_SIZE];
};

/* tty3270->update_flags. See tty3270_update for details. */
#define TTY_UPDATE_ERASE	1 /* Use EWRITEA instead of WRITE. */
#define TTY_UPDATE_LIST		2 /* Update lines in tty3270->update. */
#define TTY_UPDATE_INPUT	4 /* Update input line. */
#define TTY_UPDATE_STATUS	8 /* Update status line. */
#define TTY_UPDATE_ALL		15

static void tty3270_update(struct tty3270 *);
119
120/*
121 * Setup timeout for a device. On timeout trigger an update.
122 */
void
tty3270_set_timer(struct tty3270 *tp, int expires)
{
	/* expires == 0 cancels a pending update timer. */
	if (expires == 0) {
		/* Drop the view reference the pending timer was holding. */
		if (timer_pending(&tp->timer) && del_timer(&tp->timer))
			raw3270_put_view(&tp->view);
		return;
	}
	/* If the timer is already pending just push its expiry back. */
	if (timer_pending(&tp->timer) &&
	    mod_timer(&tp->timer, jiffies + expires))
		return;
	/* Arm a fresh timer; it holds a view reference until it fires. */
	raw3270_get_view(&tp->view);
	tp->timer.function = (void (*)(unsigned long)) tty3270_update;
	tp->timer.data = (unsigned long) tp;
	tp->timer.expires = jiffies + expires;
	add_timer(&tp->timer);
}
140
141/*
142 * The input line are the two last lines of the screen.
143 */
static void
tty3270_update_prompt(struct tty3270 *tp, char *input, int count)
{
	struct string *line;
	unsigned int off;

	line = tp->prompt;
	/* Byte 5 is the input field attribute; set MDT while input exists. */
	if (count != 0)
		line->string[5] = TF_INMDT;
	else
		line->string[5] = tp->inattr;
	/* The input field spans two screen lines minus the frame bytes. */
	if (count > tp->view.cols * 2 - 11)
		count = tp->view.cols * 2 - 11;
	memcpy(line->string + 6, input, count);
	line->string[6 + count] = TO_IC;
	/* Clear to end of input line. */
	if (count < tp->view.cols * 2 - 11) {
		line->string[7 + count] = TO_RA;
		line->string[10 + count] = 0;
		off = tp->view.cols * tp->view.rows - 9;
		raw3270_buffer_address(tp->view.dev, line->string+count+8, off);
		line->len = 11 + count;
	} else
		line->len = 7 + count;
	tp->update_flags |= TTY_UPDATE_INPUT;
}
170
static void
tty3270_create_prompt(struct tty3270 *tp)
{
	static const unsigned char blueprint[] =
		{ TO_SBA, 0, 0, 0x6e, TO_SF, TF_INPUT,
		  /* empty input string */
		  TO_IC, TO_RA, 0, 0, 0 };
	struct string *line;
	unsigned int offset;

	line = alloc_string(&tp->freemem,
			    sizeof(blueprint) + tp->view.cols * 2 - 9);
	tp->prompt = line;
	tp->inattr = TF_INPUT;
	/* Copy blueprint to prompt line */
	memcpy(line->string, blueprint, sizeof(blueprint));
	line->len = sizeof(blueprint);
	/* Set output offsets: prompt starts two lines above the bottom,
	 * the repeat-to-address target is the start of the status field. */
	offset = tp->view.cols * (tp->view.rows - 2);
	raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
	offset = tp->view.cols * tp->view.rows - 9;
	raw3270_buffer_address(tp->view.dev, line->string + 8, offset);

	/* Allocate input string for reading. */
	tp->input = alloc_string(&tp->freemem, tp->view.cols * 2 - 9 + 6);
}
197
198/*
199 * The status line is the last line of the screen. It shows the string
 * "Running" or "History" in the lower right corner of the screen.
201 */
202static void
203tty3270_update_status(struct tty3270 * tp)
204{
205 char *str;
206
207 str = (tp->nr_up != 0) ? "History" : "Running";
208 memcpy(tp->status->string + 8, str, 7);
209 codepage_convert(tp->view.ascebc, tp->status->string + 8, 7);
210 tp->update_flags |= TTY_UPDATE_STATUS;
211}
212
static void
tty3270_create_status(struct tty3270 * tp)
{
	static const unsigned char blueprint[] =
		{ TO_SBA, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR, TAC_GREEN,
		  0, 0, 0, 0, 0, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR,
		  TAC_RESET };
	struct string *line;
	unsigned int offset;

	line = alloc_string(&tp->freemem,sizeof(blueprint));
	tp->status = line;
	/* Copy blueprint to status line */
	memcpy(line->string, blueprint, sizeof(blueprint));
	/* Set address to start of status string (= last 9 characters). */
	offset = tp->view.cols * tp->view.rows - 9;
	raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
}
231
232/*
233 * Set output offsets to 3270 datastream fragment of a tty string.
234 * (TO_SBA offset at the start and TO_RA offset at the end of the string)
235 */
static void
tty3270_update_string(struct tty3270 *tp, struct string *line, int nr)
{
	unsigned char *cp;

	/* Patch the TO_SBA at the start to the buffer address of row nr. */
	raw3270_buffer_address(tp->view.dev, line->string + 1,
			       tp->view.cols * nr);
	/* If the line ends with TO_RA, patch its target to the next row. */
	cp = line->string + line->len - 4;
	if (*cp == TO_RA)
		raw3270_buffer_address(tp->view.dev, cp + 1,
				       tp->view.cols * (nr + 1));
}
248
249/*
250 * Rebuild update list to print all lines.
251 */
static void
tty3270_rebuild_update(struct tty3270 *tp)
{
	struct string *s, *n;
	int line, nr_up;

	/*
	 * Throw away update list and create a new one,
	 * containing all lines that will fit on the screen.
	 */
	list_for_each_entry_safe(s, n, &tp->update, update)
		list_del_init(&s->update);
	/* Fill the screen bottom-up (newest line last), skipping the
	 * tp->nr_up newest lines when scrolled back into the history. */
	line = tp->view.rows - 3;
	nr_up = tp->nr_up;
	list_for_each_entry_reverse(s, &tp->lines, list) {
		if (nr_up > 0) {
			nr_up--;
			continue;
		}
		tty3270_update_string(tp, s, line);
		list_add(&s->update, &tp->update);
		if (--line < 0)
			break;
	}
	tp->update_flags |= TTY_UPDATE_LIST;
}
278
279/*
280 * Alloc string for size bytes. If there is not enough room in
281 * freemem, free strings until there is room.
282 */
283static struct string *
284tty3270_alloc_string(struct tty3270 *tp, size_t size)
285{
286 struct string *s, *n;
287
288 s = alloc_string(&tp->freemem, size);
289 if (s)
290 return s;
291 list_for_each_entry_safe(s, n, &tp->lines, list) {
292 BUG_ON(tp->nr_lines <= tp->view.rows - 2);
293 list_del(&s->list);
294 if (!list_empty(&s->update))
295 list_del(&s->update);
296 tp->nr_lines--;
297 if (free_string(&tp->freemem, s) >= size)
298 break;
299 }
300 s = alloc_string(&tp->freemem, size);
301 BUG_ON(!s);
302 if (tp->nr_up != 0 &&
303 tp->nr_up + tp->view.rows - 2 >= tp->nr_lines) {
304 tp->nr_up = tp->nr_lines - tp->view.rows + 2;
305 tty3270_rebuild_update(tp);
306 tty3270_update_status(tp);
307 }
308 return s;
309}
310
311/*
312 * Add an empty line to the list.
313 */
static void
tty3270_blank_line(struct tty3270 *tp)
{
	/* Datastream for an empty line: set address, reset highlight and
	 * color, repeat blanks to the start of the next line. */
	static const unsigned char blueprint[] =
		{ TO_SBA, 0, 0, TO_SA, TAT_EXTHI, TAX_RESET,
		  TO_SA, TAT_COLOR, TAC_RESET, TO_RA, 0, 0, 0 };
	struct string *s;

	s = tty3270_alloc_string(tp, sizeof(blueprint));
	memcpy(s->string, blueprint, sizeof(blueprint));
	s->len = sizeof(blueprint);
	list_add_tail(&s->list, &tp->lines);
	tp->nr_lines++;
	/* Keep the scrolled-back view stable while new lines arrive. */
	if (tp->nr_up != 0)
		tp->nr_up++;
}
330
331/*
332 * Write request completion callback.
333 */
334static void
335tty3270_write_callback(struct raw3270_request *rq, void *data)
336{
337 struct tty3270 *tp;
338
339 tp = (struct tty3270 *) rq->view;
340 if (rq->rc != 0) {
341 /* Write wasn't successfull. Refresh all. */
342 tty3270_rebuild_update(tp);
343 tp->update_flags = TTY_UPDATE_ALL;
344 tty3270_set_timer(tp, 1);
345 }
346 raw3270_request_reset(rq);
347 xchg(&tp->write, rq);
348}
349
350/*
351 * Update 3270 display.
352 */
static void
tty3270_update(struct tty3270 *tp)
{
	static char invalid_sba[2] = { 0xff, 0xff };
	struct raw3270_request *wrq;
	unsigned long updated;
	struct string *s, *n;
	char *sba, *str;
	int rc, len;

	/* Grab the single write request; NULL means one is in flight. */
	wrq = xchg(&tp->write, 0);
	if (!wrq) {
		tty3270_set_timer(tp, 1);
		return;
	}

	spin_lock(&tp->view.lock);
	/* Bits of work that made it into the request get collected in
	 * 'updated' and cleared from update_flags only on success. */
	updated = 0;
	if (tp->update_flags & TTY_UPDATE_ERASE) {
		/* Use erase write alternate to erase display. */
		raw3270_request_set_cmd(wrq, TC_EWRITEA);
		updated |= TTY_UPDATE_ERASE;
	} else
		raw3270_request_set_cmd(wrq, TC_WRITE);

	raw3270_request_add_data(wrq, &tp->wcc, 1);
	tp->wcc = TW_NONE;

	/*
	 * Update status line.
	 */
	if (tp->update_flags & TTY_UPDATE_STATUS)
		if (raw3270_request_add_data(wrq, tp->status->string,
					     tp->status->len) == 0)
			updated |= TTY_UPDATE_STATUS;

	/*
	 * Write input line.
	 */
	if (tp->update_flags & TTY_UPDATE_INPUT)
		if (raw3270_request_add_data(wrq, tp->prompt->string,
					     tp->prompt->len) == 0)
			updated |= TTY_UPDATE_INPUT;

	sba = invalid_sba;
	
	if (tp->update_flags & TTY_UPDATE_LIST) {
		/* Write strings in the update list to the screen. */
		list_for_each_entry_safe(s, n, &tp->update, update) {
			str = s->string;
			len = s->len;
			/*
			 * Skip TO_SBA at the start of the string if the
			 * last output position matches the start address
			 * of this line.
			 */
			if (s->string[1] == sba[0] && s->string[2] == sba[1])
				str += 3, len -= 3;
			if (raw3270_request_add_data(wrq, str, len) != 0)
				break;
			list_del_init(&s->update);
			sba = s->string + s->len - 3;
		}
		if (list_empty(&tp->update))
			updated |= TTY_UPDATE_LIST;
	}
	wrq->callback = tty3270_write_callback;
	rc = raw3270_start(&tp->view, wrq);
	if (rc == 0) {
		tp->update_flags &= ~updated;
		/* Some work did not fit into the request; retry later. */
		if (tp->update_flags)
			tty3270_set_timer(tp, 1);
	} else {
		/* Start failed: put the request back for the next attempt. */
		raw3270_request_reset(wrq);
		xchg(&tp->write, wrq);
	}
	spin_unlock(&tp->view.lock);
	/* Drop the reference taken when the timer was armed. */
	raw3270_put_view(&tp->view);
}
432
433/*
434 * Command recalling.
435 */
436static void
437tty3270_rcl_add(struct tty3270 *tp, char *input, int len)
438{
439 struct string *s;
440
441 tp->rcl_walk = 0;
442 if (len <= 0)
443 return;
444 if (tp->rcl_nr >= tp->rcl_max) {
445 s = list_entry(tp->rcl_lines.next, struct string, list);
446 list_del(&s->list);
447 free_string(&tp->freemem, s);
448 tp->rcl_nr--;
449 }
450 s = tty3270_alloc_string(tp, len);
451 memcpy(s->string, input, len);
452 list_add_tail(&s->list, &tp->rcl_lines);
453 tp->rcl_nr++;
454}
455
456static void
457tty3270_rcl_backward(struct kbd_data *kbd)
458{
459 struct tty3270 *tp;
460 struct string *s;
461
462 tp = kbd->tty->driver_data;
463 spin_lock_bh(&tp->view.lock);
464 if (tp->inattr == TF_INPUT) {
465 if (tp->rcl_walk && tp->rcl_walk->prev != &tp->rcl_lines)
466 tp->rcl_walk = tp->rcl_walk->prev;
467 else if (!list_empty(&tp->rcl_lines))
468 tp->rcl_walk = tp->rcl_lines.prev;
469 s = tp->rcl_walk ?
470 list_entry(tp->rcl_walk, struct string, list) : 0;
471 if (tp->rcl_walk) {
472 s = list_entry(tp->rcl_walk, struct string, list);
473 tty3270_update_prompt(tp, s->string, s->len);
474 } else
475 tty3270_update_prompt(tp, 0, 0);
476 tty3270_set_timer(tp, 1);
477 }
478 spin_unlock_bh(&tp->view.lock);
479}
480
481/*
482 * Deactivate tty view.
483 */
484static void
485tty3270_exit_tty(struct kbd_data *kbd)
486{
487 struct tty3270 *tp;
488
489 tp = kbd->tty->driver_data;
490 raw3270_deactivate_view(&tp->view);
491}
492
493/*
494 * Scroll forward in history.
495 */
496static void
497tty3270_scroll_forward(struct kbd_data *kbd)
498{
499 struct tty3270 *tp;
500 int nr_up;
501
502 tp = kbd->tty->driver_data;
503 spin_lock_bh(&tp->view.lock);
504 nr_up = tp->nr_up - tp->view.rows + 2;
505 if (nr_up < 0)
506 nr_up = 0;
507 if (nr_up != tp->nr_up) {
508 tp->nr_up = nr_up;
509 tty3270_rebuild_update(tp);
510 tty3270_update_status(tp);
511 tty3270_set_timer(tp, 1);
512 }
513 spin_unlock_bh(&tp->view.lock);
514}
515
516/*
517 * Scroll backward in history.
518 */
519static void
520tty3270_scroll_backward(struct kbd_data *kbd)
521{
522 struct tty3270 *tp;
523 int nr_up;
524
525 tp = kbd->tty->driver_data;
526 spin_lock_bh(&tp->view.lock);
527 nr_up = tp->nr_up + tp->view.rows - 2;
528 if (nr_up + tp->view.rows - 2 > tp->nr_lines)
529 nr_up = tp->nr_lines - tp->view.rows + 2;
530 if (nr_up != tp->nr_up) {
531 tp->nr_up = nr_up;
532 tty3270_rebuild_update(tp);
533 tty3270_update_status(tp);
534 tty3270_set_timer(tp, 1);
535 }
536 spin_unlock_bh(&tp->view.lock);
537}
538
539/*
540 * Pass input line to tty.
541 */
542static void
543tty3270_read_tasklet(struct raw3270_request *rrq)
544{
545 static char kreset_data = TW_KR;
546 struct tty3270 *tp;
547 char *input;
548 int len;
549
550 tp = (struct tty3270 *) rrq->view;
551 spin_lock_bh(&tp->view.lock);
552 /*
553 * Two AID keys are special: For 0x7d (enter) the input line
554 * has to be emitted to the tty and for 0x6d the screen
555 * needs to be redrawn.
556 */
557 input = 0;
558 len = 0;
559 if (tp->input->string[0] == 0x7d) {
560 /* Enter: write input to tty. */
561 input = tp->input->string + 6;
562 len = tp->input->len - 6 - rrq->rescnt;
563 if (tp->inattr != TF_INPUTN)
564 tty3270_rcl_add(tp, input, len);
565 if (tp->nr_up > 0) {
566 tp->nr_up = 0;
567 tty3270_rebuild_update(tp);
568 tty3270_update_status(tp);
569 }
570 /* Clear input area. */
571 tty3270_update_prompt(tp, 0, 0);
572 tty3270_set_timer(tp, 1);
573 } else if (tp->input->string[0] == 0x6d) {
574 /* Display has been cleared. Redraw. */
575 tty3270_rebuild_update(tp);
576 tp->update_flags = TTY_UPDATE_ALL;
577 tty3270_set_timer(tp, 1);
578 }
579 spin_unlock_bh(&tp->view.lock);
580
581 /* Start keyboard reset command. */
582 raw3270_request_reset(tp->kreset);
583 raw3270_request_set_cmd(tp->kreset, TC_WRITE);
584 raw3270_request_add_data(tp->kreset, &kreset_data, 1);
585 raw3270_start(&tp->view, tp->kreset);
586
587 /* Emit input string. */
588 if (tp->tty) {
589 while (len-- > 0)
590 kbd_keycode(tp->kbd, *input++);
591 /* Emit keycode for AID byte. */
592 kbd_keycode(tp->kbd, 256 + tp->input->string[0]);
593 }
594
595 raw3270_request_reset(rrq);
596 xchg(&tp->read, rrq);
597 raw3270_put_view(&tp->view);
598}
599
600/*
601 * Read request completion callback.
602 */
603static void
604tty3270_read_callback(struct raw3270_request *rq, void *data)
605{
606 raw3270_get_view(rq->view);
607 /* Schedule tasklet to pass input to tty. */
608 tasklet_schedule(&((struct tty3270 *) rq->view)->readlet);
609}
610
611/*
612 * Issue a read request. Call with device lock.
613 */
614static void
615tty3270_issue_read(struct tty3270 *tp, int lock)
616{
617 struct raw3270_request *rrq;
618 int rc;
619
620 rrq = xchg(&tp->read, 0);
621 if (!rrq)
622 /* Read already scheduled. */
623 return;
624 rrq->callback = tty3270_read_callback;
625 rrq->callback_data = tp;
626 raw3270_request_set_cmd(rrq, TC_READMOD);
627 raw3270_request_set_data(rrq, tp->input->string, tp->input->len);
628 /* Issue the read modified request. */
629 if (lock) {
630 rc = raw3270_start(&tp->view, rrq);
631 } else
632 rc = raw3270_start_irq(&tp->view, rrq);
633 if (rc) {
634 raw3270_request_reset(rrq);
635 xchg(&tp->read, rrq);
636 }
637}
638
639/*
640 * Switch to the tty view.
641 */
static int
tty3270_activate(struct raw3270_view *view)
{
	struct tty3270 *tp;
	unsigned long flags;

	tp = (struct tty3270 *) view;
	spin_lock_irqsave(&tp->view.lock, flags);
	/* Come back to the present end of output and redraw everything. */
	tp->nr_up = 0;
	tty3270_rebuild_update(tp);
	tty3270_update_status(tp);
	tp->update_flags = TTY_UPDATE_ALL;
	tty3270_set_timer(tp, 1);
	spin_unlock_irqrestore(&tp->view.lock, flags);
	/* Resume tty output that was stopped in tty3270_deactivate. */
	start_tty(tp->tty);
	return 0;
}
659
660static void
661tty3270_deactivate(struct raw3270_view *view)
662{
663 struct tty3270 *tp;
664
665 tp = (struct tty3270 *) view;
666 if (tp && tp->tty)
667 stop_tty(tp->tty);
668}
669
static int
tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
{
	/* Handle ATTN. Schedule tasklet to read aid. */
	if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
		if (!tp->throttle)
			tty3270_issue_read(tp, 0);
		else
			/* Remember the ATTN; the read is issued later. */
			tp->attn = 1;
	}

	if (rq) {
		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
			rq->rc = -EIO;
		else
			/* Normal end. Copy residual count. */
			rq->rescnt = irb->scsw.count;
	}
	return RAW3270_IO_DONE;
}
690
691/*
692 * Allocate tty3270 structure.
693 */
static struct tty3270 *
tty3270_alloc_view(void)
{
	struct tty3270 *tp;
	int pages;

	tp = kmalloc(sizeof(struct tty3270),GFP_KERNEL);
	if (!tp)
		goto out_err;
	memset(tp, 0, sizeof(struct tty3270));
	tp->freemem_pages =
		kmalloc(sizeof(void *) * TTY3270_STRING_PAGES, GFP_KERNEL);
	if (!tp->freemem_pages)
		goto out_tp;
	INIT_LIST_HEAD(&tp->freemem);
	init_timer(&tp->timer);
	/* Feed whole pages into the string allocator. GFP_DMA: the
	 * strings end up in channel programs - presumably must be
	 * 31-bit addressable; confirm against raw3270. */
	for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) {
		tp->freemem_pages[pages] = (void *)
			__get_free_pages(GFP_KERNEL|GFP_DMA, 0);
		if (!tp->freemem_pages[pages])
			goto out_pages;
		add_string_memory(&tp->freemem,
				  tp->freemem_pages[pages], PAGE_SIZE);
	}
	tp->write = raw3270_request_alloc(TTY3270_OUTPUT_BUFFER_SIZE);
	if (!tp->write)
		goto out_pages;
	tp->read = raw3270_request_alloc(0);
	if (!tp->read)
		goto out_write;
	tp->kreset = raw3270_request_alloc(1);
	if (!tp->kreset)
		goto out_read;
	tp->kbd = kbd_alloc();
	if (!tp->kbd)
		goto out_reset;
	return tp;

	/* Unwind in reverse order of allocation. */
out_reset:
	raw3270_request_free(tp->kreset);
out_read:
	raw3270_request_free(tp->read);
out_write:
	raw3270_request_free(tp->write);
out_pages:
	while (pages--)
		free_pages((unsigned long) tp->freemem_pages[pages], 0);
	kfree(tp->freemem_pages);
out_tp:
	kfree(tp);
out_err:
	return ERR_PTR(-ENOMEM);
}
747
748/*
749 * Free tty3270 structure.
750 */
751static void
752tty3270_free_view(struct tty3270 *tp)
753{
754 int pages;
755
756 kbd_free(tp->kbd);
757 raw3270_request_free(tp->kreset);
758 raw3270_request_free(tp->read);
759 raw3270_request_free(tp->write);
760 for (pages = 0; pages < TTY3270_STRING_PAGES; pages++)
761 free_pages((unsigned long) tp->freemem_pages[pages], 0);
762 kfree(tp->freemem_pages);
763 kfree(tp);
764}
765
766/*
767 * Allocate tty3270 screen.
768 */
769static int
770tty3270_alloc_screen(struct tty3270 *tp)
771{
772 unsigned long size;
773 int lines;
774
775 size = sizeof(struct tty3270_line) * (tp->view.rows - 2);
776 tp->screen = kmalloc(size, GFP_KERNEL);
777 if (!tp->screen)
778 goto out_err;
779 memset(tp->screen, 0, size);
780 for (lines = 0; lines < tp->view.rows - 2; lines++) {
781 size = sizeof(struct tty3270_cell) * tp->view.cols;
782 tp->screen[lines].cells = kmalloc(size, GFP_KERNEL);
783 if (!tp->screen[lines].cells)
784 goto out_screen;
785 memset(tp->screen[lines].cells, 0, size);
786 }
787 return 0;
788out_screen:
789 while (lines--)
790 kfree(tp->screen[lines].cells);
791 kfree(tp->screen);
792out_err:
793 return -ENOMEM;
794}
795
796/*
797 * Free tty3270 screen.
798 */
799static void
800tty3270_free_screen(struct tty3270 *tp)
801{
802 int lines;
803
804 for (lines = 0; lines < tp->view.rows - 2; lines++)
805 kfree(tp->screen[lines].cells);
806 kfree(tp->screen);
807}
808
809/*
810 * Unlink tty3270 data structure from tty.
811 */
812static void
813tty3270_release(struct raw3270_view *view)
814{
815 struct tty3270 *tp;
816 struct tty_struct *tty;
817
818 tp = (struct tty3270 *) view;
819 tty = tp->tty;
820 if (tty) {
821 tty->driver_data = 0;
822 tp->tty = tp->kbd->tty = 0;
823 tty_hangup(tty);
824 raw3270_put_view(&tp->view);
825 }
826}
827
828/*
829 * Free tty3270 data structure
830 */
831static void
832tty3270_free(struct raw3270_view *view)
833{
834 tty3270_free_screen((struct tty3270 *) view);
835 tty3270_free_view((struct tty3270 *) view);
836}
837
838/*
839 * Delayed freeing of tty3270 views.
840 */
841static void
842tty3270_del_views(void)
843{
844 struct tty3270 *tp;
845 int i;
846
847 for (i = 0; i < tty3270_max_index; i++) {
848 tp = (struct tty3270 *) raw3270_find_view(&tty3270_fn, i);
849 if (!IS_ERR(tp))
850 raw3270_del_view(&tp->view);
851 }
852}
853
/*
 * View operations handed to the raw3270 layer.  Note the cast on
 * .intv: tty3270_irq() takes a struct tty3270 * where the interface
 * declares a struct raw3270_view *.
 */
struct raw3270_fn tty3270_fn = {
	.activate = tty3270_activate,
	.deactivate = tty3270_deactivate,
	.intv = (void *) tty3270_irq,
	.release = tty3270_release,
	.free = tty3270_free
};
861
862/*
863 * This routine is called whenever a 3270 tty is opened.
864 */
865static int
866tty3270_open(struct tty_struct *tty, struct file * filp)
867{
868 struct tty3270 *tp;
869 int i, rc;
870
871 if (tty->count > 1)
872 return 0;
873 /* Check if the tty3270 is already there. */
874 tp = (struct tty3270 *) raw3270_find_view(&tty3270_fn, tty->index);
875 if (!IS_ERR(tp)) {
876 tty->driver_data = tp;
877 tty->winsize.ws_row = tp->view.rows - 2;
878 tty->winsize.ws_col = tp->view.cols;
879 tty->low_latency = 0;
880 tp->tty = tty;
881 tp->kbd->tty = tty;
882 tp->inattr = TF_INPUT;
883 return 0;
884 }
885 if (tty3270_max_index < tty->index + 1)
886 tty3270_max_index = tty->index + 1;
887
888 /* Quick exit if there is no device for tty->index. */
889 if (PTR_ERR(tp) == -ENODEV)
890 return -ENODEV;
891
892 /* Allocate tty3270 structure on first open. */
893 tp = tty3270_alloc_view();
894 if (IS_ERR(tp))
895 return PTR_ERR(tp);
896
897 INIT_LIST_HEAD(&tp->lines);
898 INIT_LIST_HEAD(&tp->update);
899 INIT_LIST_HEAD(&tp->rcl_lines);
900 tp->rcl_max = 20;
901 init_timer(&tp->timer);
902 tasklet_init(&tp->readlet,
903 (void (*)(unsigned long)) tty3270_read_tasklet,
904 (unsigned long) tp->read);
905
906 rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index);
907 if (rc) {
908 tty3270_free_view(tp);
909 return rc;
910 }
911
912 rc = tty3270_alloc_screen(tp);
913 if (rc) {
914 raw3270_del_view(&tp->view);
915 raw3270_put_view(&tp->view);
916 return rc;
917 }
918
919 tp->tty = tty;
920 tty->low_latency = 0;
921 tty->driver_data = tp;
922 tty->winsize.ws_row = tp->view.rows - 2;
923 tty->winsize.ws_col = tp->view.cols;
924
925 tty3270_create_prompt(tp);
926 tty3270_create_status(tp);
927 tty3270_update_status(tp);
928
929 /* Create blank line for every line in the tty output area. */
930 for (i = 0; i < tp->view.rows - 2; i++)
931 tty3270_blank_line(tp);
932
933 tp->kbd->tty = tty;
934 tp->kbd->fn_handler[KVAL(K_INCRCONSOLE)] = tty3270_exit_tty;
935 tp->kbd->fn_handler[KVAL(K_SCROLLBACK)] = tty3270_scroll_backward;
936 tp->kbd->fn_handler[KVAL(K_SCROLLFORW)] = tty3270_scroll_forward;
937 tp->kbd->fn_handler[KVAL(K_CONS)] = tty3270_rcl_backward;
938 kbd_ascebc(tp->kbd, tp->view.ascebc);
939
940 raw3270_activate_view(&tp->view);
941 return 0;
942}
943
944/*
945 * This routine is called when the 3270 tty is closed. We wait
946 * for the remaining request to be completed. Then we clean up.
947 */
948static void
949tty3270_close(struct tty_struct *tty, struct file * filp)
950{
951 struct tty3270 *tp;
952
953 if (tty->count > 1)
954 return;
955 tp = (struct tty3270 *) tty->driver_data;
956 if (tp) {
957 tty->driver_data = 0;
958 tp->tty = tp->kbd->tty = 0;
959 raw3270_put_view(&tp->view);
960 }
961}
962
963/*
964 * We always have room.
965 */
966static int
967tty3270_write_room(struct tty_struct *tty)
968{
969 return INT_MAX;
970}
971
972/*
973 * Insert character into the screen at the current position with the
974 * current color and highlight. This function does NOT do cursor movement.
975 */
976static void
977tty3270_put_character(struct tty3270 *tp, char ch)
978{
979 struct tty3270_line *line;
980 struct tty3270_cell *cell;
981
982 line = tp->screen + tp->cy;
983 if (line->len <= tp->cx) {
984 while (line->len < tp->cx) {
985 cell = line->cells + line->len;
986 cell->character = tp->view.ascebc[' '];
987 cell->highlight = tp->highlight;
988 cell->f_color = tp->f_color;
989 line->len++;
990 }
991 line->len++;
992 }
993 cell = line->cells + tp->cx;
994 cell->character = tp->view.ascebc[(unsigned int) ch];
995 cell->highlight = tp->highlight;
996 cell->f_color = tp->f_color;
997}
998
999/*
1000 * Convert a tty3270_line to a 3270 data fragment usable for output.
1001 */
1002static void
1003tty3270_convert_line(struct tty3270 *tp, int line_nr)
1004{
1005 struct tty3270_line *line;
1006 struct tty3270_cell *cell;
1007 struct string *s, *n;
1008 unsigned char highlight;
1009 unsigned char f_color;
1010 char *cp;
1011 int flen, i;
1012
1013 /* Determine how long the fragment will be. */
1014 flen = 3; /* Prefix (TO_SBA). */
1015 line = tp->screen + line_nr;
1016 flen += line->len;
1017 highlight = TAX_RESET;
1018 f_color = TAC_RESET;
1019 for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
1020 if (cell->highlight != highlight) {
1021 flen += 3; /* TO_SA to switch highlight. */
1022 highlight = cell->highlight;
1023 }
1024 if (cell->f_color != f_color) {
1025 flen += 3; /* TO_SA to switch color. */
1026 f_color = cell->f_color;
1027 }
1028 }
1029 if (highlight != TAX_RESET)
1030 flen += 3; /* TO_SA to reset hightlight. */
1031 if (f_color != TAC_RESET)
1032 flen += 3; /* TO_SA to reset color. */
1033 if (line->len < tp->view.cols)
1034 flen += 4; /* Postfix (TO_RA). */
1035
1036 /* Find the line in the list. */
1037 i = tp->view.rows - 2 - line_nr;
1038 list_for_each_entry_reverse(s, &tp->lines, list)
1039 if (--i <= 0)
1040 break;
1041 /*
1042 * Check if the line needs to get reallocated.
1043 */
1044 if (s->len != flen) {
1045 /* Reallocate string. */
1046 n = tty3270_alloc_string(tp, flen);
1047 list_add(&n->list, &s->list);
1048 list_del_init(&s->list);
1049 if (!list_empty(&s->update))
1050 list_del_init(&s->update);
1051 free_string(&tp->freemem, s);
1052 s = n;
1053 }
1054
1055 /* Write 3270 data fragment. */
1056 cp = s->string;
1057 *cp++ = TO_SBA;
1058 *cp++ = 0;
1059 *cp++ = 0;
1060
1061 highlight = TAX_RESET;
1062 f_color = TAC_RESET;
1063 for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
1064 if (cell->highlight != highlight) {
1065 *cp++ = TO_SA;
1066 *cp++ = TAT_EXTHI;
1067 *cp++ = cell->highlight;
1068 highlight = cell->highlight;
1069 }
1070 if (cell->f_color != f_color) {
1071 *cp++ = TO_SA;
1072 *cp++ = TAT_COLOR;
1073 *cp++ = cell->f_color;
1074 f_color = cell->f_color;
1075 }
1076 *cp++ = cell->character;
1077 }
1078 if (highlight != TAX_RESET) {
1079 *cp++ = TO_SA;
1080 *cp++ = TAT_EXTHI;
1081 *cp++ = TAX_RESET;
1082 }
1083 if (f_color != TAC_RESET) {
1084 *cp++ = TO_SA;
1085 *cp++ = TAT_COLOR;
1086 *cp++ = TAC_RESET;
1087 }
1088 if (line->len < tp->view.cols) {
1089 *cp++ = TO_RA;
1090 *cp++ = 0;
1091 *cp++ = 0;
1092 *cp++ = 0;
1093 }
1094
1095 if (tp->nr_up + line_nr < tp->view.rows - 2) {
1096 /* Line is currently visible on screen. */
1097 tty3270_update_string(tp, s, line_nr);
1098 /* Add line to update list. */
1099 if (list_empty(&s->update)) {
1100 list_add_tail(&s->update, &tp->update);
1101 tp->update_flags |= TTY_UPDATE_LIST;
1102 }
1103 }
1104}
1105
1106/*
1107 * Do carriage return.
1108 */
1109static void
1110tty3270_cr(struct tty3270 *tp)
1111{
1112 tp->cx = 0;
1113}
1114
1115/*
1116 * Do line feed.
1117 */
1118static void
1119tty3270_lf(struct tty3270 *tp)
1120{
1121 struct tty3270_line temp;
1122 int i;
1123
1124 tty3270_convert_line(tp, tp->cy);
1125 if (tp->cy < tp->view.rows - 3) {
1126 tp->cy++;
1127 return;
1128 }
1129 /* Last line just filled up. Add new, blank line. */
1130 tty3270_blank_line(tp);
1131 temp = tp->screen[0];
1132 temp.len = 0;
1133 for (i = 0; i < tp->view.rows - 3; i++)
1134 tp->screen[i] = tp->screen[i+1];
1135 tp->screen[tp->view.rows - 3] = temp;
1136 tty3270_rebuild_update(tp);
1137}
1138
1139static void
1140tty3270_ri(struct tty3270 *tp)
1141{
1142 if (tp->cy > 0) {
1143 tty3270_convert_line(tp, tp->cy);
1144 tp->cy--;
1145 }
1146}
1147
1148/*
1149 * Insert characters at current position.
1150 */
1151static void
1152tty3270_insert_characters(struct tty3270 *tp, int n)
1153{
1154 struct tty3270_line *line;
1155 int k;
1156
1157 line = tp->screen + tp->cy;
1158 while (line->len < tp->cx) {
1159 line->cells[line->len].character = tp->view.ascebc[' '];
1160 line->cells[line->len].highlight = TAX_RESET;
1161 line->cells[line->len].f_color = TAC_RESET;
1162 line->len++;
1163 }
1164 if (n > tp->view.cols - tp->cx)
1165 n = tp->view.cols - tp->cx;
1166 k = min_t(int, line->len - tp->cx, tp->view.cols - tp->cx - n);
1167 while (k--)
1168 line->cells[tp->cx + n + k] = line->cells[tp->cx + k];
1169 line->len += n;
1170 if (line->len > tp->view.cols)
1171 line->len = tp->view.cols;
1172 while (n-- > 0) {
1173 line->cells[tp->cx + n].character = tp->view.ascebc[' '];
1174 line->cells[tp->cx + n].highlight = tp->highlight;
1175 line->cells[tp->cx + n].f_color = tp->f_color;
1176 }
1177}
1178
1179/*
1180 * Delete characters at current position.
1181 */
1182static void
1183tty3270_delete_characters(struct tty3270 *tp, int n)
1184{
1185 struct tty3270_line *line;
1186 int i;
1187
1188 line = tp->screen + tp->cy;
1189 if (line->len <= tp->cx)
1190 return;
1191 if (line->len - tp->cx <= n) {
1192 line->len = tp->cx;
1193 return;
1194 }
1195 for (i = tp->cx; i + n < line->len; i++)
1196 line->cells[i] = line->cells[i + n];
1197 line->len -= n;
1198}
1199
1200/*
1201 * Erase characters at current position.
1202 */
1203static void
1204tty3270_erase_characters(struct tty3270 *tp, int n)
1205{
1206 struct tty3270_line *line;
1207 struct tty3270_cell *cell;
1208
1209 line = tp->screen + tp->cy;
1210 while (line->len > tp->cx && n-- > 0) {
1211 cell = line->cells + tp->cx++;
1212 cell->character = ' ';
1213 cell->highlight = TAX_RESET;
1214 cell->f_color = TAC_RESET;
1215 }
1216 tp->cx += n;
1217 tp->cx = min_t(int, tp->cx, tp->view.cols - 1);
1218}
1219
1220/*
1221 * Erase line, 3 different cases:
1222 * Esc [ 0 K Erase from current position to end of line inclusive
1223 * Esc [ 1 K Erase from beginning of line to current position inclusive
1224 * Esc [ 2 K Erase entire line (without moving cursor)
1225 */
1226static void
1227tty3270_erase_line(struct tty3270 *tp, int mode)
1228{
1229 struct tty3270_line *line;
1230 struct tty3270_cell *cell;
1231 int i;
1232
1233 line = tp->screen + tp->cy;
1234 if (mode == 0)
1235 line->len = tp->cx;
1236 else if (mode == 1) {
1237 for (i = 0; i < tp->cx; i++) {
1238 cell = line->cells + i;
1239 cell->character = ' ';
1240 cell->highlight = TAX_RESET;
1241 cell->f_color = TAC_RESET;
1242 }
1243 if (line->len <= tp->cx)
1244 line->len = tp->cx + 1;
1245 } else if (mode == 2)
1246 line->len = 0;
1247 tty3270_convert_line(tp, tp->cy);
1248}
1249
1250/*
1251 * Erase display, 3 different cases:
1252 * Esc [ 0 J Erase from current position to bottom of screen inclusive
1253 * Esc [ 1 J Erase from top of screen to current position inclusive
1254 * Esc [ 2 J Erase entire screen (without moving the cursor)
1255 */
1256static void
1257tty3270_erase_display(struct tty3270 *tp, int mode)
1258{
1259 int i;
1260
1261 if (mode == 0) {
1262 tty3270_erase_line(tp, 0);
1263 for (i = tp->cy + 1; i < tp->view.rows - 2; i++) {
1264 tp->screen[i].len = 0;
1265 tty3270_convert_line(tp, i);
1266 }
1267 } else if (mode == 1) {
1268 for (i = 0; i < tp->cy; i++) {
1269 tp->screen[i].len = 0;
1270 tty3270_convert_line(tp, i);
1271 }
1272 tty3270_erase_line(tp, 1);
1273 } else if (mode == 2) {
1274 for (i = 0; i < tp->view.rows - 2; i++) {
1275 tp->screen[i].len = 0;
1276 tty3270_convert_line(tp, i);
1277 }
1278 }
1279 tty3270_rebuild_update(tp);
1280}
1281
1282/*
1283 * Set attributes found in an escape sequence.
1284 * Esc [ <attr> ; <attr> ; ... m
1285 */
1286static void
1287tty3270_set_attributes(struct tty3270 *tp)
1288{
1289 static unsigned char f_colors[] = {
1290 TAC_DEFAULT, TAC_RED, TAC_GREEN, TAC_YELLOW, TAC_BLUE,
1291 TAC_PINK, TAC_TURQ, TAC_WHITE, 0, TAC_DEFAULT
1292 };
1293 int i, attr;
1294
1295 for (i = 0; i <= tp->esc_npar; i++) {
1296 attr = tp->esc_par[i];
1297 switch (attr) {
1298 case 0: /* Reset */
1299 tp->highlight = TAX_RESET;
1300 tp->f_color = TAC_RESET;
1301 break;
1302 /* Highlight. */
1303 case 4: /* Start underlining. */
1304 tp->highlight = TAX_UNDER;
1305 break;
1306 case 5: /* Start blink. */
1307 tp->highlight = TAX_BLINK;
1308 break;
1309 case 7: /* Start reverse. */
1310 tp->highlight = TAX_REVER;
1311 break;
1312 case 24: /* End underlining */
1313 if (tp->highlight == TAX_UNDER)
1314 tp->highlight = TAX_RESET;
1315 break;
1316 case 25: /* End blink. */
1317 if (tp->highlight == TAX_BLINK)
1318 tp->highlight = TAX_RESET;
1319 break;
1320 case 27: /* End reverse. */
1321 if (tp->highlight == TAX_REVER)
1322 tp->highlight = TAX_RESET;
1323 break;
1324 /* Foreground color. */
1325 case 30: /* Black */
1326 case 31: /* Red */
1327 case 32: /* Green */
1328 case 33: /* Yellow */
1329 case 34: /* Blue */
1330 case 35: /* Magenta */
1331 case 36: /* Cyan */
1332 case 37: /* White */
1333 case 39: /* Black */
1334 tp->f_color = f_colors[attr - 30];
1335 break;
1336 }
1337 }
1338}
1339
1340static inline int
1341tty3270_getpar(struct tty3270 *tp, int ix)
1342{
1343 return (tp->esc_par[ix] > 0) ? tp->esc_par[ix] : 1;
1344}
1345
/*
 * Move the cursor to (cx, cy), clamped to the output area (columns
 * 0 .. cols-1, rows 0 .. rows-3).  When the row changes, the line
 * being left is converted to output format first.
 */
static void
tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
{
	tp->cx = min_t(int, tp->view.cols - 1, max_t(int, 0, cx));
	cy = min_t(int, tp->view.rows - 3, max_t(int, 0, cy));
	if (cy != tp->cy) {
		tty3270_convert_line(tp, tp->cy);
		tp->cy = cy;
	}
}
1356
1357/*
1358 * Process escape sequences. Known sequences:
1359 * Esc 7 Save Cursor Position
1360 * Esc 8 Restore Cursor Position
1361 * Esc [ Pn ; Pn ; .. m Set attributes
1362 * Esc [ Pn ; Pn H Cursor Position
1363 * Esc [ Pn ; Pn f Cursor Position
1364 * Esc [ Pn A Cursor Up
1365 * Esc [ Pn B Cursor Down
1366 * Esc [ Pn C Cursor Forward
1367 * Esc [ Pn D Cursor Backward
1368 * Esc [ Pn G Cursor Horizontal Absolute
1369 * Esc [ Pn X Erase Characters
1370 * Esc [ Ps J Erase in Display
1371 * Esc [ Ps K Erase in Line
1372 * // FIXME: add all the new ones.
1373 *
1374 * Pn is a numeric parameter, a string of zero or more decimal digits.
1375 * Ps is a selective parameter.
1376 */
1377static void
1378tty3270_escape_sequence(struct tty3270 *tp, char ch)
1379{
1380 enum { ESnormal, ESesc, ESsquare, ESgetpars };
1381
1382 if (tp->esc_state == ESnormal) {
1383 if (ch == 0x1b)
1384 /* Starting new escape sequence. */
1385 tp->esc_state = ESesc;
1386 return;
1387 }
1388 if (tp->esc_state == ESesc) {
1389 tp->esc_state = ESnormal;
1390 switch (ch) {
1391 case '[':
1392 tp->esc_state = ESsquare;
1393 break;
1394 case 'E':
1395 tty3270_cr(tp);
1396 tty3270_lf(tp);
1397 break;
1398 case 'M':
1399 tty3270_ri(tp);
1400 break;
1401 case 'D':
1402 tty3270_lf(tp);
1403 break;
1404 case 'Z': /* Respond ID. */
1405 kbd_puts_queue(tp->tty, "\033[?6c");
1406 break;
1407 case '7': /* Save cursor position. */
1408 tp->saved_cx = tp->cx;
1409 tp->saved_cy = tp->cy;
1410 tp->saved_highlight = tp->highlight;
1411 tp->saved_f_color = tp->f_color;
1412 break;
1413 case '8': /* Restore cursor position. */
1414 tty3270_convert_line(tp, tp->cy);
1415 tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
1416 tp->highlight = tp->saved_highlight;
1417 tp->f_color = tp->saved_f_color;
1418 break;
1419 case 'c': /* Reset terminal. */
1420 tp->cx = tp->saved_cx = 0;
1421 tp->cy = tp->saved_cy = 0;
1422 tp->highlight = tp->saved_highlight = TAX_RESET;
1423 tp->f_color = tp->saved_f_color = TAC_RESET;
1424 tty3270_erase_display(tp, 2);
1425 break;
1426 }
1427 return;
1428 }
1429 if (tp->esc_state == ESsquare) {
1430 tp->esc_state = ESgetpars;
1431 memset(tp->esc_par, 0, sizeof(tp->esc_par));
1432 tp->esc_npar = 0;
1433 tp->esc_ques = (ch == '?');
1434 if (tp->esc_ques)
1435 return;
1436 }
1437 if (tp->esc_state == ESgetpars) {
1438 if (ch == ';' && tp->esc_npar < ESCAPE_NPAR - 1) {
1439 tp->esc_npar++;
1440 return;
1441 }
1442 if (ch >= '0' && ch <= '9') {
1443 tp->esc_par[tp->esc_npar] *= 10;
1444 tp->esc_par[tp->esc_npar] += ch - '0';
1445 return;
1446 }
1447 }
1448 tp->esc_state = ESnormal;
1449 if (ch == 'n' && !tp->esc_ques) {
1450 if (tp->esc_par[0] == 5) /* Status report. */
1451 kbd_puts_queue(tp->tty, "\033[0n");
1452 else if (tp->esc_par[0] == 6) { /* Cursor report. */
1453 char buf[40];
1454 sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1);
1455 kbd_puts_queue(tp->tty, buf);
1456 }
1457 return;
1458 }
1459 if (tp->esc_ques)
1460 return;
1461 switch (ch) {
1462 case 'm':
1463 tty3270_set_attributes(tp);
1464 break;
1465 case 'H': /* Set cursor position. */
1466 case 'f':
1467 tty3270_goto_xy(tp, tty3270_getpar(tp, 1) - 1,
1468 tty3270_getpar(tp, 0) - 1);
1469 break;
1470 case 'd': /* Set y position. */
1471 tty3270_goto_xy(tp, tp->cx, tty3270_getpar(tp, 0) - 1);
1472 break;
1473 case 'A': /* Cursor up. */
1474 case 'F':
1475 tty3270_goto_xy(tp, tp->cx, tp->cy - tty3270_getpar(tp, 0));
1476 break;
1477 case 'B': /* Cursor down. */
1478 case 'e':
1479 case 'E':
1480 tty3270_goto_xy(tp, tp->cx, tp->cy + tty3270_getpar(tp, 0));
1481 break;
1482 case 'C': /* Cursor forward. */
1483 case 'a':
1484 tty3270_goto_xy(tp, tp->cx + tty3270_getpar(tp, 0), tp->cy);
1485 break;
1486 case 'D': /* Cursor backward. */
1487 tty3270_goto_xy(tp, tp->cx - tty3270_getpar(tp, 0), tp->cy);
1488 break;
1489 case 'G': /* Set x position. */
1490 case '`':
1491 tty3270_goto_xy(tp, tty3270_getpar(tp, 0), tp->cy);
1492 break;
1493 case 'X': /* Erase Characters. */
1494 tty3270_erase_characters(tp, tty3270_getpar(tp, 0));
1495 break;
1496 case 'J': /* Erase display. */
1497 tty3270_erase_display(tp, tp->esc_par[0]);
1498 break;
1499 case 'K': /* Erase line. */
1500 tty3270_erase_line(tp, tp->esc_par[0]);
1501 break;
1502 case 'P': /* Delete characters. */
1503 tty3270_delete_characters(tp, tty3270_getpar(tp, 0));
1504 break;
1505 case '@': /* Insert characters. */
1506 tty3270_insert_characters(tp, tty3270_getpar(tp, 0));
1507 break;
1508 case 's': /* Save cursor position. */
1509 tp->saved_cx = tp->cx;
1510 tp->saved_cy = tp->cy;
1511 tp->saved_highlight = tp->highlight;
1512 tp->saved_f_color = tp->f_color;
1513 break;
1514 case 'u': /* Restore cursor position. */
1515 tty3270_convert_line(tp, tp->cy);
1516 tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
1517 tp->highlight = tp->saved_highlight;
1518 tp->f_color = tp->saved_f_color;
1519 break;
1520 }
1521}
1522
1523/*
1524 * String write routine for 3270 ttys
1525 */
1526static void
1527tty3270_do_write(struct tty3270 *tp, const unsigned char *buf, int count)
1528{
1529 int i_msg, i;
1530
1531 spin_lock_bh(&tp->view.lock);
1532 for (i_msg = 0; !tp->tty->stopped && i_msg < count; i_msg++) {
1533 if (tp->esc_state != 0) {
1534 /* Continue escape sequence. */
1535 tty3270_escape_sequence(tp, buf[i_msg]);
1536 continue;
1537 }
1538
1539 switch (buf[i_msg]) {
1540 case 0x07: /* '\a' -- Alarm */
1541 tp->wcc |= TW_PLUSALARM;
1542 break;
1543 case 0x08: /* Backspace. */
1544 if (tp->cx > 0) {
1545 tp->cx--;
1546 tty3270_put_character(tp, ' ');
1547 }
1548 break;
1549 case 0x09: /* '\t' -- Tabulate */
1550 for (i = tp->cx % 8; i < 8; i++) {
1551 if (tp->cx >= tp->view.cols) {
1552 tty3270_cr(tp);
1553 tty3270_lf(tp);
1554 break;
1555 }
1556 tty3270_put_character(tp, ' ');
1557 tp->cx++;
1558 }
1559 break;
1560 case 0x0a: /* '\n' -- New Line */
1561 tty3270_cr(tp);
1562 tty3270_lf(tp);
1563 break;
1564 case 0x0c: /* '\f' -- Form Feed */
1565 tty3270_erase_display(tp, 2);
1566 tp->cx = tp->cy = 0;
1567 break;
1568 case 0x0d: /* '\r' -- Carriage Return */
1569 tp->cx = 0;
1570 break;
1571 case 0x0f: /* SuSE "exit alternate mode" */
1572 break;
1573 case 0x1b: /* Start escape sequence. */
1574 tty3270_escape_sequence(tp, buf[i_msg]);
1575 break;
1576 default: /* Insert normal character. */
1577 if (tp->cx >= tp->view.cols) {
1578 tty3270_cr(tp);
1579 tty3270_lf(tp);
1580 }
1581 tty3270_put_character(tp, buf[i_msg]);
1582 tp->cx++;
1583 break;
1584 }
1585 }
1586 /* Convert current line to 3270 data fragment. */
1587 tty3270_convert_line(tp, tp->cy);
1588
1589 /* Setup timer to update display after 1/10 second */
1590 if (!timer_pending(&tp->timer))
1591 tty3270_set_timer(tp, HZ/10);
1592
1593 spin_unlock_bh(&tp->view.lock);
1594}
1595
1596/*
1597 * String write routine for 3270 ttys
1598 */
1599static int
1600tty3270_write(struct tty_struct * tty,
1601 const unsigned char *buf, int count)
1602{
1603 struct tty3270 *tp;
1604
1605 tp = tty->driver_data;
1606 if (!tp)
1607 return 0;
1608 if (tp->char_count > 0) {
1609 tty3270_do_write(tp, tp->char_buf, tp->char_count);
1610 tp->char_count = 0;
1611 }
1612 tty3270_do_write(tp, buf, count);
1613 return count;
1614}
1615
1616/*
1617 * Put single characters to the ttys character buffer
1618 */
1619static void
1620tty3270_put_char(struct tty_struct *tty, unsigned char ch)
1621{
1622 struct tty3270 *tp;
1623
1624 tp = tty->driver_data;
1625 if (!tp)
1626 return;
1627 if (tp->char_count < TTY3270_CHAR_BUF_SIZE)
1628 tp->char_buf[tp->char_count++] = ch;
1629}
1630
1631/*
1632 * Flush all characters from the ttys characeter buffer put there
1633 * by tty3270_put_char.
1634 */
1635static void
1636tty3270_flush_chars(struct tty_struct *tty)
1637{
1638 struct tty3270 *tp;
1639
1640 tp = tty->driver_data;
1641 if (!tp)
1642 return;
1643 if (tp->char_count > 0) {
1644 tty3270_do_write(tp, tp->char_buf, tp->char_count);
1645 tp->char_count = 0;
1646 }
1647}
1648
1649/*
1650 * Returns the number of characters in the output buffer. This is
1651 * used in tty_wait_until_sent to wait until all characters have
1652 * appeared on the screen.
1653 */
1654static int
1655tty3270_chars_in_buffer(struct tty_struct *tty)
1656{
1657 return 0;
1658}
1659
/* Nothing to discard: the driver keeps no unsent output. */
static void
tty3270_flush_buffer(struct tty_struct *tty)
{
}
1664
1665/*
1666 * Check for visible/invisible input switches
1667 */
1668static void
1669tty3270_set_termios(struct tty_struct *tty, struct termios *old)
1670{
1671 struct tty3270 *tp;
1672 int new;
1673
1674 tp = tty->driver_data;
1675 if (!tp)
1676 return;
1677 spin_lock_bh(&tp->view.lock);
1678 if (L_ICANON(tty)) {
1679 new = L_ECHO(tty) ? TF_INPUT: TF_INPUTN;
1680 if (new != tp->inattr) {
1681 tp->inattr = new;
1682 tty3270_update_prompt(tp, 0, 0);
1683 tty3270_set_timer(tp, 1);
1684 }
1685 }
1686 spin_unlock_bh(&tp->view.lock);
1687}
1688
1689/*
1690 * Disable reading from a 3270 tty
1691 */
1692static void
1693tty3270_throttle(struct tty_struct * tty)
1694{
1695 struct tty3270 *tp;
1696
1697 tp = tty->driver_data;
1698 if (!tp)
1699 return;
1700 tp->throttle = 1;
1701}
1702
1703/*
1704 * Enable reading from a 3270 tty
1705 */
1706static void
1707tty3270_unthrottle(struct tty_struct * tty)
1708{
1709 struct tty3270 *tp;
1710
1711 tp = tty->driver_data;
1712 if (!tp)
1713 return;
1714 tp->throttle = 0;
1715 if (tp->attn)
1716 tty3270_issue_read(tp, 1);
1717}
1718
1719/*
1720 * Hang up the tty device.
1721 */
1722static void
1723tty3270_hangup(struct tty_struct *tty)
1724{
1725 // FIXME: implement
1726}
1727
/* No-op: chars_in_buffer() is always 0, so there is nothing to wait for. */
static void
tty3270_wait_until_sent(struct tty_struct *tty, int timeout)
{
}
1732
/*
 * Handle tty ioctls: everything is forwarded to the keyboard layer
 * after checking that the tty is still attached to a view and has not
 * seen an I/O error.
 */
static int
tty3270_ioctl(struct tty_struct *tty, struct file *file,
	      unsigned int cmd, unsigned long arg)
{
	struct tty3270 *tp;

	tp = tty->driver_data;
	if (!tp)
		return -ENODEV;
	if (tty->flags & (1 << TTY_IO_ERROR))
		return -EIO;
	return kbd_ioctl(tp->kbd, file, cmd, arg);
}
1746
/* tty operations implemented by this driver. */
static struct tty_operations tty3270_ops = {
	.open = tty3270_open,
	.close = tty3270_close,
	.write = tty3270_write,
	.put_char = tty3270_put_char,
	.flush_chars = tty3270_flush_chars,
	.write_room = tty3270_write_room,
	.chars_in_buffer = tty3270_chars_in_buffer,
	.flush_buffer = tty3270_flush_buffer,
	.throttle = tty3270_throttle,
	.unthrottle = tty3270_unthrottle,
	.hangup = tty3270_hangup,
	.wait_until_sent = tty3270_wait_until_sent,
	.ioctl = tty3270_ioctl,
	.set_termios = tty3270_set_termios
};
1763
1764void
1765tty3270_notifier(int index, int active)
1766{
1767 if (active)
1768 tty_register_device(tty3270_driver, index, 0);
1769 else
1770 tty_unregister_device(tty3270_driver, index);
1771}
1772
1773/*
1774 * 3270 tty registration code called from tty_init().
1775 * Most kernel services (incl. kmalloc) are available at this poimt.
1776 */
1777int __init
1778tty3270_init(void)
1779{
1780 struct tty_driver *driver;
1781 int ret;
1782
1783 driver = alloc_tty_driver(256);
1784 if (!driver)
1785 return -ENOMEM;
1786
1787 /*
1788 * Initialize the tty_driver structure
1789 * Entries in tty3270_driver that are NOT initialized:
1790 * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
1791 */
1792 driver->owner = THIS_MODULE;
1793 driver->devfs_name = "ttyTUB/";
1794 driver->driver_name = "ttyTUB";
1795 driver->name = "ttyTUB";
1796 driver->major = IBM_TTY3270_MAJOR;
1797 driver->type = TTY_DRIVER_TYPE_SYSTEM;
1798 driver->subtype = SYSTEM_TYPE_TTY;
1799 driver->init_termios = tty_std_termios;
1800 driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_NO_DEVFS;
1801 tty_set_operations(driver, &tty3270_ops);
1802 ret = tty_register_driver(driver);
1803 if (ret) {
1804 printk(KERN_ERR "tty3270 registration failed with %d\n", ret);
1805 put_tty_driver(driver);
1806 return ret;
1807 }
1808 tty3270_driver = driver;
1809 ret = raw3270_register_notifier(tty3270_notifier);
1810 if (ret) {
1811 printk(KERN_ERR "tty3270 notifier registration failed "
1812 "with %d\n", ret);
1813 put_tty_driver(driver);
1814 return ret;
1815
1816 }
1817 return 0;
1818}
1819
1820static void __exit
1821tty3270_exit(void)
1822{
1823 struct tty_driver *driver;
1824
1825 raw3270_unregister_notifier(tty3270_notifier);
1826 driver = tty3270_driver;
1827 tty3270_driver = 0;
1828 tty_unregister_driver(driver);
1829 tty3270_del_views();
1830}
1831
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(IBM_TTY3270_MAJOR);

/* When built in, tty3270_init() is also called from tty_init(). */
module_init(tty3270_init);
module_exit(tty3270_exit);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
new file mode 100644
index 000000000000..edf50d2bd10b
--- /dev/null
+++ b/drivers/s390/char/vmlogrdr.c
@@ -0,0 +1,920 @@
1/*
2 * drivers/s390/char/vmlogrdr.c
3 * character device driver for reading z/VM system service records
4 *
5 *
6 * Copyright (C) 2004 IBM Corporation
7 * character device driver for reading z/VM system service records,
8 * Version 1.0
9 * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
10 * Stefan Weinhuber <wein@de.ibm.com>
11 *
12 */
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16#include <linux/types.h>
17#include <linux/interrupt.h>
18#include <linux/spinlock.h>
19#include <asm/atomic.h>
20#include <asm/uaccess.h>
21#include <asm/cpcmd.h>
22#include <asm/debug.h>
23#include <asm/ebcdic.h>
24#include "../net/iucv.h"
25#include <linux/kmod.h>
26#include <linux/cdev.h>
27#include <linux/device.h>
28#include <linux/string.h>
29
30
31
32MODULE_AUTHOR
33 ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
34 " Stefan Weinhuber (wein@de.ibm.com)");
35MODULE_DESCRIPTION ("Character device driver for reading z/VM "
36 "system service records.");
37MODULE_LICENSE("GPL");
38
39
40/*
41 * The size of the buffer for iucv data transfer is one page,
42 * but in addition to the data we read from iucv we also
43 * place an integer and some characters into that buffer,
 44 * so the maximum size for record data is a little less than
45 * one page.
46 */
47#define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))
48
49/*
50 * The elements that are concurrently accessed by bottom halves are
51 * connection_established, iucv_path_severed, local_interrupt_buffer
52 * and receive_ready. The first three can be protected by
53 * priv_lock. receive_ready is atomic, so it can be incremented and
54 * decremented without holding a lock.
55 * The variable dev_in_use needs to be protected by the lock, since
56 * it's a flag used by open to make sure that the device is opened only
57 * by one user at the same time.
58 */
/* Per-device state for one z/VM system service reader. */
struct vmlogrdr_priv_t {
	char system_service[8];		/* z/VM service name, e.g. "*LOGREC " */
	char internal_name[8];		/* name used for the device node */
	char recording_name[8];		/* name used in CP RECORDING commands */
	u16 pathid;			/* IUCV path id of the connection */
	int connection_established;	/* set by the ConnectionComplete bh */
	int iucv_path_severed;		/* set by the ConnectionSevered bh */
	iucv_MessagePending local_interrupt_buffer; /* copy of last interrupt */
	atomic_t receive_ready;		/* number of pending records */
	iucv_handle_t iucv_handle;	/* from iucv_register_program() */
	int minor_num;			/* minor number of this device */
	char * buffer;			/* one zeroed page for record data */
	char * current_position;	/* read position within buffer */
	int remaining;			/* bytes left to copy to user space */
	ulong residual_length;		/* record bytes still held by iucv */
	int buffer_free;		/* 1: buffer may be refilled */
	int dev_in_use; /* 1: already opened, 0: not opened*/
	spinlock_t priv_lock;		/* protects the flags above, see top */
	struct device *device;
	struct class_device *class_device;
	int autorecording;		/* start/stop recording in open/close */
	int autopurge;			/* purge queued records in open/close */
};
82
83
84/*
85 * File operation structure for vmlogrdr devices
86 */
87static int vmlogrdr_open(struct inode *, struct file *);
88static int vmlogrdr_release(struct inode *, struct file *);
89static ssize_t vmlogrdr_read (struct file *filp, char *data, size_t count,
90 loff_t * ppos);
91
92static struct file_operations vmlogrdr_fops = {
93 .owner = THIS_MODULE,
94 .open = vmlogrdr_open,
95 .release = vmlogrdr_release,
96 .read = vmlogrdr_read,
97};
98
99
100static u8 iucvMagic[16] = {
101 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
102 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
103};
104
105
106static u8 mask[] = {
107 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
108 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
109 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
110 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
111};
112
113
114static u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
115
116
117static void
118vmlogrdr_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data);
119static void
120vmlogrdr_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data);
121static void
122vmlogrdr_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data);
123
124
125static iucv_interrupt_ops_t vmlogrdr_iucvops = {
126 .ConnectionComplete = vmlogrdr_iucv_ConnectionComplete,
127 .ConnectionSevered = vmlogrdr_iucv_ConnectionSevered,
128 .MessagePending = vmlogrdr_iucv_MessagePending,
129};
130
131
132DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
133DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
134
135/*
136 * pointer to system service private structure
137 * minor number 0 --> logrec
138 * minor number 1 --> account
139 * minor number 2 --> symptom
140 */
141
142static struct vmlogrdr_priv_t sys_ser[] = {
143 { .system_service = "*LOGREC ",
144 .internal_name = "logrec",
145 .recording_name = "EREP",
146 .minor_num = 0,
147 .buffer_free = 1,
148 .priv_lock = SPIN_LOCK_UNLOCKED,
149 .autorecording = 1,
150 .autopurge = 1,
151 },
152 { .system_service = "*ACCOUNT",
153 .internal_name = "account",
154 .recording_name = "ACCOUNT",
155 .minor_num = 1,
156 .buffer_free = 1,
157 .priv_lock = SPIN_LOCK_UNLOCKED,
158 .autorecording = 1,
159 .autopurge = 1,
160 },
161 { .system_service = "*SYMPTOM",
162 .internal_name = "symptom",
163 .recording_name = "SYMPTOM",
164 .minor_num = 2,
165 .buffer_free = 1,
166 .priv_lock = SPIN_LOCK_UNLOCKED,
167 .autorecording = 1,
168 .autopurge = 1,
169 }
170};
171
172#define MAXMINOR (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))
173
174static char FENCE[] = {"EOR"};
175static int vmlogrdr_major = 0;
176static struct cdev *vmlogrdr_cdev = NULL;
177static int recording_class_AB;
178
179
180static void
181vmlogrdr_iucv_ConnectionComplete (iucv_ConnectionComplete * eib,
182 void * pgm_data)
183{
184 struct vmlogrdr_priv_t * logptr = pgm_data;
185 spin_lock(&logptr->priv_lock);
186 logptr->connection_established = 1;
187 spin_unlock(&logptr->priv_lock);
188 wake_up(&conn_wait_queue);
189 return;
190}
191
192
193static void
194vmlogrdr_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data)
195{
196 u8 reason = (u8) eib->ipuser[8];
197 struct vmlogrdr_priv_t * logptr = pgm_data;
198
199 printk (KERN_ERR "vmlogrdr: connection severed with"
200 " reason %i\n", reason);
201
202 spin_lock(&logptr->priv_lock);
203 logptr->connection_established = 0;
204 logptr->iucv_path_severed = 1;
205 spin_unlock(&logptr->priv_lock);
206
207 wake_up(&conn_wait_queue);
208 /* just in case we're sleeping waiting for a record */
209 wake_up_interruptible(&read_wait_queue);
210}
211
212
213static void
214vmlogrdr_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data)
215{
216 struct vmlogrdr_priv_t * logptr = pgm_data;
217
218 /*
219 * This function is the bottom half so it should be quick.
220 * Copy the external interrupt data into our local eib and increment
221 * the usage count
222 */
223 spin_lock(&logptr->priv_lock);
224 memcpy(&(logptr->local_interrupt_buffer), eib, sizeof(*eib));
225 atomic_inc(&logptr->receive_ready);
226 spin_unlock(&logptr->priv_lock);
227 wake_up_interruptible(&read_wait_queue);
228}
229
230
/*
 * Ask CP whether this guest may use the RECORDING command.
 * Returns 1 if the "QUERY COMMAND RECORDING" response names
 * privilege class A or B (or ANY), 0 otherwise.
 */
static int
vmlogrdr_get_recording_class_AB(void) {
	char cp_command[]="QUERY COMMAND RECORDING ";
	char cp_response[80];
	char *tail;
	int len,i;

	printk (KERN_DEBUG "vmlogrdr: query command: %s\n", cp_command);
	cpcmd(cp_command, cp_response, sizeof(cp_response));
	printk (KERN_DEBUG "vmlogrdr: response: %s", cp_response);
	len = strnlen(cp_response,sizeof(cp_response));
	/* parse the class list that follows the '=' in the response */
	tail=strnchr(cp_response,len,'=');
	if (!tail)
		return 0;
	tail++;
	if (!strncmp("ANY",tail,3))
		return 1;
	if (!strncmp("NONE",tail,4))
		return 0;
	/*
	 * expect comma separated list of classes here, if one of them
	 * is A or B return 1 otherwise 0
	 */
	for (i=tail-cp_response; i<len; i++)
		if ( cp_response[i]=='A' || cp_response[i]=='B' )
			return 1;
	return 0;
}
260
261
/*
 * Switch recording for logptr's service on (action == 1) or off via
 * the CP RECORDING command, optionally purging queued records first.
 * Returns 0 if CP answered 'Command complete', -EIO otherwise.
 */
static int
vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) {

	char cp_command[80];
	char cp_response[160];
	char *onoff, *qid_string;

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));

	onoff = ((action == 1) ? "ON" : "OFF");
	qid_string = ((recording_class_AB == 1) ? " QID * " : "");

	/*
	 * The recording commands needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Purging has to be done as separate step, because recording
	 * can't be switched on as long as records are on the queue.
	 * Doing both at the same time doesn't work.
	 */

	if (purge) {
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);

		printk (KERN_DEBUG "vmlogrdr: recording command: %s\n",
			cp_command);
		cpcmd(cp_command, cp_response, sizeof(cp_response));
		printk (KERN_DEBUG "vmlogrdr: recording response: %s",
			cp_response);
	}

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));
	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
		 logptr->recording_name,
		 onoff,
		 qid_string);

	printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
	cpcmd(cp_command, cp_response, sizeof(cp_response));
	printk (KERN_DEBUG "vmlogrdr: recording response: %s",
		cp_response);
	/* The recording command will usually answer with 'Command complete'
	 * on success, but when the specific service was never connected
	 * before then there might be an additional informational message
	 * 'HCPCRC8072I Recording entry not found' before the
	 * 'Command complete'. So I use strstr rather than the strncmp.
	 */
	if (strstr(cp_response,"Command complete"))
		return 0;
	else
		return -EIO;

}
319
320
321static int
322vmlogrdr_open (struct inode *inode, struct file *filp)
323{
324 int dev_num = 0;
325 struct vmlogrdr_priv_t * logptr = NULL;
326 int connect_rc = 0;
327 int ret;
328
329 dev_num = iminor(inode);
330 if (dev_num > MAXMINOR)
331 return -ENODEV;
332
333 logptr = &sys_ser[dev_num];
334 if (logptr == NULL)
335 return -ENODEV;
336
337 /*
338 * only allow for blocking reads to be open
339 */
340 if (filp->f_flags & O_NONBLOCK)
341 return -ENOSYS;
342
343 /* Besure this device hasn't already been opened */
344 spin_lock_bh(&logptr->priv_lock);
345 if (logptr->dev_in_use) {
346 spin_unlock_bh(&logptr->priv_lock);
347 return -EBUSY;
348 } else {
349 logptr->dev_in_use = 1;
350 spin_unlock_bh(&logptr->priv_lock);
351 }
352
353 atomic_set(&logptr->receive_ready, 0);
354 logptr->buffer_free = 1;
355
356 /* set the file options */
357 filp->private_data = logptr;
358 filp->f_op = &vmlogrdr_fops;
359
360 /* start recording for this service*/
361 ret=0;
362 if (logptr->autorecording)
363 ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
364 if (ret)
365 printk (KERN_WARNING "vmlogrdr: failed to start "
366 "recording automatically\n");
367
368 /* Register with iucv driver */
369 logptr->iucv_handle = iucv_register_program(iucvMagic,
370 logptr->system_service, mask, &vmlogrdr_iucvops,
371 logptr);
372
373 if (logptr->iucv_handle == NULL) {
374 printk (KERN_ERR "vmlogrdr: failed to register with"
375 "iucv driver\n");
376 goto not_registered;
377 }
378
379 /* create connection to the system service */
380 spin_lock_bh(&logptr->priv_lock);
381 logptr->connection_established = 0;
382 logptr->iucv_path_severed = 0;
383 spin_unlock_bh(&logptr->priv_lock);
384
385 connect_rc = iucv_connect (&(logptr->pathid), 10, iucvMagic,
386 logptr->system_service, iucv_host, 0,
387 NULL, NULL,
388 logptr->iucv_handle, NULL);
389 if (connect_rc) {
390 printk (KERN_ERR "vmlogrdr: iucv connection to %s "
391 "failed with rc %i \n", logptr->system_service,
392 connect_rc);
393 goto not_connected;
394 }
395
396 /* We've issued the connect and now we must wait for a
397 * ConnectionComplete or ConnectinSevered Interrupt
398 * before we can continue to process.
399 */
400 wait_event(conn_wait_queue, (logptr->connection_established)
401 || (logptr->iucv_path_severed));
402 if (logptr->iucv_path_severed) {
403 goto not_connected;
404 }
405
406 return nonseekable_open(inode, filp);
407
408not_connected:
409 iucv_unregister_program(logptr->iucv_handle);
410 logptr->iucv_handle = NULL;
411not_registered:
412 if (logptr->autorecording)
413 vmlogrdr_recording(logptr,0,logptr->autopurge);
414 logptr->dev_in_use = 0;
415 return -EIO;
416
417
418}
419
420
421static int
422vmlogrdr_release (struct inode *inode, struct file *filp)
423{
424 int ret;
425
426 struct vmlogrdr_priv_t * logptr = filp->private_data;
427
428 iucv_unregister_program(logptr->iucv_handle);
429 logptr->iucv_handle = NULL;
430
431 if (logptr->autorecording) {
432 ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
433 if (ret)
434 printk (KERN_WARNING "vmlogrdr: failed to stop "
435 "recording automatically\n");
436 }
437 logptr->dev_in_use = 0;
438
439 return 0;
440}
441
442
/*
 * Fill priv->buffer with the next chunk of record data from IUCV.
 * For a new record the total length (record + FENCE) is placed in
 * the first sizeof(int) bytes of the buffer; if residual_length is
 * set, the continuation of a partially received record is fetched
 * instead.  Returns 0 if data was placed in the buffer, non-zero if
 * nothing was ready or the receive failed.  Takes priv_lock itself.
 */
static int
vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) {
	int rc, *temp;
	/* we need to keep track of two data sizes here:
	 * The number of bytes we need to receive from iucv and
	 * the total number of bytes we actually write into the buffer.
	 */
	int user_data_count, iucv_data_count;
	char * buffer;

	if (atomic_read(&priv->receive_ready)) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->residual_length){
			/* receive second half of a record */
			iucv_data_count = priv->residual_length;
			user_data_count = 0;
			buffer = priv->buffer;
		} else {
			/* receive a new record:
			 * We need to return the total length of the record
			 * + size of FENCE in the first 4 bytes of the buffer.
			 */
			iucv_data_count =
				priv->local_interrupt_buffer.ln1msg2.ipbfln1f;
			user_data_count = sizeof(int);
			temp = (int*)priv->buffer;
			*temp= iucv_data_count + sizeof(FENCE);
			buffer = priv->buffer + sizeof(int);
		}
		/*
		 * If the record is bigger than our buffer, we receive only
		 * a part of it. We can get the rest later.
		 */
		if (iucv_data_count > NET_BUFFER_SIZE)
			iucv_data_count = NET_BUFFER_SIZE;
		rc = iucv_receive(priv->pathid,
				  priv->local_interrupt_buffer.ipmsgid,
				  priv->local_interrupt_buffer.iptrgcls,
				  buffer,
				  iucv_data_count,
				  NULL,
				  NULL,
				  &priv->residual_length);
		spin_unlock_bh(&priv->priv_lock);
		/* An rc of 5 indicates that the record was bigger than
		 * the buffer, which is OK for us. A 9 indicates that the
		 * record was purged before we could receive it.
		 */
		if (rc == 5)
			rc = 0;
		if (rc == 9)
			atomic_set(&priv->receive_ready, 0);
	} else {
		rc = 1;
	}
	if (!rc) {
		priv->buffer_free = 0;
		user_data_count += iucv_data_count;
		priv->current_position = priv->buffer;
		if (priv->residual_length == 0){
			/* the whole record has been captured,
			 * now add the fence */
			atomic_dec(&priv->receive_ready);
			buffer = priv->buffer + user_data_count;
			memcpy(buffer, FENCE, sizeof(FENCE));
			user_data_count += sizeof(FENCE);
		}
		priv->remaining = user_data_count;
	}

	return rc;
}
515
516
/*
 * Read from a vmlogrdr device: refill the record buffer when it has
 * been fully consumed (sleeping interruptibly until a record is
 * pending), then copy at most the remainder of the current record to
 * user space.  Returns the number of bytes copied or a negative errno.
 *
 * NOTE(review): the wait condition only checks receive_ready; a
 * severed IUCV path wakes read_wait_queue but does not satisfy the
 * condition, so only a signal ends the wait in that case — confirm
 * this is the intended behavior.
 */
static ssize_t
vmlogrdr_read (struct file *filp, char *data, size_t count, loff_t * ppos)
{
	int rc;
	struct vmlogrdr_priv_t * priv = filp->private_data;

	while (priv->buffer_free) {
		rc = vmlogrdr_receive_data(priv);
		if (rc) {
			rc = wait_event_interruptible(read_wait_queue,
					atomic_read(&priv->receive_ready));
			if (rc)
				return rc;
		}
	}
	/* copy only up to end of record */
	if (count > priv->remaining)
		count = priv->remaining;

	if (copy_to_user(data, priv->current_position, count))
		return -EFAULT;

	*ppos += count;
	priv->current_position += count;
	priv->remaining -= count;

	/* if all data has been transferred, set buffer free */
	if (priv->remaining == 0)
		priv->buffer_free = 1;

	return count;
}
549
550static ssize_t
551vmlogrdr_autopurge_store(struct device * dev, const char * buf, size_t count) {
552 struct vmlogrdr_priv_t *priv = dev->driver_data;
553 ssize_t ret = count;
554
555 switch (buf[0]) {
556 case '0':
557 priv->autopurge=0;
558 break;
559 case '1':
560 priv->autopurge=1;
561 break;
562 default:
563 ret = -EINVAL;
564 }
565 return ret;
566}
567
568
569static ssize_t
570vmlogrdr_autopurge_show(struct device *dev, char *buf) {
571 struct vmlogrdr_priv_t *priv = dev->driver_data;
572 return sprintf(buf, "%u\n", priv->autopurge);
573}
574
575
576static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
577 vmlogrdr_autopurge_store);
578
579
/*
 * Device attribute: writing '1' purges the queued records of this
 * service via the CP RECORDING PURGE command; any other input is
 * rejected with -EINVAL.
 */
static ssize_t
vmlogrdr_purge_store(struct device * dev, const char * buf, size_t count) {

	char cp_command[80];
	char cp_response[80];
	struct vmlogrdr_priv_t *priv = dev->driver_data;

	if (buf[0] != '1')
		return -EINVAL;

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Other guests will not recognize the command and we have to
	 * issue the same command without the QID parameter.
	 */

	if (recording_class_AB)
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE QID * ",
			 priv->recording_name);
	else
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE ",
			 priv->recording_name);

	printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
	cpcmd(cp_command, cp_response, sizeof(cp_response));
	printk (KERN_DEBUG "vmlogrdr: recording response: %s",
		cp_response);

	return count;
}
616
617
618static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
619
620
621static ssize_t
622vmlogrdr_autorecording_store(struct device *dev, const char *buf,
623 size_t count) {
624 struct vmlogrdr_priv_t *priv = dev->driver_data;
625 ssize_t ret = count;
626
627 switch (buf[0]) {
628 case '0':
629 priv->autorecording=0;
630 break;
631 case '1':
632 priv->autorecording=1;
633 break;
634 default:
635 ret = -EINVAL;
636 }
637 return ret;
638}
639
640
641static ssize_t
642vmlogrdr_autorecording_show(struct device *dev, char *buf) {
643 struct vmlogrdr_priv_t *priv = dev->driver_data;
644 return sprintf(buf, "%u\n", priv->autorecording);
645}
646
647
648static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
649 vmlogrdr_autorecording_store);
650
651
652static ssize_t
653vmlogrdr_recording_store(struct device * dev, const char * buf, size_t count) {
654
655 struct vmlogrdr_priv_t *priv = dev->driver_data;
656 ssize_t ret;
657
658 switch (buf[0]) {
659 case '0':
660 ret = vmlogrdr_recording(priv,0,0);
661 break;
662 case '1':
663 ret = vmlogrdr_recording(priv,1,0);
664 break;
665 default:
666 ret = -EINVAL;
667 }
668 if (ret)
669 return ret;
670 else
671 return count;
672
673}
674
675
676static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
677
678
/*
 * Driver attribute: run "QUERY RECORDING" on CP and return the raw
 * response text.
 * NOTE(review): the hard-coded 4096 assumes the sysfs buffer is one
 * 4K page — confirm against PAGE_SIZE.
 */
static ssize_t
vmlogrdr_recording_status_show(struct device_driver *driver, char *buf) {

	char cp_command[] = "QUERY RECORDING ";
	int len;

	cpcmd(cp_command, buf, 4096);
	len = strlen(buf);
	return len;
}
689
690
691static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
692 NULL);
693
694static struct attribute *vmlogrdr_attrs[] = {
695 &dev_attr_autopurge.attr,
696 &dev_attr_purge.attr,
697 &dev_attr_autorecording.attr,
698 &dev_attr_recording.attr,
699 NULL,
700};
701
702static struct attribute_group vmlogrdr_attr_group = {
703 .attrs = vmlogrdr_attrs,
704};
705
706static struct class_simple *vmlogrdr_class;
707static struct device_driver vmlogrdr_driver = {
708 .name = "vmlogrdr",
709 .bus = &iucv_bus,
710};
711
712
713static int
714vmlogrdr_register_driver(void) {
715 int ret;
716
717 ret = driver_register(&vmlogrdr_driver);
718 if (ret) {
719 printk(KERN_ERR "vmlogrdr: failed to register driver.\n");
720 return ret;
721 }
722
723 ret = driver_create_file(&vmlogrdr_driver,
724 &driver_attr_recording_status);
725 if (ret) {
726 printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n");
727 goto unregdriver;
728 }
729
730 vmlogrdr_class = class_simple_create(THIS_MODULE, "vmlogrdr");
731 if (IS_ERR(vmlogrdr_class)) {
732 printk(KERN_ERR "vmlogrdr: failed to create class.\n");
733 ret=PTR_ERR(vmlogrdr_class);
734 vmlogrdr_class=NULL;
735 goto unregattr;
736 }
737 return 0;
738
739unregattr:
740 driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
741unregdriver:
742 driver_unregister(&vmlogrdr_driver);
743 return ret;
744}
745
746
/*
 * Tear down the driver-level objects in reverse order of
 * vmlogrdr_register_driver().
 */
static void
vmlogrdr_unregister_driver(void) {
	class_simple_destroy(vmlogrdr_class);
	vmlogrdr_class = NULL;
	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
	driver_unregister(&vmlogrdr_driver);
	return;
}
755
756
757static int
758vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) {
759 struct device *dev;
760 int ret;
761
762 dev = kmalloc(sizeof(struct device), GFP_KERNEL);
763 if (dev) {
764 memset(dev, 0, sizeof(struct device));
765 snprintf(dev->bus_id, BUS_ID_SIZE, "%s",
766 priv->internal_name);
767 dev->bus = &iucv_bus;
768 dev->parent = iucv_root;
769 dev->driver = &vmlogrdr_driver;
770 /*
771 * The release function could be called after the
772 * module has been unloaded. It's _only_ task is to
773 * free the struct. Therefore, we specify kfree()
774 * directly here. (Probably a little bit obfuscating
775 * but legitime ...).
776 */
777 dev->release = (void (*)(struct device *))kfree;
778 } else
779 return -ENOMEM;
780 ret = device_register(dev);
781 if (ret)
782 return ret;
783
784 ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
785 if (ret) {
786 device_unregister(dev);
787 return ret;
788 }
789 priv->class_device = class_simple_device_add(
790 vmlogrdr_class,
791 MKDEV(vmlogrdr_major, priv->minor_num),
792 dev,
793 "%s", dev->bus_id );
794 if (IS_ERR(priv->class_device)) {
795 ret = PTR_ERR(priv->class_device);
796 priv->class_device=NULL;
797 sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group);
798 device_unregister(dev);
799 return ret;
800 }
801 dev->driver_data = priv;
802 priv->device = dev;
803 return 0;
804}
805
806
807static int
808vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv ) {
809 class_simple_device_remove(MKDEV(vmlogrdr_major, priv->minor_num));
810 if (priv->device != NULL) {
811 sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
812 device_unregister(priv->device);
813 priv->device=NULL;
814 }
815 return 0;
816}
817
818
/*
 * Allocate and add the character device covering all MAXMINOR
 * minors starting at dev.  Returns 0 or a negative errno; on
 * failure vmlogrdr_cdev is left NULL.
 */
static int
vmlogrdr_register_cdev(dev_t dev) {
	int rc = 0;
	vmlogrdr_cdev = cdev_alloc();
	if (!vmlogrdr_cdev) {
		return -ENOMEM;
	}
	vmlogrdr_cdev->owner = THIS_MODULE;
	vmlogrdr_cdev->ops = &vmlogrdr_fops;
	vmlogrdr_cdev->dev = dev;
	rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
	if (!rc)
		return 0;

	/* cleanup: cdev is not fully registered, no cdev_del here! */
	kobject_put(&vmlogrdr_cdev->kobj);
	vmlogrdr_cdev=NULL;
	return rc;
}
838
839
/*
 * Undo everything vmlogrdr_init() has set up so far.  Safe to call
 * with only a subset of the resources allocated, so it also serves
 * as the error path of the init function.
 */
static void
vmlogrdr_cleanup(void) {
	int i;

	/* remove the char device before freeing the per-minor state */
	if (vmlogrdr_cdev) {
		cdev_del(vmlogrdr_cdev);
		vmlogrdr_cdev=NULL;
	}
	for (i=0; i < MAXMINOR; ++i ) {
		vmlogrdr_unregister_device(&sys_ser[i]);
		free_page((unsigned long)sys_ser[i].buffer);
	}
	vmlogrdr_unregister_driver();
	if (vmlogrdr_major) {
		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
		vmlogrdr_major=0;
	}
}
857
858
859static int
860vmlogrdr_init(void)
861{
862 int rc;
863 int i;
864 dev_t dev;
865
866 if (! MACHINE_IS_VM) {
867 printk (KERN_ERR "vmlogrdr: not running under VM, "
868 "driver not loaded.\n");
869 return -ENODEV;
870 }
871
872 recording_class_AB = vmlogrdr_get_recording_class_AB();
873
874 rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
875 if (rc)
876 return rc;
877 vmlogrdr_major = MAJOR(dev);
878
879 rc=vmlogrdr_register_driver();
880 if (rc)
881 goto cleanup;
882
883 for (i=0; i < MAXMINOR; ++i ) {
884 sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
885 if (!sys_ser[i].buffer) {
886 rc = ENOMEM;
887 break;
888 }
889 sys_ser[i].current_position = sys_ser[i].buffer;
890 rc=vmlogrdr_register_device(&sys_ser[i]);
891 if (rc)
892 break;
893 }
894 if (rc)
895 goto cleanup;
896
897 rc = vmlogrdr_register_cdev(dev);
898 if (rc)
899 goto cleanup;
900 printk (KERN_INFO "vmlogrdr: driver loaded\n");
901 return 0;
902
903cleanup:
904 vmlogrdr_cleanup();
905 printk (KERN_ERR "vmlogrdr: driver not loaded.\n");
906 return rc;
907}
908
909
910static void
911vmlogrdr_exit(void)
912{
913 vmlogrdr_cleanup();
914 printk (KERN_INFO "vmlogrdr: driver unloaded\n");
915 return;
916}
917
918
919module_init(vmlogrdr_init);
920module_exit(vmlogrdr_exit);
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
new file mode 100644
index 000000000000..22cf4fec8da9
--- /dev/null
+++ b/drivers/s390/char/vmwatchdog.c
@@ -0,0 +1,292 @@
1/*
2 * Watchdog implementation based on z/VM Watchdog Timer API
3 *
4 * The user space watchdog daemon can use this driver as
5 * /dev/vmwatchdog to have z/VM execute the specified CP
6 * command when the timeout expires. The default command is
 7 * "IPL", which will cause an immediate reboot.
8 */
9#include <linux/init.h>
10#include <linux/fs.h>
11#include <linux/kernel.h>
12#include <linux/miscdevice.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/watchdog.h>
16
17#include <asm/ebcdic.h>
18#include <asm/io.h>
19#include <asm/uaccess.h>
20
21#define MAX_CMDLEN 240
22#define MIN_INTERVAL 15
23static char vmwdt_cmd[MAX_CMDLEN] = "IPL";
24static int vmwdt_conceal;
25
26#ifdef CONFIG_WATCHDOG_NOWAYOUT
27static int vmwdt_nowayout = 1;
28#else
29static int vmwdt_nowayout = 0;
30#endif
31
32MODULE_LICENSE("GPL");
33MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
34MODULE_DESCRIPTION("z/VM Watchdog Timer");
35module_param_string(cmd, vmwdt_cmd, MAX_CMDLEN, 0644);
36MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers");
37module_param_named(conceal, vmwdt_conceal, bool, 0644);
38MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog "
39 " is active");
40module_param_named(nowayout, vmwdt_nowayout, bool, 0);
41MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
42 " (default=CONFIG_WATCHDOG_NOWAYOUT)");
43MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
44
45static unsigned int vmwdt_interval = 60;
46static unsigned long vmwdt_is_open;
47static int vmwdt_expect_close;
48
49enum vmwdt_func {
50 /* function codes */
51 wdt_init = 0,
52 wdt_change = 1,
53 wdt_cancel = 2,
54 /* flags */
55 wdt_conceal = 0x80000000,
56};
57
/*
 * Issue DIAGNOSE 0x288 (z/VM watchdog timer) with the given function
 * code, timeout in seconds and CP command string.
 * Returns 0 on success; -EINVAL if the diagnose raises a program
 * check (caught by the fixup section), e.g. when it is unavailable
 * or called with bad parameters.
 */
static int __diag288(enum vmwdt_func func, unsigned int timeout,
		char *cmd, size_t len)
{
	/* the diagnose expects its parameters in registers 2-5 */
	register unsigned long __func asm("2");
	register unsigned long __timeout asm("3");
	register unsigned long __cmdp asm("4");
	register unsigned long __cmdl asm("5");
	int err;

	__func = func;
	__timeout = timeout;
	__cmdp = virt_to_phys(cmd);
	__cmdl = len;
	err = 0;
	/* on an exception the fixup code loads -EINVAL into err */
	asm volatile (
#ifdef __s390x__
		"diag %2,%4,0x288\n"
"1:		\n"
		".section .fixup,\"ax\"\n"
"2:		lghi %0,%1\n"
		"	jg 1b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 8\n"
		"	.quad 1b,2b\n"
		".previous\n"
#else
		"diag %2,%4,0x288\n"
"1:		\n"
		".section .fixup,\"ax\"\n"
"2:		lhi %0,%1\n"
		"	bras 1,3f\n"
		"	.long 1b\n"
"3:		l 1,0(1)\n"
		"	br 1\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 1b,2b\n"
		".previous\n"
#endif
		: "+&d"(err)
		: "i"(-EINVAL), "d"(__func), "d"(__timeout),
		  "d"(__cmdp), "d"(__cmdl)
		: "1", "cc");
	return err;
}
105
106static int vmwdt_keepalive(void)
107{
108 /* we allocate new memory every time to avoid having
109 * to track the state. static allocation is not an
110 * option since that might not be contiguous in real
111 * storage in case of a modular build */
112 static char *ebc_cmd;
113 size_t len;
114 int ret;
115 unsigned int func;
116
117 ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
118 if (!ebc_cmd)
119 return -ENOMEM;
120
121 len = strlcpy(ebc_cmd, vmwdt_cmd, MAX_CMDLEN);
122 ASCEBC(ebc_cmd, MAX_CMDLEN);
123 EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
124
125 func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init;
126 ret = __diag288(func, vmwdt_interval, ebc_cmd, len);
127 kfree(ebc_cmd);
128
129 if (ret) {
130 printk(KERN_WARNING "%s: problem setting interval %d, "
131 "cmd %s\n", __FUNCTION__, vmwdt_interval,
132 vmwdt_cmd);
133 }
134 return ret;
135}
136
137static int vmwdt_disable(void)
138{
139 int ret = __diag288(wdt_cancel, 0, "", 0);
140 if (ret) {
141 printk(KERN_WARNING "%s: problem disabling watchdog\n",
142 __FUNCTION__);
143 }
144 return ret;
145}
146
/*
 * Check whether DIAG 288 is available under this hypervisor.
 * Returns 0 if the watchdog can be used, -EINVAL otherwise.
 */
static int __init vmwdt_probe(void)
{
	/* there is no real way to see if the watchdog is supported,
	 * so we try initializing it with a NOP command ("BEGIN")
	 * that won't cause any harm even if the following disable
	 * fails for some reason */
	static char __initdata ebc_begin[] = {
		194, 197, 199, 201, 213	/* "BEGIN" in EBCDIC */
	};
	if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) {
		printk(KERN_INFO "z/VM watchdog not available\n");
		return -EINVAL;
	}
	return vmwdt_disable();
}
162
163static int vmwdt_open(struct inode *i, struct file *f)
164{
165 int ret;
166 if (test_and_set_bit(0, &vmwdt_is_open))
167 return -EBUSY;
168 ret = vmwdt_keepalive();
169 if (ret)
170 clear_bit(0, &vmwdt_is_open);
171 return ret ? ret : nonseekable_open(i, f);
172}
173
/*
 * Release /dev/vmwatchdog.  The timer keeps running unless the magic
 * close character was written beforehand (see vmwdt_write), so a
 * crashed daemon still triggers the timeout.
 */
static int vmwdt_close(struct inode *i, struct file *f)
{
	if (vmwdt_expect_close == 42)
		vmwdt_disable();
	vmwdt_expect_close = 0;
	clear_bit(0, &vmwdt_is_open);
	return 0;
}
182
183static struct watchdog_info vmwdt_info = {
184 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
185 .firmware_version = 0,
186 .identity = "z/VM Watchdog Timer",
187};
188
189static int vmwdt_ioctl(struct inode *i, struct file *f,
190 unsigned int cmd, unsigned long arg)
191{
192 switch (cmd) {
193 case WDIOC_GETSUPPORT:
194 if (copy_to_user((void __user *)arg, &vmwdt_info,
195 sizeof(vmwdt_info)))
196 return -EFAULT;
197 return 0;
198 case WDIOC_GETSTATUS:
199 case WDIOC_GETBOOTSTATUS:
200 return put_user(0, (int *)arg);
201 case WDIOC_GETTEMP:
202 return -EINVAL;
203 case WDIOC_SETOPTIONS:
204 {
205 int options, ret;
206 if (get_user(options, (int __user *)arg))
207 return -EFAULT;
208 ret = -EINVAL;
209 if (options & WDIOS_DISABLECARD) {
210 ret = vmwdt_disable();
211 if (ret)
212 return ret;
213 }
214 if (options & WDIOS_ENABLECARD) {
215 ret = vmwdt_keepalive();
216 }
217 return ret;
218 }
219 case WDIOC_GETTIMEOUT:
220 return put_user(vmwdt_interval, (int __user *)arg);
221 case WDIOC_SETTIMEOUT:
222 {
223 int interval;
224 if (get_user(interval, (int __user *)arg))
225 return -EFAULT;
226 if (interval < MIN_INTERVAL)
227 return -EINVAL;
228 vmwdt_interval = interval;
229 }
230 return vmwdt_keepalive();
231 case WDIOC_KEEPALIVE:
232 return vmwdt_keepalive();
233 }
234
235 return -EINVAL;
236}
237
/*
 * Any write restarts the timer.  Unless nowayout is set, the data is
 * scanned for the magic character 'V' which arms the "expect close"
 * logic so a subsequent release disables the watchdog.
 */
static ssize_t vmwdt_write(struct file *f, const char __user *buf,
				size_t count, loff_t *ppos)
{
	if(count) {
		if (!vmwdt_nowayout) {
			size_t i;

			/* note: just in case someone wrote the magic character
			 * five months ago... */
			vmwdt_expect_close = 0;

			for (i = 0; i != count; i++) {
				char c;
				if (get_user(c, buf+i))
					return -EFAULT;
				if (c == 'V')
					vmwdt_expect_close = 42;
			}
		}
		/* someone wrote to us, we should restart timer */
		vmwdt_keepalive();
	}
	return count;
}
262
263static struct file_operations vmwdt_fops = {
264 .open = &vmwdt_open,
265 .release = &vmwdt_close,
266 .ioctl = &vmwdt_ioctl,
267 .write = &vmwdt_write,
268 .owner = THIS_MODULE,
269};
270
271static struct miscdevice vmwdt_dev = {
272 .minor = WATCHDOG_MINOR,
273 .name = "watchdog",
274 .fops = &vmwdt_fops,
275};
276
277static int __init vmwdt_init(void)
278{
279 int ret;
280
281 ret = vmwdt_probe();
282 if (ret)
283 return ret;
284 return misc_register(&vmwdt_dev);
285}
286module_init(vmwdt_init);
287
/*
 * Module unload: remove the misc device.  Deregistration is not
 * expected to fail at this point, hence the WARN_ON.
 */
static void __exit vmwdt_exit(void)
{
	WARN_ON(misc_deregister(&vmwdt_dev) != 0);
}
292module_exit(vmwdt_exit);