aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/block/viodasd.c809
-rw-r--r--drivers/cdrom/viocd.c739
-rw-r--r--drivers/char/viotape.c1041
3 files changed, 0 insertions, 2589 deletions
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
deleted file mode 100644
index 9a5b2a2d616d..000000000000
--- a/drivers/block/viodasd.c
+++ /dev/null
@@ -1,809 +0,0 @@
1/* -*- linux-c -*-
2 * viodasd.c
3 * Authors: Dave Boutcher <boutcher@us.ibm.com>
4 * Ryan Arnold <ryanarn@us.ibm.com>
5 * Colin Devilbiss <devilbis@us.ibm.com>
6 * Stephen Rothwell
7 *
8 * (C) Copyright 2000-2004 IBM Corporation
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * This routine provides access to disk space (termed "DASD" in historical
25 * IBM terms) owned and managed by an OS/400 partition running on the
26 * same box as this Linux partition.
27 *
28 * All disk operations are performed by sending messages back and forth to
29 * the OS/400 partition.
30 */
31
32#define pr_fmt(fmt) "viod: " fmt
33
34#include <linux/major.h>
35#include <linux/fs.h>
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/blkdev.h>
39#include <linux/genhd.h>
40#include <linux/hdreg.h>
41#include <linux/errno.h>
42#include <linux/init.h>
43#include <linux/string.h>
44#include <linux/mutex.h>
45#include <linux/dma-mapping.h>
46#include <linux/completion.h>
47#include <linux/device.h>
48#include <linux/scatterlist.h>
49
50#include <asm/uaccess.h>
51#include <asm/vio.h>
52#include <asm/iseries/hv_types.h>
53#include <asm/iseries/hv_lp_event.h>
54#include <asm/iseries/hv_lp_config.h>
55#include <asm/iseries/vio.h>
56#include <asm/firmware.h>
57
58MODULE_DESCRIPTION("iSeries Virtual DASD");
59MODULE_AUTHOR("Dave Boutcher");
60MODULE_LICENSE("GPL");
61
62/*
63 * We only support 7 partitions per physical disk....so with minor
64 * numbers 0-255 we get a maximum of 32 disks.
65 */
66#define VIOD_GENHD_NAME "iseries/vd"
67
68#define VIOD_VERS "1.64"
69
70enum {
71 PARTITION_SHIFT = 3,
72 MAX_DISKNO = HVMAXARCHITECTEDVIRTUALDISKS,
73 MAX_DISK_NAME = FIELD_SIZEOF(struct gendisk, disk_name)
74};
75
76static DEFINE_MUTEX(viodasd_mutex);
77static DEFINE_SPINLOCK(viodasd_spinlock);
78
79#define VIOMAXREQ 16
80
81#define DEVICE_NO(cell) ((struct viodasd_device *)(cell) - &viodasd_devices[0])
82
83struct viodasd_waitevent {
84 struct completion com;
85 int rc;
86 u16 sub_result;
87 int max_disk; /* open */
88};
89
90static const struct vio_error_entry viodasd_err_table[] = {
91 { 0x0201, EINVAL, "Invalid Range" },
92 { 0x0202, EINVAL, "Invalid Token" },
93 { 0x0203, EIO, "DMA Error" },
94 { 0x0204, EIO, "Use Error" },
95 { 0x0205, EIO, "Release Error" },
96 { 0x0206, EINVAL, "Invalid Disk" },
97 { 0x0207, EBUSY, "Can't Lock" },
98 { 0x0208, EIO, "Already Locked" },
99 { 0x0209, EIO, "Already Unlocked" },
100 { 0x020A, EIO, "Invalid Arg" },
101 { 0x020B, EIO, "Bad IFS File" },
102 { 0x020C, EROFS, "Read Only Device" },
103 { 0x02FF, EIO, "Internal Error" },
104 { 0x0000, 0, NULL },
105};
106
107/*
108 * Figure out the biggest I/O request (in sectors) we can accept
109 */
110#define VIODASD_MAXSECTORS (4096 / 512 * VIOMAXBLOCKDMA)
111
112/*
113 * Number of disk I/O requests we've sent to OS/400
114 */
115static int num_req_outstanding;
116
117/*
118 * This is our internal structure for keeping track of disk devices
119 */
120struct viodasd_device {
121 u16 cylinders;
122 u16 tracks;
123 u16 sectors;
124 u16 bytes_per_sector;
125 u64 size;
126 int read_only;
127 spinlock_t q_lock;
128 struct gendisk *disk;
129 struct device *dev;
130} viodasd_devices[MAX_DISKNO];
131
132/*
133 * External open entry point.
134 */
135static int viodasd_open(struct block_device *bdev, fmode_t mode)
136{
137 struct viodasd_device *d = bdev->bd_disk->private_data;
138 HvLpEvent_Rc hvrc;
139 struct viodasd_waitevent we;
140 u16 flags = 0;
141
142 if (d->read_only) {
143 if (mode & FMODE_WRITE)
144 return -EROFS;
145 flags = vioblockflags_ro;
146 }
147
148 init_completion(&we.com);
149
150 /* Send the open event to OS/400 */
151 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
152 HvLpEvent_Type_VirtualIo,
153 viomajorsubtype_blockio | vioblockopen,
154 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
155 viopath_sourceinst(viopath_hostLp),
156 viopath_targetinst(viopath_hostLp),
157 (u64)(unsigned long)&we, VIOVERSION << 16,
158 ((u64)DEVICE_NO(d) << 48) | ((u64)flags << 32),
159 0, 0, 0);
160 if (hvrc != 0) {
161 pr_warning("HV open failed %d\n", (int)hvrc);
162 return -EIO;
163 }
164
165 wait_for_completion(&we.com);
166
167 /* Check the return code */
168 if (we.rc != 0) {
169 const struct vio_error_entry *err =
170 vio_lookup_rc(viodasd_err_table, we.sub_result);
171
172 pr_warning("bad rc opening disk: %d:0x%04x (%s)\n",
173 (int)we.rc, we.sub_result, err->msg);
174 return -EIO;
175 }
176
177 return 0;
178}
179
180static int viodasd_unlocked_open(struct block_device *bdev, fmode_t mode)
181{
182 int ret;
183
184 mutex_lock(&viodasd_mutex);
185 ret = viodasd_open(bdev, mode);
186 mutex_unlock(&viodasd_mutex);
187
188 return ret;
189}
190
191
192/*
193 * External release entry point.
194 */
195static int viodasd_release(struct gendisk *disk, fmode_t mode)
196{
197 struct viodasd_device *d = disk->private_data;
198 HvLpEvent_Rc hvrc;
199
200 mutex_lock(&viodasd_mutex);
201 /* Send the event to OS/400. We DON'T expect a response */
202 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
203 HvLpEvent_Type_VirtualIo,
204 viomajorsubtype_blockio | vioblockclose,
205 HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
206 viopath_sourceinst(viopath_hostLp),
207 viopath_targetinst(viopath_hostLp),
208 0, VIOVERSION << 16,
209 ((u64)DEVICE_NO(d) << 48) /* | ((u64)flags << 32) */,
210 0, 0, 0);
211 if (hvrc != 0)
212 pr_warning("HV close call failed %d\n", (int)hvrc);
213
214 mutex_unlock(&viodasd_mutex);
215
216 return 0;
217}
218
219
220/* External ioctl entry point.
221 */
222static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
223{
224 struct gendisk *disk = bdev->bd_disk;
225 struct viodasd_device *d = disk->private_data;
226
227 geo->sectors = d->sectors ? d->sectors : 32;
228 geo->heads = d->tracks ? d->tracks : 64;
229 geo->cylinders = d->cylinders ? d->cylinders :
230 get_capacity(disk) / (geo->sectors * geo->heads);
231
232 return 0;
233}
234
235/*
236 * Our file operations table
237 */
238static const struct block_device_operations viodasd_fops = {
239 .owner = THIS_MODULE,
240 .open = viodasd_unlocked_open,
241 .release = viodasd_release,
242 .getgeo = viodasd_getgeo,
243};
244
245/*
246 * End a request
247 */
248static void viodasd_end_request(struct request *req, int error,
249 int num_sectors)
250{
251 __blk_end_request(req, error, num_sectors << 9);
252}
253
254/*
255 * Send an actual I/O request to OS/400
256 */
257static int send_request(struct request *req)
258{
259 u64 start;
260 int direction;
261 int nsg;
262 u16 viocmd;
263 HvLpEvent_Rc hvrc;
264 struct vioblocklpevent *bevent;
265 struct HvLpEvent *hev;
266 struct scatterlist sg[VIOMAXBLOCKDMA];
267 int sgindex;
268 struct viodasd_device *d;
269 unsigned long flags;
270
271 start = (u64)blk_rq_pos(req) << 9;
272
273 if (rq_data_dir(req) == READ) {
274 direction = DMA_FROM_DEVICE;
275 viocmd = viomajorsubtype_blockio | vioblockread;
276 } else {
277 direction = DMA_TO_DEVICE;
278 viocmd = viomajorsubtype_blockio | vioblockwrite;
279 }
280
281 d = req->rq_disk->private_data;
282
283 /* Now build the scatter-gather list */
284 sg_init_table(sg, VIOMAXBLOCKDMA);
285 nsg = blk_rq_map_sg(req->q, req, sg);
286 nsg = dma_map_sg(d->dev, sg, nsg, direction);
287
288 spin_lock_irqsave(&viodasd_spinlock, flags);
289 num_req_outstanding++;
290
291 /* This optimization handles a single DMA block */
292 if (nsg == 1)
293 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
294 HvLpEvent_Type_VirtualIo, viocmd,
295 HvLpEvent_AckInd_DoAck,
296 HvLpEvent_AckType_ImmediateAck,
297 viopath_sourceinst(viopath_hostLp),
298 viopath_targetinst(viopath_hostLp),
299 (u64)(unsigned long)req, VIOVERSION << 16,
300 ((u64)DEVICE_NO(d) << 48), start,
301 ((u64)sg_dma_address(&sg[0])) << 32,
302 sg_dma_len(&sg[0]));
303 else {
304 bevent = (struct vioblocklpevent *)
305 vio_get_event_buffer(viomajorsubtype_blockio);
306 if (bevent == NULL) {
307 pr_warning("error allocating disk event buffer\n");
308 goto error_ret;
309 }
310
311 /*
312 * Now build up the actual request. Note that we store
313 * the pointer to the request in the correlation
314 * token so we can match the response up later
315 */
316 memset(bevent, 0, sizeof(struct vioblocklpevent));
317 hev = &bevent->event;
318 hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK |
319 HV_LP_EVENT_INT;
320 hev->xType = HvLpEvent_Type_VirtualIo;
321 hev->xSubtype = viocmd;
322 hev->xSourceLp = HvLpConfig_getLpIndex();
323 hev->xTargetLp = viopath_hostLp;
324 hev->xSizeMinus1 =
325 offsetof(struct vioblocklpevent, u.rw_data.dma_info) +
326 (sizeof(bevent->u.rw_data.dma_info[0]) * nsg) - 1;
327 hev->xSourceInstanceId = viopath_sourceinst(viopath_hostLp);
328 hev->xTargetInstanceId = viopath_targetinst(viopath_hostLp);
329 hev->xCorrelationToken = (u64)req;
330 bevent->version = VIOVERSION;
331 bevent->disk = DEVICE_NO(d);
332 bevent->u.rw_data.offset = start;
333
334 /*
335 * Copy just the dma information from the sg list
336 * into the request
337 */
338 for (sgindex = 0; sgindex < nsg; sgindex++) {
339 bevent->u.rw_data.dma_info[sgindex].token =
340 sg_dma_address(&sg[sgindex]);
341 bevent->u.rw_data.dma_info[sgindex].len =
342 sg_dma_len(&sg[sgindex]);
343 }
344
345 /* Send the request */
346 hvrc = HvCallEvent_signalLpEvent(&bevent->event);
347 vio_free_event_buffer(viomajorsubtype_blockio, bevent);
348 }
349
350 if (hvrc != HvLpEvent_Rc_Good) {
351 pr_warning("error sending disk event to OS/400 (rc %d)\n",
352 (int)hvrc);
353 goto error_ret;
354 }
355 spin_unlock_irqrestore(&viodasd_spinlock, flags);
356 return 0;
357
358error_ret:
359 num_req_outstanding--;
360 spin_unlock_irqrestore(&viodasd_spinlock, flags);
361 dma_unmap_sg(d->dev, sg, nsg, direction);
362 return -1;
363}
364
365/*
366 * This is the external request processing routine
367 */
368static void do_viodasd_request(struct request_queue *q)
369{
370 struct request *req;
371
372 /*
373 * If we already have the maximum number of requests
374 * outstanding to OS/400 just bail out. We'll come
375 * back later.
376 */
377 while (num_req_outstanding < VIOMAXREQ) {
378 req = blk_fetch_request(q);
379 if (req == NULL)
380 return;
381 /* check that request contains a valid command */
382 if (req->cmd_type != REQ_TYPE_FS) {
383 viodasd_end_request(req, -EIO, blk_rq_sectors(req));
384 continue;
385 }
386 /* Try sending the request */
387 if (send_request(req) != 0)
388 viodasd_end_request(req, -EIO, blk_rq_sectors(req));
389 }
390}
391
392/*
393 * Probe a single disk and fill in the viodasd_device structure
394 * for it.
395 */
396static int probe_disk(struct viodasd_device *d)
397{
398 HvLpEvent_Rc hvrc;
399 struct viodasd_waitevent we;
400 int dev_no = DEVICE_NO(d);
401 struct gendisk *g;
402 struct request_queue *q;
403 u16 flags = 0;
404
405retry:
406 init_completion(&we.com);
407
408 /* Send the open event to OS/400 */
409 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
410 HvLpEvent_Type_VirtualIo,
411 viomajorsubtype_blockio | vioblockopen,
412 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
413 viopath_sourceinst(viopath_hostLp),
414 viopath_targetinst(viopath_hostLp),
415 (u64)(unsigned long)&we, VIOVERSION << 16,
416 ((u64)dev_no << 48) | ((u64)flags<< 32),
417 0, 0, 0);
418 if (hvrc != 0) {
419 pr_warning("bad rc on HV open %d\n", (int)hvrc);
420 return 0;
421 }
422
423 wait_for_completion(&we.com);
424
425 if (we.rc != 0) {
426 if (flags != 0)
427 return 0;
428 /* try again with read only flag set */
429 flags = vioblockflags_ro;
430 goto retry;
431 }
432 if (we.max_disk > (MAX_DISKNO - 1)) {
433 printk_once(KERN_INFO pr_fmt("Only examining the first %d of %d disks connected\n"),
434 MAX_DISKNO, we.max_disk + 1);
435 }
436
437 /* Send the close event to OS/400. We DON'T expect a response */
438 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
439 HvLpEvent_Type_VirtualIo,
440 viomajorsubtype_blockio | vioblockclose,
441 HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
442 viopath_sourceinst(viopath_hostLp),
443 viopath_targetinst(viopath_hostLp),
444 0, VIOVERSION << 16,
445 ((u64)dev_no << 48) | ((u64)flags << 32),
446 0, 0, 0);
447 if (hvrc != 0) {
448 pr_warning("bad rc sending event to OS/400 %d\n", (int)hvrc);
449 return 0;
450 }
451
452 if (d->dev == NULL) {
453 /* this is when we reprobe for new disks */
454 if (vio_create_viodasd(dev_no) == NULL) {
455 pr_warning("cannot allocate virtual device for disk %d\n",
456 dev_no);
457 return 0;
458 }
459 /*
460 * The vio_create_viodasd will have recursed into this
461 * routine with d->dev set to the new vio device and
462 * will finish the setup of the disk below.
463 */
464 return 1;
465 }
466
467 /* create the request queue for the disk */
468 spin_lock_init(&d->q_lock);
469 q = blk_init_queue(do_viodasd_request, &d->q_lock);
470 if (q == NULL) {
471 pr_warning("cannot allocate queue for disk %d\n", dev_no);
472 return 0;
473 }
474 g = alloc_disk(1 << PARTITION_SHIFT);
475 if (g == NULL) {
476 pr_warning("cannot allocate disk structure for disk %d\n",
477 dev_no);
478 blk_cleanup_queue(q);
479 return 0;
480 }
481
482 d->disk = g;
483 blk_queue_max_segments(q, VIOMAXBLOCKDMA);
484 blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS);
485 g->major = VIODASD_MAJOR;
486 g->first_minor = dev_no << PARTITION_SHIFT;
487 if (dev_no >= 26)
488 snprintf(g->disk_name, sizeof(g->disk_name),
489 VIOD_GENHD_NAME "%c%c",
490 'a' + (dev_no / 26) - 1, 'a' + (dev_no % 26));
491 else
492 snprintf(g->disk_name, sizeof(g->disk_name),
493 VIOD_GENHD_NAME "%c", 'a' + (dev_no % 26));
494 g->fops = &viodasd_fops;
495 g->queue = q;
496 g->private_data = d;
497 g->driverfs_dev = d->dev;
498 set_capacity(g, d->size >> 9);
499
500 pr_info("disk %d: %lu sectors (%lu MB) CHS=%d/%d/%d sector size %d%s\n",
501 dev_no, (unsigned long)(d->size >> 9),
502 (unsigned long)(d->size >> 20),
503 (int)d->cylinders, (int)d->tracks,
504 (int)d->sectors, (int)d->bytes_per_sector,
505 d->read_only ? " (RO)" : "");
506
507 /* register us in the global list */
508 add_disk(g);
509 return 1;
510}
511
512/* returns the total number of scatterlist elements converted */
513static int block_event_to_scatterlist(const struct vioblocklpevent *bevent,
514 struct scatterlist *sg, int *total_len)
515{
516 int i, numsg;
517 const struct rw_data *rw_data = &bevent->u.rw_data;
518 static const int offset =
519 offsetof(struct vioblocklpevent, u.rw_data.dma_info);
520 static const int element_size = sizeof(rw_data->dma_info[0]);
521
522 numsg = ((bevent->event.xSizeMinus1 + 1) - offset) / element_size;
523 if (numsg > VIOMAXBLOCKDMA)
524 numsg = VIOMAXBLOCKDMA;
525
526 *total_len = 0;
527 sg_init_table(sg, VIOMAXBLOCKDMA);
528 for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) {
529 sg_dma_address(&sg[i]) = rw_data->dma_info[i].token;
530 sg_dma_len(&sg[i]) = rw_data->dma_info[i].len;
531 *total_len += rw_data->dma_info[i].len;
532 }
533 return i;
534}
535
536/*
537 * Restart all queues, starting with the one _after_ the disk given,
538 * thus reducing the chance of starvation of higher numbered disks.
539 */
540static void viodasd_restart_all_queues_starting_from(int first_index)
541{
542 int i;
543
544 for (i = first_index + 1; i < MAX_DISKNO; ++i)
545 if (viodasd_devices[i].disk)
546 blk_run_queue(viodasd_devices[i].disk->queue);
547 for (i = 0; i <= first_index; ++i)
548 if (viodasd_devices[i].disk)
549 blk_run_queue(viodasd_devices[i].disk->queue);
550}
551
552/*
553 * For read and write requests, decrement the number of outstanding requests,
554 * Free the DMA buffers we allocated.
555 */
556static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
557{
558 int num_sg, num_sect, pci_direction, total_len;
559 struct request *req;
560 struct scatterlist sg[VIOMAXBLOCKDMA];
561 struct HvLpEvent *event = &bevent->event;
562 unsigned long irq_flags;
563 struct viodasd_device *d;
564 int error;
565 spinlock_t *qlock;
566
567 num_sg = block_event_to_scatterlist(bevent, sg, &total_len);
568 num_sect = total_len >> 9;
569 if (event->xSubtype == (viomajorsubtype_blockio | vioblockread))
570 pci_direction = DMA_FROM_DEVICE;
571 else
572 pci_direction = DMA_TO_DEVICE;
573 req = (struct request *)bevent->event.xCorrelationToken;
574 d = req->rq_disk->private_data;
575
576 dma_unmap_sg(d->dev, sg, num_sg, pci_direction);
577
578 /*
579 * Since this is running in interrupt mode, we need to make sure
580 * we're not stepping on any global I/O operations
581 */
582 spin_lock_irqsave(&viodasd_spinlock, irq_flags);
583 num_req_outstanding--;
584 spin_unlock_irqrestore(&viodasd_spinlock, irq_flags);
585
586 error = (event->xRc == HvLpEvent_Rc_Good) ? 0 : -EIO;
587 if (error) {
588 const struct vio_error_entry *err;
589 err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
590 pr_warning("read/write error %d:0x%04x (%s)\n",
591 event->xRc, bevent->sub_result, err->msg);
592 num_sect = blk_rq_sectors(req);
593 }
594 qlock = req->q->queue_lock;
595 spin_lock_irqsave(qlock, irq_flags);
596 viodasd_end_request(req, error, num_sect);
597 spin_unlock_irqrestore(qlock, irq_flags);
598
599 /* Finally, try to get more requests off of this device's queue */
600 viodasd_restart_all_queues_starting_from(DEVICE_NO(d));
601
602 return 0;
603}
604
605/* This routine handles incoming block LP events */
606static void handle_block_event(struct HvLpEvent *event)
607{
608 struct vioblocklpevent *bevent = (struct vioblocklpevent *)event;
609 struct viodasd_waitevent *pwe;
610
611 if (event == NULL)
612 /* Notification that a partition went away! */
613 return;
614 /* First, we should NEVER get an int here...only acks */
615 if (hvlpevent_is_int(event)) {
616 pr_warning("Yikes! got an int in viodasd event handler!\n");
617 if (hvlpevent_need_ack(event)) {
618 event->xRc = HvLpEvent_Rc_InvalidSubtype;
619 HvCallEvent_ackLpEvent(event);
620 }
621 }
622
623 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
624 case vioblockopen:
625 /*
626 * Handle a response to an open request. We get all the
627 * disk information in the response, so update it. The
628 * correlation token contains a pointer to a waitevent
629 * structure that has a completion in it. update the
630 * return code in the waitevent structure and post the
631 * completion to wake up the guy who sent the request
632 */
633 pwe = (struct viodasd_waitevent *)event->xCorrelationToken;
634 pwe->rc = event->xRc;
635 pwe->sub_result = bevent->sub_result;
636 if (event->xRc == HvLpEvent_Rc_Good) {
637 const struct open_data *data = &bevent->u.open_data;
638 struct viodasd_device *device =
639 &viodasd_devices[bevent->disk];
640 device->read_only =
641 bevent->flags & vioblockflags_ro;
642 device->size = data->disk_size;
643 device->cylinders = data->cylinders;
644 device->tracks = data->tracks;
645 device->sectors = data->sectors;
646 device->bytes_per_sector = data->bytes_per_sector;
647 pwe->max_disk = data->max_disk;
648 }
649 complete(&pwe->com);
650 break;
651 case vioblockclose:
652 break;
653 case vioblockread:
654 case vioblockwrite:
655 viodasd_handle_read_write(bevent);
656 break;
657
658 default:
659 pr_warning("invalid subtype!");
660 if (hvlpevent_need_ack(event)) {
661 event->xRc = HvLpEvent_Rc_InvalidSubtype;
662 HvCallEvent_ackLpEvent(event);
663 }
664 }
665}
666
667/*
668 * Get the driver to reprobe for more disks.
669 */
670static ssize_t probe_disks(struct device_driver *drv, const char *buf,
671 size_t count)
672{
673 struct viodasd_device *d;
674
675 for (d = viodasd_devices; d < &viodasd_devices[MAX_DISKNO]; d++) {
676 if (d->disk == NULL)
677 probe_disk(d);
678 }
679 return count;
680}
681static DRIVER_ATTR(probe, S_IWUSR, NULL, probe_disks);
682
683static int viodasd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
684{
685 struct viodasd_device *d = &viodasd_devices[vdev->unit_address];
686
687 d->dev = &vdev->dev;
688 if (!probe_disk(d))
689 return -ENODEV;
690 return 0;
691}
692
693static int viodasd_remove(struct vio_dev *vdev)
694{
695 struct viodasd_device *d;
696
697 d = &viodasd_devices[vdev->unit_address];
698 if (d->disk) {
699 del_gendisk(d->disk);
700 blk_cleanup_queue(d->disk->queue);
701 put_disk(d->disk);
702 d->disk = NULL;
703 }
704 d->dev = NULL;
705 return 0;
706}
707
708/**
709 * viodasd_device_table: Used by vio.c to match devices that we
710 * support.
711 */
712static struct vio_device_id viodasd_device_table[] __devinitdata = {
713 { "block", "IBM,iSeries-viodasd" },
714 { "", "" }
715};
716MODULE_DEVICE_TABLE(vio, viodasd_device_table);
717
718static struct vio_driver viodasd_driver = {
719 .id_table = viodasd_device_table,
720 .probe = viodasd_probe,
721 .remove = viodasd_remove,
722 .driver = {
723 .name = "viodasd",
724 .owner = THIS_MODULE,
725 }
726};
727
728static int need_delete_probe;
729
730/*
731 * Initialize the whole device driver. Handle module and non-module
732 * versions
733 */
734static int __init viodasd_init(void)
735{
736 int rc;
737
738 if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
739 rc = -ENODEV;
740 goto early_fail;
741 }
742
743 /* Try to open to our host lp */
744 if (viopath_hostLp == HvLpIndexInvalid)
745 vio_set_hostlp();
746
747 if (viopath_hostLp == HvLpIndexInvalid) {
748 pr_warning("invalid hosting partition\n");
749 rc = -EIO;
750 goto early_fail;
751 }
752
753 pr_info("vers " VIOD_VERS ", hosting partition %d\n", viopath_hostLp);
754
755 /* register the block device */
756 rc = register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
757 if (rc) {
758 pr_warning("Unable to get major number %d for %s\n",
759 VIODASD_MAJOR, VIOD_GENHD_NAME);
760 goto early_fail;
761 }
762 /* Actually open the path to the hosting partition */
763 rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio,
764 VIOMAXREQ + 2);
765 if (rc) {
766 pr_warning("error opening path to host partition %d\n",
767 viopath_hostLp);
768 goto unregister_blk;
769 }
770
771 /* Initialize our request handler */
772 vio_setHandler(viomajorsubtype_blockio, handle_block_event);
773
774 rc = vio_register_driver(&viodasd_driver);
775 if (rc) {
776 pr_warning("vio_register_driver failed\n");
777 goto unset_handler;
778 }
779
780 /*
781 * If this call fails, it just means that we cannot dynamically
782 * add virtual disks, but the driver will still work fine for
783 * all existing disk, so ignore the failure.
784 */
785 if (!driver_create_file(&viodasd_driver.driver, &driver_attr_probe))
786 need_delete_probe = 1;
787
788 return 0;
789
790unset_handler:
791 vio_clearHandler(viomajorsubtype_blockio);
792 viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
793unregister_blk:
794 unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
795early_fail:
796 return rc;
797}
798module_init(viodasd_init);
799
800void __exit viodasd_exit(void)
801{
802 if (need_delete_probe)
803 driver_remove_file(&viodasd_driver.driver, &driver_attr_probe);
804 vio_unregister_driver(&viodasd_driver);
805 vio_clearHandler(viomajorsubtype_blockio);
806 viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
807 unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
808}
809module_exit(viodasd_exit);
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
deleted file mode 100644
index 7878da89d29e..000000000000
--- a/drivers/cdrom/viocd.c
+++ /dev/null
@@ -1,739 +0,0 @@
1/* -*- linux-c -*-
2 * drivers/cdrom/viocd.c
3 *
4 * iSeries Virtual CD Rom
5 *
6 * Authors: Dave Boutcher <boutcher@us.ibm.com>
7 * Ryan Arnold <ryanarn@us.ibm.com>
8 * Colin Devilbiss <devilbis@us.ibm.com>
9 * Stephen Rothwell
10 *
11 * (C) Copyright 2000-2004 IBM Corporation
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License as
15 * published by the Free Software Foundation; either version 2 of the
16 * License, or (at your option) anyu later version.
17 *
18 * This program is distributed in the hope that it will be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software Foundation,
25 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 *
27 * This routine provides access to CD ROM drives owned and managed by an
28 * OS/400 partition running on the same box as this Linux partition.
29 *
30 * All operations are performed by sending messages back and forth to
31 * the OS/400 partition.
32 */
33
34#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35
36#include <linux/major.h>
37#include <linux/blkdev.h>
38#include <linux/cdrom.h>
39#include <linux/errno.h>
40#include <linux/init.h>
41#include <linux/dma-mapping.h>
42#include <linux/module.h>
43#include <linux/completion.h>
44#include <linux/proc_fs.h>
45#include <linux/mutex.h>
46#include <linux/seq_file.h>
47#include <linux/scatterlist.h>
48
49#include <asm/vio.h>
50#include <asm/iseries/hv_types.h>
51#include <asm/iseries/hv_lp_event.h>
52#include <asm/iseries/vio.h>
53#include <asm/firmware.h>
54
55#define VIOCD_DEVICE "iseries/vcd"
56
57#define VIOCD_VERS "1.06"
58
59/*
60 * Should probably make this a module parameter....sigh
61 */
62#define VIOCD_MAX_CD HVMAXARCHITECTEDVIRTUALCDROMS
63
64static DEFINE_MUTEX(viocd_mutex);
65static const struct vio_error_entry viocd_err_table[] = {
66 {0x0201, EINVAL, "Invalid Range"},
67 {0x0202, EINVAL, "Invalid Token"},
68 {0x0203, EIO, "DMA Error"},
69 {0x0204, EIO, "Use Error"},
70 {0x0205, EIO, "Release Error"},
71 {0x0206, EINVAL, "Invalid CD"},
72 {0x020C, EROFS, "Read Only Device"},
73 {0x020D, ENOMEDIUM, "Changed or Missing Volume (or Varied Off?)"},
74 {0x020E, EIO, "Optical System Error (Varied Off?)"},
75 {0x02FF, EIO, "Internal Error"},
76 {0x3010, EIO, "Changed Volume"},
77 {0xC100, EIO, "Optical System Error"},
78 {0x0000, 0, NULL},
79};
80
81/*
82 * This is the structure we use to exchange info between driver and interrupt
83 * handler
84 */
85struct viocd_waitevent {
86 struct completion com;
87 int rc;
88 u16 sub_result;
89 int changed;
90};
91
92/* this is a lookup table for the true capabilities of a device */
93struct capability_entry {
94 char *type;
95 int capability;
96};
97
98static struct capability_entry capability_table[] __initdata = {
99 { "6330", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
100 { "6331", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
101 { "6333", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
102 { "632A", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
103 { "6321", CDC_LOCK },
104 { "632B", 0 },
105 { NULL , CDC_LOCK },
106};
107
108/* These are our internal structures for keeping track of devices */
109static int viocd_numdev;
110
111struct disk_info {
112 struct gendisk *viocd_disk;
113 struct cdrom_device_info viocd_info;
114 struct device *dev;
115 const char *rsrcname;
116 const char *type;
117 const char *model;
118};
119static struct disk_info viocd_diskinfo[VIOCD_MAX_CD];
120
121#define DEVICE_NR(di) ((di) - &viocd_diskinfo[0])
122
123static spinlock_t viocd_reqlock;
124
125#define MAX_CD_REQ 1
126
127/* procfs support */
128static int proc_viocd_show(struct seq_file *m, void *v)
129{
130 int i;
131
132 for (i = 0; i < viocd_numdev; i++) {
133 seq_printf(m, "viocd device %d is iSeries resource %10.10s"
134 "type %4.4s, model %3.3s\n",
135 i, viocd_diskinfo[i].rsrcname,
136 viocd_diskinfo[i].type,
137 viocd_diskinfo[i].model);
138 }
139 return 0;
140}
141
142static int proc_viocd_open(struct inode *inode, struct file *file)
143{
144 return single_open(file, proc_viocd_show, NULL);
145}
146
147static const struct file_operations proc_viocd_operations = {
148 .owner = THIS_MODULE,
149 .open = proc_viocd_open,
150 .read = seq_read,
151 .llseek = seq_lseek,
152 .release = single_release,
153};
154
155static int viocd_blk_open(struct block_device *bdev, fmode_t mode)
156{
157 struct disk_info *di = bdev->bd_disk->private_data;
158 int ret;
159
160 mutex_lock(&viocd_mutex);
161 ret = cdrom_open(&di->viocd_info, bdev, mode);
162 mutex_unlock(&viocd_mutex);
163
164 return ret;
165}
166
167static int viocd_blk_release(struct gendisk *disk, fmode_t mode)
168{
169 struct disk_info *di = disk->private_data;
170 mutex_lock(&viocd_mutex);
171 cdrom_release(&di->viocd_info, mode);
172 mutex_unlock(&viocd_mutex);
173 return 0;
174}
175
176static int viocd_blk_ioctl(struct block_device *bdev, fmode_t mode,
177 unsigned cmd, unsigned long arg)
178{
179 struct disk_info *di = bdev->bd_disk->private_data;
180 int ret;
181
182 mutex_lock(&viocd_mutex);
183 ret = cdrom_ioctl(&di->viocd_info, bdev, mode, cmd, arg);
184 mutex_unlock(&viocd_mutex);
185
186 return ret;
187}
188
189static unsigned int viocd_blk_check_events(struct gendisk *disk,
190 unsigned int clearing)
191{
192 struct disk_info *di = disk->private_data;
193 return cdrom_check_events(&di->viocd_info, clearing);
194}
195
196static const struct block_device_operations viocd_fops = {
197 .owner = THIS_MODULE,
198 .open = viocd_blk_open,
199 .release = viocd_blk_release,
200 .ioctl = viocd_blk_ioctl,
201 .check_events = viocd_blk_check_events,
202};
203
204static int viocd_open(struct cdrom_device_info *cdi, int purpose)
205{
206 struct disk_info *diskinfo = cdi->handle;
207 int device_no = DEVICE_NR(diskinfo);
208 HvLpEvent_Rc hvrc;
209 struct viocd_waitevent we;
210
211 init_completion(&we.com);
212 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
213 HvLpEvent_Type_VirtualIo,
214 viomajorsubtype_cdio | viocdopen,
215 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
216 viopath_sourceinst(viopath_hostLp),
217 viopath_targetinst(viopath_hostLp),
218 (u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
219 0, 0, 0);
220 if (hvrc != 0) {
221 pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
222 (int)hvrc);
223 return -EIO;
224 }
225
226 wait_for_completion(&we.com);
227
228 if (we.rc) {
229 const struct vio_error_entry *err =
230 vio_lookup_rc(viocd_err_table, we.sub_result);
231 pr_warning("bad rc %d:0x%04X on open: %s\n",
232 we.rc, we.sub_result, err->msg);
233 return -err->errno;
234 }
235
236 return 0;
237}
238
239static void viocd_release(struct cdrom_device_info *cdi)
240{
241 int device_no = DEVICE_NR((struct disk_info *)cdi->handle);
242 HvLpEvent_Rc hvrc;
243
244 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
245 HvLpEvent_Type_VirtualIo,
246 viomajorsubtype_cdio | viocdclose,
247 HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
248 viopath_sourceinst(viopath_hostLp),
249 viopath_targetinst(viopath_hostLp), 0,
250 VIOVERSION << 16, ((u64)device_no << 48), 0, 0, 0);
251 if (hvrc != 0)
252 pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
253 (int)hvrc);
254}
255
256/* Send a read or write request to OS/400 */
257static int send_request(struct request *req)
258{
259 HvLpEvent_Rc hvrc;
260 struct disk_info *diskinfo = req->rq_disk->private_data;
261 u64 len;
262 dma_addr_t dmaaddr;
263 int direction;
264 u16 cmd;
265 struct scatterlist sg;
266
267 BUG_ON(req->nr_phys_segments > 1);
268
269 if (rq_data_dir(req) == READ) {
270 direction = DMA_FROM_DEVICE;
271 cmd = viomajorsubtype_cdio | viocdread;
272 } else {
273 direction = DMA_TO_DEVICE;
274 cmd = viomajorsubtype_cdio | viocdwrite;
275 }
276
277 sg_init_table(&sg, 1);
278 if (blk_rq_map_sg(req->q, req, &sg) == 0) {
279 pr_warning("error setting up scatter/gather list\n");
280 return -1;
281 }
282
283 if (dma_map_sg(diskinfo->dev, &sg, 1, direction) == 0) {
284 pr_warning("error allocating sg tce\n");
285 return -1;
286 }
287 dmaaddr = sg_dma_address(&sg);
288 len = sg_dma_len(&sg);
289
290 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
291 HvLpEvent_Type_VirtualIo, cmd,
292 HvLpEvent_AckInd_DoAck,
293 HvLpEvent_AckType_ImmediateAck,
294 viopath_sourceinst(viopath_hostLp),
295 viopath_targetinst(viopath_hostLp),
296 (u64)req, VIOVERSION << 16,
297 ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
298 (u64)blk_rq_pos(req) * 512, len, 0);
299 if (hvrc != HvLpEvent_Rc_Good) {
300 pr_warning("hv error on op %d\n", (int)hvrc);
301 return -1;
302 }
303
304 return 0;
305}
306
/*
 * Count of read/write requests currently in flight to the host.  Protected
 * by the queue lock (viocd_reqlock); only 0 or 1 in practice since the
 * fetch loop below stops as soon as one request is outstanding.
 */
static int rwreq;

/* Request-queue callback; runs with viocd_reqlock held */
static void do_viocd_request(struct request_queue *q)
{
	struct request *req;

	/* dispatch at most one request at a time to the host */
	while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
		if (req->cmd_type != REQ_TYPE_FS)
			__blk_end_request_all(req, -EIO);
		else if (send_request(req) < 0) {
			pr_warning("unable to send message to OS/400!\n");
			__blk_end_request_all(req, -EIO);
		} else
			rwreq++;
	}
}
323
/*
 * cdrom_device_ops check_events: ask OS/400 whether the medium changed.
 *
 * Sends a viocdcheck event and waits for the ack; the event handler
 * stores the host's changed flag in we.changed.  On any error we report
 * "no event" rather than guessing at a media change.
 */
static unsigned int viocd_check_events(struct cdrom_device_info *cdi,
		unsigned int clearing, int disc_nr)
{
	struct viocd_waitevent we;
	HvLpEvent_Rc hvrc;
	int device_no = DEVICE_NR((struct disk_info *)cdi->handle);

	init_completion(&we.com);

	/* Send the open event to OS/400 */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_cdio | viocdcheck,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
			0, 0, 0);
	if (hvrc != 0) {
		pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
				(int)hvrc);
		return 0;
	}

	wait_for_completion(&we.com);

	/* Check the return code. If bad, assume no change */
	if (we.rc) {
		const struct vio_error_entry *err =
			vio_lookup_rc(viocd_err_table, we.sub_result);
		pr_warning("bad rc %d:0x%04X on check_change: %s; Assuming no change\n",
				we.rc, we.sub_result, err->msg);
		return 0;
	}

	return we.changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}
361
/*
 * cdrom_device_ops lock_door: lock (locking != 0) or unlock the tray.
 *
 * Device number and the lock flag share one event word: the device goes
 * in bits 48+, the flag in bits 32+.  Waits synchronously for the ack.
 */
static int viocd_lock_door(struct cdrom_device_info *cdi, int locking)
{
	HvLpEvent_Rc hvrc;
	u64 device_no = DEVICE_NR((struct disk_info *)cdi->handle);
	/* NOTE: flags is 1 or 0 so it won't overwrite the device_no */
	u64 flags = !!locking;
	struct viocd_waitevent we;

	init_completion(&we.com);

	/* Send the lockdoor event to OS/400 */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_cdio | viocdlockdoor,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)&we, VIOVERSION << 16,
			(device_no << 48) | (flags << 32), 0, 0, 0);
	if (hvrc != 0) {
		pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
				(int)hvrc);
		return -EIO;
	}

	wait_for_completion(&we.com);

	if (we.rc != 0)
		return -EIO;
	return 0;
}
393
/*
 * Minimal generic-packet emulation: only READ_DISC_INFO and
 * GET_CONFIGURATION of the rewritable (CDF_RWRT) feature are answered
 * locally; everything else fails with an ILLEGAL REQUEST sense.
 * The status is mirrored into cgc->stat and also returned.
 */
static int viocd_packet(struct cdrom_device_info *cdi,
		struct packet_command *cgc)
{
	unsigned int buflen = cgc->buflen;
	int ret = -EIO;

	switch (cgc->cmd[0]) {
	case GPCMD_READ_DISC_INFO:
		{
			disc_information *di = (disc_information *)cgc->buffer;

			if (buflen >= 2) {
				di->disc_information_length = cpu_to_be16(1);
				ret = 0;
			}
			/* erasable iff the drive can write DVD-RAM/RAM media */
			if (buflen >= 3)
				di->erasable =
					(cdi->ops->capability & ~cdi->mask
					 & (CDC_DVD_RAM | CDC_RAM)) != 0;
		}
		break;
	case GPCMD_GET_CONFIGURATION:
		if (cgc->cmd[3] == CDF_RWRT) {
			struct rwrt_feature_desc *rfd = (struct rwrt_feature_desc *)(cgc->buffer + sizeof(struct feature_header));

			if ((buflen >=
			     (sizeof(struct feature_header) + sizeof(*rfd))) &&
			    (cdi->ops->capability & ~cdi->mask
			     & (CDC_DVD_RAM | CDC_RAM))) {
				rfd->feature_code = cpu_to_be16(CDF_RWRT);
				rfd->curr = 1;
				ret = 0;
			}
		}
		break;
	default:
		if (cgc->sense) {
			/* indicate Unknown code: ILLEGAL REQUEST, asc 0x20 */
			cgc->sense->sense_key = 0x05;
			cgc->sense->asc = 0x20;
			cgc->sense->ascq = 0x00;
		}
		break;
	}

	cgc->stat = ret;
	return ret;
}
442
443static void restart_all_queues(int first_index)
444{
445 int i;
446
447 for (i = first_index + 1; i < viocd_numdev; i++)
448 if (viocd_diskinfo[i].viocd_disk)
449 blk_run_queue(viocd_diskinfo[i].viocd_disk->queue);
450 for (i = 0; i <= first_index; i++)
451 if (viocd_diskinfo[i].viocd_disk)
452 blk_run_queue(viocd_diskinfo[i].viocd_disk->queue);
453}
454
455/* This routine handles incoming CD LP events */
456static void vio_handle_cd_event(struct HvLpEvent *event)
457{
458 struct viocdlpevent *bevent;
459 struct viocd_waitevent *pwe;
460 struct disk_info *di;
461 unsigned long flags;
462 struct request *req;
463
464
465 if (event == NULL)
466 /* Notification that a partition went away! */
467 return;
468 /* First, we should NEVER get an int here...only acks */
469 if (hvlpevent_is_int(event)) {
470 pr_warning("Yikes! got an int in viocd event handler!\n");
471 if (hvlpevent_need_ack(event)) {
472 event->xRc = HvLpEvent_Rc_InvalidSubtype;
473 HvCallEvent_ackLpEvent(event);
474 }
475 }
476
477 bevent = (struct viocdlpevent *)event;
478
479 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
480 case viocdopen:
481 if (event->xRc == 0) {
482 di = &viocd_diskinfo[bevent->disk];
483 blk_queue_logical_block_size(di->viocd_disk->queue,
484 bevent->block_size);
485 set_capacity(di->viocd_disk,
486 bevent->media_size *
487 bevent->block_size / 512);
488 }
489 /* FALLTHROUGH !! */
490 case viocdlockdoor:
491 pwe = (struct viocd_waitevent *)event->xCorrelationToken;
492return_complete:
493 pwe->rc = event->xRc;
494 pwe->sub_result = bevent->sub_result;
495 complete(&pwe->com);
496 break;
497
498 case viocdcheck:
499 pwe = (struct viocd_waitevent *)event->xCorrelationToken;
500 pwe->changed = bevent->flags;
501 goto return_complete;
502
503 case viocdclose:
504 break;
505
506 case viocdwrite:
507 case viocdread:
508 /*
509 * Since this is running in interrupt mode, we need to
510 * make sure we're not stepping on any global I/O operations
511 */
512 di = &viocd_diskinfo[bevent->disk];
513 spin_lock_irqsave(&viocd_reqlock, flags);
514 dma_unmap_single(di->dev, bevent->token, bevent->len,
515 ((event->xSubtype & VIOMINOR_SUBTYPE_MASK) == viocdread)
516 ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
517 req = (struct request *)bevent->event.xCorrelationToken;
518 rwreq--;
519
520 if (event->xRc != HvLpEvent_Rc_Good) {
521 const struct vio_error_entry *err =
522 vio_lookup_rc(viocd_err_table,
523 bevent->sub_result);
524 pr_warning("request %p failed with rc %d:0x%04X: %s\n",
525 req, event->xRc,
526 bevent->sub_result, err->msg);
527 __blk_end_request_all(req, -EIO);
528 } else
529 __blk_end_request_all(req, 0);
530
531 /* restart handling of incoming requests */
532 spin_unlock_irqrestore(&viocd_reqlock, flags);
533 restart_all_queues(bevent->disk);
534 break;
535
536 default:
537 pr_warning("message with invalid subtype %0x04X!\n",
538 event->xSubtype & VIOMINOR_SUBTYPE_MASK);
539 if (hvlpevent_need_ack(event)) {
540 event->xRc = HvLpEvent_Rc_InvalidSubtype;
541 HvCallEvent_ackLpEvent(event);
542 }
543 }
544}
545
/* Audio ioctls are not implemented for the virtual drive; reject them all */
static int viocd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
		void *arg)
{
	(void)cdi;
	(void)cmd;
	(void)arg;

	return -EINVAL;
}
551
552static struct cdrom_device_ops viocd_dops = {
553 .open = viocd_open,
554 .release = viocd_release,
555 .check_events = viocd_check_events,
556 .lock_door = viocd_lock_door,
557 .generic_packet = viocd_packet,
558 .audio_ioctl = viocd_audio_ioctl,
559 .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM
560};
561
562static int find_capability(const char *type)
563{
564 struct capability_entry *entry;
565
566 for(entry = capability_table; entry->type; ++entry)
567 if(!strncmp(entry->type, type, 4))
568 break;
569 return entry->capability;
570}
571
/*
 * vio bus probe: bring up one virtual CD drive.
 *
 * Reads resource name/type/model from the device-tree node, registers the
 * device with the cdrom layer, then creates its request queue and gendisk.
 * Capacity starts at 0 and is filled in when the viocdopen ack arrives.
 * Returns 0 on success, -ENODEV on any failure (cleanup labels unwind in
 * reverse order of setup).
 */
static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct gendisk *gendisk;
	int deviceno;
	struct disk_info *d;
	struct cdrom_device_info *c;
	struct request_queue *q;
	struct device_node *node = vdev->dev.of_node;

	deviceno = vdev->unit_address;
	if (deviceno >= VIOCD_MAX_CD)
		return -ENODEV;
	if (!node)
		return -ENODEV;

	/* viocd_numdev tracks highest unit address seen + 1 */
	if (deviceno >= viocd_numdev)
		viocd_numdev = deviceno + 1;

	d = &viocd_diskinfo[deviceno];
	d->rsrcname = of_get_property(node, "linux,vio_rsrcname", NULL);
	d->type = of_get_property(node, "linux,vio_type", NULL);
	d->model = of_get_property(node, "linux,vio_model", NULL);

	c = &d->viocd_info;

	c->ops = &viocd_dops;
	c->speed = 4;
	c->capacity = 1;
	c->handle = d;
	/* mask off everything the device type does not support */
	c->mask = ~find_capability(d->type);
	sprintf(c->name, VIOCD_DEVICE "%c", 'a' + deviceno);

	if (register_cdrom(c) != 0) {
		pr_warning("Cannot register viocd CD-ROM %s!\n", c->name);
		goto out;
	}
	pr_info("cd %s is iSeries resource %10.10s type %4.4s, model %3.3s\n",
			c->name, d->rsrcname, d->type, d->model);
	q = blk_init_queue(do_viocd_request, &viocd_reqlock);
	if (q == NULL) {
		pr_warning("Cannot allocate queue for %s!\n", c->name);
		goto out_unregister_cdrom;
	}
	gendisk = alloc_disk(1);
	if (gendisk == NULL) {
		pr_warning("Cannot create gendisk for %s!\n", c->name);
		goto out_cleanup_queue;
	}
	gendisk->major = VIOCD_MAJOR;
	gendisk->first_minor = deviceno;
	/*
	 * NOTE(review): strncpy won't NUL-terminate if c->name is as long
	 * as disk_name -- relies on the short "viocdX" names staying short.
	 */
	strncpy(gendisk->disk_name, c->name,
			sizeof(gendisk->disk_name));
	/* the host accepts a single segment of at most 4k per request */
	blk_queue_max_segments(q, 1);
	blk_queue_max_hw_sectors(q, 4096 / 512);
	gendisk->queue = q;
	gendisk->fops = &viocd_fops;
	gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
		GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
	set_capacity(gendisk, 0);
	gendisk->private_data = d;
	d->viocd_disk = gendisk;
	d->dev = &vdev->dev;
	gendisk->driverfs_dev = d->dev;
	add_disk(gendisk);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out_unregister_cdrom:
	unregister_cdrom(c);
out:
	return -ENODEV;
}
645
646static int viocd_remove(struct vio_dev *vdev)
647{
648 struct disk_info *d = &viocd_diskinfo[vdev->unit_address];
649
650 unregister_cdrom(&d->viocd_info);
651 del_gendisk(d->viocd_disk);
652 blk_cleanup_queue(d->viocd_disk->queue);
653 put_disk(d->viocd_disk);
654 return 0;
655}
656
/**
 * viocd_device_table: Used by vio.c to match devices that we
 * support.
 */
static struct vio_device_id viocd_device_table[] __devinitdata = {
	{ "block", "IBM,iSeries-viocd" },
	/* empty strings terminate the table */
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, viocd_device_table);
666
/* vio bus driver glue: probe/remove plus the device-id match table above */
static struct vio_driver viocd_driver = {
	.id_table = viocd_device_table,
	.probe = viocd_probe,
	.remove = viocd_remove,
	.driver = {
		.name = "viocd",
		.owner = THIS_MODULE,
	}
};
676
/*
 * Module init: check we are running on legacy iSeries with a hosting
 * partition, grab the block major, open the viopath to the host, install
 * the LP event handler, then register the vio driver and /proc entry.
 * The error labels unwind in exact reverse order.
 */
static int __init viocd_init(void)
{
	int ret = 0;

	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		return -ENODEV;

	if (viopath_hostLp == HvLpIndexInvalid) {
		vio_set_hostlp();
		/* If we don't have a host, bail out */
		if (viopath_hostLp == HvLpIndexInvalid)
			return -ENODEV;
	}

	pr_info("vers " VIOCD_VERS ", hosting partition %d\n", viopath_hostLp);

	if (register_blkdev(VIOCD_MAJOR, VIOCD_DEVICE) != 0) {
		pr_warning("Unable to get major %d for %s\n",
				VIOCD_MAJOR, VIOCD_DEVICE);
		return -EIO;
	}

	/* +2 events beyond MAX_CD_REQ for the open/close/check traffic */
	ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio,
			MAX_CD_REQ + 2);
	if (ret) {
		pr_warning("error opening path to host partition %d\n",
				viopath_hostLp);
		goto out_unregister;
	}

	/* Initialize our request handler */
	vio_setHandler(viomajorsubtype_cdio, vio_handle_cd_event);

	spin_lock_init(&viocd_reqlock);

	ret = vio_register_driver(&viocd_driver);
	if (ret)
		goto out_free_info;

	proc_create("iSeries/viocd", S_IFREG|S_IRUGO, NULL,
			&proc_viocd_operations);
	return 0;

out_free_info:
	vio_clearHandler(viomajorsubtype_cdio);
	viopath_close(viopath_hostLp, viomajorsubtype_cdio, MAX_CD_REQ + 2);
out_unregister:
	unregister_blkdev(VIOCD_MAJOR, VIOCD_DEVICE);
	return ret;
}
727
/* Module unload: tear down in the reverse order of viocd_init() */
static void __exit viocd_exit(void)
{
	remove_proc_entry("iSeries/viocd", NULL);
	vio_unregister_driver(&viocd_driver);
	viopath_close(viopath_hostLp, viomajorsubtype_cdio, MAX_CD_REQ + 2);
	vio_clearHandler(viomajorsubtype_cdio);
	unregister_blkdev(VIOCD_MAJOR, VIOCD_DEVICE);
}
736
/* standard module entry/exit registration */
module_init(viocd_init);
module_exit(viocd_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
deleted file mode 100644
index ad6e64a2912d..000000000000
--- a/drivers/char/viotape.c
+++ /dev/null
@@ -1,1041 +0,0 @@
1/* -*- linux-c -*-
2 * drivers/char/viotape.c
3 *
4 * iSeries Virtual Tape
5 *
6 * Authors: Dave Boutcher <boutcher@us.ibm.com>
7 * Ryan Arnold <ryanarn@us.ibm.com>
8 * Colin Devilbiss <devilbis@us.ibm.com>
9 * Stephen Rothwell
10 *
11 * (C) Copyright 2000-2004 IBM Corporation
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License as
15 * published by the Free Software Foundation; either version 2 of the
16 * License, or (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software Foundation,
25 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 *
27 * This routine provides access to tape drives owned and managed by an OS/400
28 * partition running on the same box as this Linux partition.
29 *
30 * All tape operations are performed by sending messages back and forth to
31 * the OS/400 partition. The format of the messages is defined in
32 * iseries/vio.h
33 */
34#include <linux/module.h>
35#include <linux/kernel.h>
36#include <linux/errno.h>
37#include <linux/init.h>
38#include <linux/wait.h>
39#include <linux/spinlock.h>
40#include <linux/mtio.h>
41#include <linux/device.h>
42#include <linux/dma-mapping.h>
43#include <linux/fs.h>
44#include <linux/cdev.h>
45#include <linux/major.h>
46#include <linux/completion.h>
47#include <linux/proc_fs.h>
48#include <linux/seq_file.h>
49#include <linux/mutex.h>
50#include <linux/slab.h>
51
52#include <asm/uaccess.h>
53#include <asm/ioctls.h>
54#include <asm/firmware.h>
55#include <asm/vio.h>
56#include <asm/iseries/vio.h>
57#include <asm/iseries/hv_lp_event.h>
58#include <asm/iseries/hv_call_event.h>
59#include <asm/iseries/hv_lp_config.h>
60
#define VIOTAPE_VERSION "1.2"
/* only one request may be outstanding to the host at a time */
#define VIOTAPE_MAXREQ 1

#define VIOTAPE_KERN_WARN KERN_WARNING "viotape: "
#define VIOTAPE_KERN_INFO KERN_INFO "viotape: "

/* serializes the ioctl/open paths */
static DEFINE_MUTEX(proc_viotape_mutex);
/* number of tape drives discovered at probe time */
static int viotape_numdev;

/*
 * The minor number follows the conventions of the SCSI tape drives. The
 * rewind and mode are encoded in the minor #. We use this struct to break
 * them out
 */
struct viot_devinfo_struct {
	int devno;	/* device index, minor bits 0-4 */
	int mode;	/* mode, minor bits 5-6 */
	int rewind;	/* nonzero = rewind on close (minor bit 7 clear) */
};
80
/*
 * Tape operation codes sent to the hypervisor in a viotapeop event;
 * viotap_ioctl() translates the standard MTIOCTOP mt_op values to these.
 */
#define VIOTAPOP_RESET 0
#define VIOTAPOP_FSF 1
#define VIOTAPOP_BSF 2
#define VIOTAPOP_FSR 3
#define VIOTAPOP_BSR 4
#define VIOTAPOP_WEOF 5
#define VIOTAPOP_REW 6
#define VIOTAPOP_NOP 7
#define VIOTAPOP_EOM 8
#define VIOTAPOP_ERASE 9
#define VIOTAPOP_SETBLK 10
#define VIOTAPOP_SETDENSITY 11
#define VIOTAPOP_SETPOS 12
#define VIOTAPOP_GETPOS 13
#define VIOTAPOP_SETPART 14
#define VIOTAPOP_UNLOAD 15
97
/*
 * Return codes the OS/400 host sends in an event's sub_result field.
 * 0x06xx are hard errors; 0x064x/0x065x are device/media conditions.
 * Translated to errno values via viotape_err_table below.
 */
enum viotaperc {
	viotape_InvalidRange = 0x0601,
	viotape_InvalidToken = 0x0602,
	viotape_DMAError = 0x0603,
	viotape_UseError = 0x0604,
	viotape_ReleaseError = 0x0605,
	viotape_InvalidTape = 0x0606,
	viotape_InvalidOp = 0x0607,
	viotape_TapeErr = 0x0608,

	viotape_AllocTimedOut = 0x0640,
	viotape_BOTEnc = 0x0641,
	viotape_BlankTape = 0x0642,
	viotape_BufferEmpty = 0x0643,
	viotape_CleanCartFound = 0x0644,
	viotape_CmdNotAllowed = 0x0645,
	viotape_CmdNotSupported = 0x0646,
	viotape_DataCheck = 0x0647,
	viotape_DecompressErr = 0x0648,
	viotape_DeviceTimeout = 0x0649,
	viotape_DeviceUnavail = 0x064a,
	viotape_DeviceBusy = 0x064b,
	viotape_EndOfMedia = 0x064c,
	viotape_EndOfTape = 0x064d,
	viotape_EquipCheck = 0x064e,
	viotape_InsufficientRs = 0x064f,
	viotape_InvalidLogBlk = 0x0650,
	viotape_LengthError = 0x0651,
	viotape_LibDoorOpen = 0x0652,
	viotape_LoadFailure = 0x0653,
	viotape_NotCapable = 0x0654,
	viotape_NotOperational = 0x0655,
	viotape_NotReady = 0x0656,
	viotape_OpCancelled = 0x0657,
	viotape_PhyLinkErr = 0x0658,
	viotape_RdyNotBOT = 0x0659,
	viotape_TapeMark = 0x065a,
	viotape_WriteProt = 0x065b
};
137
/*
 * Host return code -> (errno, message) map, consumed by tape_rc_to_errno()
 * through vio_lookup_rc().  The all-zero entry terminates the table and
 * acts as the "unknown code" default.
 */
static const struct vio_error_entry viotape_err_table[] = {
	{ viotape_InvalidRange, EIO, "Internal error" },
	{ viotape_InvalidToken, EIO, "Internal error" },
	{ viotape_DMAError, EIO, "DMA error" },
	{ viotape_UseError, EIO, "Internal error" },
	{ viotape_ReleaseError, EIO, "Internal error" },
	{ viotape_InvalidTape, EIO, "Invalid tape device" },
	{ viotape_InvalidOp, EIO, "Invalid operation" },
	{ viotape_TapeErr, EIO, "Tape error" },
	{ viotape_AllocTimedOut, EBUSY, "Allocate timed out" },
	{ viotape_BOTEnc, EIO, "Beginning of tape encountered" },
	{ viotape_BlankTape, EIO, "Blank tape" },
	{ viotape_BufferEmpty, EIO, "Buffer empty" },
	{ viotape_CleanCartFound, ENOMEDIUM, "Cleaning cartridge found" },
	{ viotape_CmdNotAllowed, EIO, "Command not allowed" },
	{ viotape_CmdNotSupported, EIO, "Command not supported" },
	{ viotape_DataCheck, EIO, "Data check" },
	{ viotape_DecompressErr, EIO, "Decompression error" },
	{ viotape_DeviceTimeout, EBUSY, "Device timeout" },
	{ viotape_DeviceUnavail, EIO, "Device unavailable" },
	{ viotape_DeviceBusy, EBUSY, "Device busy" },
	{ viotape_EndOfMedia, ENOSPC, "End of media" },
	{ viotape_EndOfTape, ENOSPC, "End of tape" },
	{ viotape_EquipCheck, EIO, "Equipment check" },
	{ viotape_InsufficientRs, EOVERFLOW, "Insufficient tape resources" },
	{ viotape_InvalidLogBlk, EIO, "Invalid logical block location" },
	{ viotape_LengthError, EOVERFLOW, "Length error" },
	{ viotape_LibDoorOpen, EBUSY, "Door open" },
	{ viotape_LoadFailure, ENOMEDIUM, "Load failure" },
	{ viotape_NotCapable, EIO, "Not capable" },
	{ viotape_NotOperational, EIO, "Not operational" },
	{ viotape_NotReady, EIO, "Not ready" },
	{ viotape_OpCancelled, EIO, "Operation cancelled" },
	{ viotape_PhyLinkErr, EIO, "Physical link error" },
	{ viotape_RdyNotBOT, EIO, "Ready but not beginning of tape" },
	{ viotape_TapeMark, EIO, "Tape mark" },
	{ viotape_WriteProt, EROFS, "Write protection error" },
	{ 0, 0, NULL },
};
177
/* Maximum number of tapes we support */
#define VIOTAPE_MAX_TAPE HVMAXARCHITECTEDVIRTUALTAPES
#define MAX_PARTITIONS 4

/* defines for current tape state */
#define VIOT_IDLE 0
#define VIOT_READING 1
#define VIOT_WRITING 2

/* Our info on the tapes */
static struct {
	const char *rsrcname;	/* iSeries resource name */
	const char *type;	/* 4-char device type */
	const char *model;	/* 3-char model */
} viotape_unitinfo[VIOTAPE_MAX_TAPE];

/* MTIOCGET status, one per drive (filled in by the event handler) */
static struct mtget viomtget[VIOTAPE_MAX_TAPE];

static struct class *tape_class;

/* device handles used for DMA allocation, indexed by drive number */
static struct device *tape_device[VIOTAPE_MAX_TAPE];

/*
 * maintain the current state of each tape (and partition)
 * so that we know when to write EOF marks.
 */
static struct {
	unsigned char cur_part;
	unsigned char part_stat_rwi[MAX_PARTITIONS];
} state[VIOTAPE_MAX_TAPE];

/* We single-thread */
static struct semaphore reqSem;

/*
 * When we send a request, we use this struct to get the response back
 * from the interrupt handler
 */
struct op_struct {
	void *buffer;		/* DMA-coherent bounce buffer */
	dma_addr_t dmaaddr;	/* bus address of buffer */
	size_t count;		/* bytes requested / transferred */
	int rc;			/* host return code, set by irq handler */
	int non_blocking;	/* O_NONBLOCK: irq handler owns cleanup */
	struct completion com;	/* signalled when the ack arrives */
	struct device *dev;	/* device the buffer was allocated against */
	struct op_struct *next;	/* free-list link */
};

/* pool of pre-allocated op_structs, guarded by op_struct_list_lock */
static spinlock_t op_struct_list_lock;
static struct op_struct *op_struct_list;

/* forward declaration to resolve interdependence */
static int chg_state(int index, unsigned char new_state, struct file *file);
232
233/* procfs support */
234static int proc_viotape_show(struct seq_file *m, void *v)
235{
236 int i;
237
238 seq_printf(m, "viotape driver version " VIOTAPE_VERSION "\n");
239 for (i = 0; i < viotape_numdev; i++) {
240 seq_printf(m, "viotape device %d is iSeries resource %10.10s"
241 "type %4.4s, model %3.3s\n",
242 i, viotape_unitinfo[i].rsrcname,
243 viotape_unitinfo[i].type,
244 viotape_unitinfo[i].model);
245 }
246 return 0;
247}
248
249static int proc_viotape_open(struct inode *inode, struct file *file)
250{
251 return single_open(file, proc_viotape_show, NULL);
252}
253
/* file_operations for /proc/iSeries/viotape (read-only seq_file) */
static const struct file_operations proc_viotape_operations = {
	.owner		= THIS_MODULE,
	.open		= proc_viotape_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
261
262/* Decode the device minor number into its parts */
263void get_dev_info(struct inode *ino, struct viot_devinfo_struct *devi)
264{
265 devi->devno = iminor(ino) & 0x1F;
266 devi->mode = (iminor(ino) & 0x60) >> 5;
267 /* if bit is set in the minor, do _not_ rewind automatically */
268 devi->rewind = (iminor(ino) & 0x80) == 0;
269}
270
271/* This is called only from the exit and init paths, so no need for locking */
272static void clear_op_struct_pool(void)
273{
274 while (op_struct_list) {
275 struct op_struct *toFree = op_struct_list;
276 op_struct_list = op_struct_list->next;
277 kfree(toFree);
278 }
279}
280
281/* Likewise, this is only called from the init path */
282static int add_op_structs(int structs)
283{
284 int i;
285
286 for (i = 0; i < structs; ++i) {
287 struct op_struct *new_struct =
288 kmalloc(sizeof(*new_struct), GFP_KERNEL);
289 if (!new_struct) {
290 clear_op_struct_pool();
291 return -ENOMEM;
292 }
293 new_struct->next = op_struct_list;
294 op_struct_list = new_struct;
295 }
296 return 0;
297}
298
/*
 * Allocate an op structure from our pool.  Returns NULL when the pool is
 * exhausted.  The struct is handed back zeroed with a fresh completion.
 */
static struct op_struct *get_op_struct(void)
{
	struct op_struct *retval;
	unsigned long flags;

	spin_lock_irqsave(&op_struct_list_lock, flags);
	retval = op_struct_list;
	if (retval)
		op_struct_list = retval->next;
	spin_unlock_irqrestore(&op_struct_list_lock, flags);
	if (retval) {
		/* memset clears the completion too, so re-init it after */
		memset(retval, 0, sizeof(*retval));
		init_completion(&retval->com);
	}

	return retval;
}
317
/* Return an op structure to our pool (push onto the locked free list) */
static void free_op_struct(struct op_struct *op_struct)
{
	unsigned long flags;

	spin_lock_irqsave(&op_struct_list_lock, flags);
	op_struct->next = op_struct_list;
	op_struct_list = op_struct;
	spin_unlock_irqrestore(&op_struct_list_lock, flags);
}
328
329/* Map our tape return codes to errno values */
330int tape_rc_to_errno(int tape_rc, char *operation, int tapeno)
331{
332 const struct vio_error_entry *err;
333
334 if (tape_rc == 0)
335 return 0;
336
337 err = vio_lookup_rc(viotape_err_table, tape_rc);
338 printk(VIOTAPE_KERN_WARN "error(%s) 0x%04x on Device %d (%-10s): %s\n",
339 operation, tape_rc, tapeno,
340 viotape_unitinfo[tapeno].rsrcname, err->msg);
341 return -err->errno;
342}
343
/*
 * write(2) handler for a tape device.
 *
 * Copies the user data into a DMA-coherent bounce buffer and signals a
 * viotapewrite LP event to the hosting partition.  A blocking writer
 * then sleeps on op->com until the ack arrives.  For O_NONBLOCK the call
 * returns immediately after the event is sent with op->non_blocking set;
 * cleanup of op and the buffer is then deferred -- presumably to the
 * interrupt handler when the ack arrives (handler not visible here).
 */
static ssize_t viotap_write(struct file *file, const char *buf,
		size_t count, loff_t * ppos)
{
	HvLpEvent_Rc hvrc;
	unsigned short flags = file->f_flags;
	int noblock = ((flags & O_NONBLOCK) != 0);
	ssize_t ret;
	struct viot_devinfo_struct devi;
	struct op_struct *op = get_op_struct();

	if (op == NULL)
		return -ENOMEM;

	get_dev_info(file->f_path.dentry->d_inode, &devi);

	/*
	 * We need to make sure we can send a request. We use
	 * a semaphore to keep track of # requests in use. If
	 * we are non-blocking, make sure we don't block on the
	 * semaphore
	 */
	if (noblock) {
		if (down_trylock(&reqSem)) {
			ret = -EWOULDBLOCK;
			goto free_op;
		}
	} else
		down(&reqSem);

	/* Allocate a DMA buffer */
	op->dev = tape_device[devi.devno];
	op->buffer = dma_alloc_coherent(op->dev, count, &op->dmaaddr,
			GFP_ATOMIC);

	if (op->buffer == NULL) {
		printk(VIOTAPE_KERN_WARN
				"error allocating dma buffer for len %ld\n",
				count);
		ret = -EFAULT;
		goto up_sem;
	}

	/* Copy the data into the buffer */
	if (copy_from_user(op->buffer, buf, count)) {
		printk(VIOTAPE_KERN_WARN "tape: error on copy from user\n");
		ret = -EFAULT;
		goto free_dma;
	}

	op->non_blocking = noblock;
	init_completion(&op->com);
	op->count = count;

	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_tape | viotapewrite,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)op, VIOVERSION << 16,
			((u64)devi.devno << 48) | op->dmaaddr, count, 0, 0);
	if (hvrc != HvLpEvent_Rc_Good) {
		printk(VIOTAPE_KERN_WARN "hv error on op %d\n",
				(int)hvrc);
		ret = -EIO;
		goto free_dma;
	}

	/* non-blocking: op/buffer cleanup is deferred to the ack path */
	if (noblock)
		return count;

	wait_for_completion(&op->com);

	if (op->rc)
		ret = tape_rc_to_errno(op->rc, "write", devi.devno);
	else {
		/* remember we are mid-write so EOF marks get written later */
		chg_state(devi.devno, VIOT_WRITING, file);
		ret = op->count;
	}

free_dma:
	dma_free_coherent(op->dev, count, op->buffer, op->dmaaddr);
up_sem:
	up(&reqSem);
free_op:
	free_op_struct(op);
	return ret;
}
433
/*
 * read(2) handler for a tape device.
 *
 * Allocates a DMA-coherent buffer, signals a viotaperead LP event and
 * waits for the ack; on success the data is copied out to user space.
 * Note that unlike the write path, a read always blocks on the
 * completion even when the file was opened O_NONBLOCK -- the flag only
 * controls whether we may wait on the request semaphore.
 */
static ssize_t viotap_read(struct file *file, char *buf, size_t count,
		loff_t *ptr)
{
	HvLpEvent_Rc hvrc;
	unsigned short flags = file->f_flags;
	struct op_struct *op = get_op_struct();
	int noblock = ((flags & O_NONBLOCK) != 0);
	ssize_t ret;
	struct viot_devinfo_struct devi;

	if (op == NULL)
		return -ENOMEM;

	get_dev_info(file->f_path.dentry->d_inode, &devi);

	/*
	 * We need to make sure we can send a request. We use
	 * a semaphore to keep track of # requests in use. If
	 * we are non-blocking, make sure we don't block on the
	 * semaphore
	 */
	if (noblock) {
		if (down_trylock(&reqSem)) {
			ret = -EWOULDBLOCK;
			goto free_op;
		}
	} else
		down(&reqSem);

	/* mark the drive as mid-read for the EOF bookkeeping */
	chg_state(devi.devno, VIOT_READING, file);

	/* Allocate a DMA buffer */
	op->dev = tape_device[devi.devno];
	op->buffer = dma_alloc_coherent(op->dev, count, &op->dmaaddr,
			GFP_ATOMIC);
	if (op->buffer == NULL) {
		ret = -EFAULT;
		goto up_sem;
	}

	op->count = count;
	init_completion(&op->com);

	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_tape | viotaperead,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)op, VIOVERSION << 16,
			((u64)devi.devno << 48) | op->dmaaddr, count, 0, 0);
	if (hvrc != HvLpEvent_Rc_Good) {
		printk(VIOTAPE_KERN_WARN "tape hv error on op %d\n",
				(int)hvrc);
		ret = -EIO;
		goto free_dma;
	}

	wait_for_completion(&op->com);

	if (op->rc)
		ret = tape_rc_to_errno(op->rc, "read", devi.devno);
	else {
		/* op->count now holds the bytes actually transferred */
		ret = op->count;
		if (ret && copy_to_user(buf, op->buffer, ret)) {
			printk(VIOTAPE_KERN_WARN "error on copy_to_user\n");
			ret = -EFAULT;
		}
	}

free_dma:
	dma_free_coherent(op->dev, count, op->buffer, op->dmaaddr);
up_sem:
	up(&reqSem);
free_op:
	free_op_struct(op);
	return ret;
}
513
/*
 * ioctl handler (called under proc_viotape_mutex via the unlocked
 * wrapper).  Supports:
 *  - MTIOCTOP: translate the mt_op into a VIOTAPOP_* code and send a
 *    viotapeop event, waiting synchronously for the result;
 *  - MTIOCGET: send a viotapegetstatus event, then copy the cached
 *    mtget for the drive back to user space;
 *  - MTIOCPOS and anything else: unsupported.
 * Always serialized through reqSem like the read/write paths.
 */
static int viotap_ioctl(struct inode *inode, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	HvLpEvent_Rc hvrc;
	int ret;
	struct viot_devinfo_struct devi;
	struct mtop mtc;
	u32 myOp;
	struct op_struct *op = get_op_struct();

	if (op == NULL)
		return -ENOMEM;

	get_dev_info(file->f_path.dentry->d_inode, &devi);

	down(&reqSem);

	ret = -EINVAL;

	switch (cmd) {
	case MTIOCTOP:
		ret = -EFAULT;
		/*
		 * inode is null if and only if we (the kernel)
		 * made the request
		 */
		if (inode == NULL)
			memcpy(&mtc, (void *) arg, sizeof(struct mtop));
		else if (copy_from_user((char *)&mtc, (char *)arg,
					sizeof(struct mtop)))
			goto free_op;

		ret = -EIO;
		/* translate the standard mt_op into our host op code */
		switch (mtc.mt_op) {
		case MTRESET:
			myOp = VIOTAPOP_RESET;
			break;
		case MTFSF:
			myOp = VIOTAPOP_FSF;
			break;
		case MTBSF:
			myOp = VIOTAPOP_BSF;
			break;
		case MTFSR:
			myOp = VIOTAPOP_FSR;
			break;
		case MTBSR:
			myOp = VIOTAPOP_BSR;
			break;
		case MTWEOF:
			myOp = VIOTAPOP_WEOF;
			break;
		case MTREW:
			myOp = VIOTAPOP_REW;
			break;
		case MTNOP:
			myOp = VIOTAPOP_NOP;
			break;
		case MTEOM:
			myOp = VIOTAPOP_EOM;
			break;
		case MTERASE:
			myOp = VIOTAPOP_ERASE;
			break;
		case MTSETBLK:
			myOp = VIOTAPOP_SETBLK;
			break;
		case MTSETDENSITY:
			myOp = VIOTAPOP_SETDENSITY;
			break;
		case MTTELL:
			myOp = VIOTAPOP_GETPOS;
			break;
		case MTSEEK:
			myOp = VIOTAPOP_SETPOS;
			break;
		case MTSETPART:
			myOp = VIOTAPOP_SETPART;
			break;
		case MTOFFL:
			myOp = VIOTAPOP_UNLOAD;
			break;
		default:
			printk(VIOTAPE_KERN_WARN "MTIOCTOP called "
					"with invalid op 0x%x\n", mtc.mt_op);
			goto free_op;
		}

		/*
		 * if we moved the head, we are no longer
		 * reading or writing
		 */
		switch (mtc.mt_op) {
		case MTFSF:
		case MTBSF:
		case MTFSR:
		case MTBSR:
		case MTTELL:
		case MTSEEK:
		case MTREW:
			chg_state(devi.devno, VIOT_IDLE, file);
		}

		init_completion(&op->com);
		/* op code goes in the high half of the fifth event word */
		hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
				HvLpEvent_Type_VirtualIo,
				viomajorsubtype_tape | viotapeop,
				HvLpEvent_AckInd_DoAck,
				HvLpEvent_AckType_ImmediateAck,
				viopath_sourceinst(viopath_hostLp),
				viopath_targetinst(viopath_hostLp),
				(u64)(unsigned long)op,
				VIOVERSION << 16,
				((u64)devi.devno << 48), 0,
				(((u64)myOp) << 32) | mtc.mt_count, 0);
		if (hvrc != HvLpEvent_Rc_Good) {
			printk(VIOTAPE_KERN_WARN "hv error on op %d\n",
					(int)hvrc);
			goto free_op;
		}
		wait_for_completion(&op->com);
		ret = tape_rc_to_errno(op->rc, "tape operation", devi.devno);
		goto free_op;

	case MTIOCGET:
		ret = -EIO;
		init_completion(&op->com);
		hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
				HvLpEvent_Type_VirtualIo,
				viomajorsubtype_tape | viotapegetstatus,
				HvLpEvent_AckInd_DoAck,
				HvLpEvent_AckType_ImmediateAck,
				viopath_sourceinst(viopath_hostLp),
				viopath_targetinst(viopath_hostLp),
				(u64)(unsigned long)op, VIOVERSION << 16,
				((u64)devi.devno << 48), 0, 0, 0);
		if (hvrc != HvLpEvent_Rc_Good) {
			printk(VIOTAPE_KERN_WARN "hv error on op %d\n",
					(int)hvrc);
			goto free_op;
		}
		wait_for_completion(&op->com);

		/* Operation is complete - grab the error code */
		ret = tape_rc_to_errno(op->rc, "get status", devi.devno);
		/* release resources before the (possibly faulting) copy-out */
		free_op_struct(op);
		up(&reqSem);

		if ((ret == 0) && copy_to_user((void *)arg,
					&viomtget[devi.devno],
					sizeof(viomtget[0])))
			ret = -EFAULT;
		return ret;
	case MTIOCPOS:
		printk(VIOTAPE_KERN_WARN "Got an (unsupported) MTIOCPOS\n");
		break;
	default:
		printk(VIOTAPE_KERN_WARN "got an unsupported ioctl 0x%0x\n",
				cmd);
		break;
	}

free_op:
	free_op_struct(op);
	up(&reqSem);
	return ret;
}
682
683static long viotap_unlocked_ioctl(struct file *file,
684 unsigned int cmd, unsigned long arg)
685{
686 long rc;
687
688 mutex_lock(&proc_viotape_mutex);
689 rc = viotap_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
690 mutex_unlock(&proc_viotape_mutex);
691 return rc;
692}
693
694static int viotap_open(struct inode *inode, struct file *file)
695{
696 HvLpEvent_Rc hvrc;
697 struct viot_devinfo_struct devi;
698 int ret;
699 struct op_struct *op = get_op_struct();
700
701 if (op == NULL)
702 return -ENOMEM;
703
704 mutex_lock(&proc_viotape_mutex);
705 get_dev_info(file->f_path.dentry->d_inode, &devi);
706
707 /* Note: We currently only support one mode! */
708 if ((devi.devno >= viotape_numdev) || (devi.mode)) {
709 ret = -ENODEV;
710 goto free_op;
711 }
712
713 init_completion(&op->com);
714
715 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
716 HvLpEvent_Type_VirtualIo,
717 viomajorsubtype_tape | viotapeopen,
718 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
719 viopath_sourceinst(viopath_hostLp),
720 viopath_targetinst(viopath_hostLp),
721 (u64)(unsigned long)op, VIOVERSION << 16,
722 ((u64)devi.devno << 48), 0, 0, 0);
723 if (hvrc != 0) {
724 printk(VIOTAPE_KERN_WARN "bad rc on signalLpEvent %d\n",
725 (int) hvrc);
726 ret = -EIO;
727 goto free_op;
728 }
729
730 wait_for_completion(&op->com);
731 ret = tape_rc_to_errno(op->rc, "open", devi.devno);
732
733free_op:
734 free_op_struct(op);
735 mutex_unlock(&proc_viotape_mutex);
736 return ret;
737}
738
739
740static int viotap_release(struct inode *inode, struct file *file)
741{
742 HvLpEvent_Rc hvrc;
743 struct viot_devinfo_struct devi;
744 int ret = 0;
745 struct op_struct *op = get_op_struct();
746
747 if (op == NULL)
748 return -ENOMEM;
749 init_completion(&op->com);
750
751 get_dev_info(file->f_path.dentry->d_inode, &devi);
752
753 if (devi.devno >= viotape_numdev) {
754 ret = -ENODEV;
755 goto free_op;
756 }
757
758 chg_state(devi.devno, VIOT_IDLE, file);
759
760 if (devi.rewind) {
761 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
762 HvLpEvent_Type_VirtualIo,
763 viomajorsubtype_tape | viotapeop,
764 HvLpEvent_AckInd_DoAck,
765 HvLpEvent_AckType_ImmediateAck,
766 viopath_sourceinst(viopath_hostLp),
767 viopath_targetinst(viopath_hostLp),
768 (u64)(unsigned long)op, VIOVERSION << 16,
769 ((u64)devi.devno << 48), 0,
770 ((u64)VIOTAPOP_REW) << 32, 0);
771 wait_for_completion(&op->com);
772
773 tape_rc_to_errno(op->rc, "rewind", devi.devno);
774 }
775
776 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
777 HvLpEvent_Type_VirtualIo,
778 viomajorsubtype_tape | viotapeclose,
779 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
780 viopath_sourceinst(viopath_hostLp),
781 viopath_targetinst(viopath_hostLp),
782 (u64)(unsigned long)op, VIOVERSION << 16,
783 ((u64)devi.devno << 48), 0, 0, 0);
784 if (hvrc != 0) {
785 printk(VIOTAPE_KERN_WARN "bad rc on signalLpEvent %d\n",
786 (int) hvrc);
787 ret = -EIO;
788 goto free_op;
789 }
790
791 wait_for_completion(&op->com);
792
793 if (op->rc)
794 printk(VIOTAPE_KERN_WARN "close failed\n");
795
796free_op:
797 free_op_struct(op);
798 return ret;
799}
800
/* Character-device operations for both tape minors (vt%d and nvt%d). */
const struct file_operations viotap_fops = {
	.owner =		THIS_MODULE,
	.read =			viotap_read,
	.write =		viotap_write,
	.unlocked_ioctl =	viotap_unlocked_ioctl,
	.open =			viotap_open,
	.release =		viotap_release,
	.llseek = 		noop_llseek,	/* tapes are not seekable via llseek */
};
810
811/* Handle interrupt events for tape */
812static void vioHandleTapeEvent(struct HvLpEvent *event)
813{
814 int tapeminor;
815 struct op_struct *op;
816 struct viotapelpevent *tevent = (struct viotapelpevent *)event;
817
818 if (event == NULL) {
819 /* Notification that a partition went away! */
820 if (!viopath_isactive(viopath_hostLp)) {
821 /* TODO! Clean up */
822 }
823 return;
824 }
825
826 tapeminor = event->xSubtype & VIOMINOR_SUBTYPE_MASK;
827 op = (struct op_struct *)event->xCorrelationToken;
828 switch (tapeminor) {
829 case viotapeopen:
830 case viotapeclose:
831 op->rc = tevent->sub_type_result;
832 complete(&op->com);
833 break;
834 case viotaperead:
835 op->rc = tevent->sub_type_result;
836 op->count = tevent->len;
837 complete(&op->com);
838 break;
839 case viotapewrite:
840 if (op->non_blocking) {
841 dma_free_coherent(op->dev, op->count,
842 op->buffer, op->dmaaddr);
843 free_op_struct(op);
844 up(&reqSem);
845 } else {
846 op->rc = tevent->sub_type_result;
847 op->count = tevent->len;
848 complete(&op->com);
849 }
850 break;
851 case viotapeop:
852 case viotapegetpos:
853 case viotapesetpos:
854 case viotapegetstatus:
855 if (op) {
856 op->count = tevent->u.op.count;
857 op->rc = tevent->sub_type_result;
858 if (!op->non_blocking)
859 complete(&op->com);
860 }
861 break;
862 default:
863 printk(VIOTAPE_KERN_WARN "weird ack\n");
864 }
865}
866
867static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
868{
869 int i = vdev->unit_address;
870 int j;
871 struct device_node *node = vdev->dev.of_node;
872
873 if (i >= VIOTAPE_MAX_TAPE)
874 return -ENODEV;
875 if (!node)
876 return -ENODEV;
877
878 if (i >= viotape_numdev)
879 viotape_numdev = i + 1;
880
881 tape_device[i] = &vdev->dev;
882 viotape_unitinfo[i].rsrcname = of_get_property(node,
883 "linux,vio_rsrcname", NULL);
884 viotape_unitinfo[i].type = of_get_property(node, "linux,vio_type",
885 NULL);
886 viotape_unitinfo[i].model = of_get_property(node, "linux,vio_model",
887 NULL);
888
889 state[i].cur_part = 0;
890 for (j = 0; j < MAX_PARTITIONS; ++j)
891 state[i].part_stat_rwi[j] = VIOT_IDLE;
892 device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i), NULL,
893 "iseries!vt%d", i);
894 device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i | 0x80), NULL,
895 "iseries!nvt%d", i);
896 printk(VIOTAPE_KERN_INFO "tape iseries/vt%d is iSeries "
897 "resource %10.10s type %4.4s, model %3.3s\n",
898 i, viotape_unitinfo[i].rsrcname,
899 viotape_unitinfo[i].type, viotape_unitinfo[i].model);
900 return 0;
901}
902
903static int viotape_remove(struct vio_dev *vdev)
904{
905 int i = vdev->unit_address;
906
907 device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i | 0x80));
908 device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i));
909 return 0;
910}
911
912/**
913 * viotape_device_table: Used by vio.c to match devices that we
914 * support.
915 */
916static struct vio_device_id viotape_device_table[] __devinitdata = {
917 { "byte", "IBM,iSeries-viotape" },
918 { "", "" }
919};
920MODULE_DEVICE_TABLE(vio, viotape_device_table);
921
/* Driver registration for the vio bus; probe/remove manage device nodes. */
static struct vio_driver viotape_driver = {
	.id_table = viotape_device_table,
	.probe = viotape_probe,
	.remove = viotape_remove,
	.driver = {
		.name = "viotape",
		.owner = THIS_MODULE,
	}
};
931
932
933int __init viotap_init(void)
934{
935 int ret;
936
937 if (!firmware_has_feature(FW_FEATURE_ISERIES))
938 return -ENODEV;
939
940 op_struct_list = NULL;
941 if ((ret = add_op_structs(VIOTAPE_MAXREQ)) < 0) {
942 printk(VIOTAPE_KERN_WARN "couldn't allocate op structs\n");
943 return ret;
944 }
945 spin_lock_init(&op_struct_list_lock);
946
947 sema_init(&reqSem, VIOTAPE_MAXREQ);
948
949 if (viopath_hostLp == HvLpIndexInvalid) {
950 vio_set_hostlp();
951 if (viopath_hostLp == HvLpIndexInvalid) {
952 ret = -ENODEV;
953 goto clear_op;
954 }
955 }
956
957 ret = viopath_open(viopath_hostLp, viomajorsubtype_tape,
958 VIOTAPE_MAXREQ + 2);
959 if (ret) {
960 printk(VIOTAPE_KERN_WARN
961 "error on viopath_open to hostlp %d\n", ret);
962 ret = -EIO;
963 goto clear_op;
964 }
965
966 printk(VIOTAPE_KERN_INFO "vers " VIOTAPE_VERSION
967 ", hosting partition %d\n", viopath_hostLp);
968
969 vio_setHandler(viomajorsubtype_tape, vioHandleTapeEvent);
970
971 ret = register_chrdev(VIOTAPE_MAJOR, "viotape", &viotap_fops);
972 if (ret < 0) {
973 printk(VIOTAPE_KERN_WARN "Error registering viotape device\n");
974 goto clear_handler;
975 }
976
977 tape_class = class_create(THIS_MODULE, "tape");
978 if (IS_ERR(tape_class)) {
979 printk(VIOTAPE_KERN_WARN "Unable to allocat class\n");
980 ret = PTR_ERR(tape_class);
981 goto unreg_chrdev;
982 }
983
984 ret = vio_register_driver(&viotape_driver);
985 if (ret)
986 goto unreg_class;
987
988 proc_create("iSeries/viotape", S_IFREG|S_IRUGO, NULL,
989 &proc_viotape_operations);
990
991 return 0;
992
993unreg_class:
994 class_destroy(tape_class);
995unreg_chrdev:
996 unregister_chrdev(VIOTAPE_MAJOR, "viotape");
997clear_handler:
998 vio_clearHandler(viomajorsubtype_tape);
999 viopath_close(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2);
1000clear_op:
1001 clear_op_struct_pool();
1002 return ret;
1003}
1004
1005/* Give a new state to the tape object */
1006static int chg_state(int index, unsigned char new_state, struct file *file)
1007{
1008 unsigned char *cur_state =
1009 &state[index].part_stat_rwi[state[index].cur_part];
1010 int rc = 0;
1011
1012 /* if the same state, don't bother */
1013 if (*cur_state == new_state)
1014 return 0;
1015
1016 /* write an EOF if changing from writing to some other state */
1017 if (*cur_state == VIOT_WRITING) {
1018 struct mtop write_eof = { MTWEOF, 1 };
1019
1020 rc = viotap_ioctl(NULL, file, MTIOCTOP,
1021 (unsigned long)&write_eof);
1022 }
1023 *cur_state = new_state;
1024 return rc;
1025}
1026
/* Cleanup */
static void __exit viotap_exit(void)
{
	/* Tear down in exact reverse order of viotap_init() -- the
	 * sequence matters, so the code is left untouched. */
	remove_proc_entry("iSeries/viotape", NULL);
	vio_unregister_driver(&viotape_driver);
	class_destroy(tape_class);
	unregister_chrdev(VIOTAPE_MAJOR, "viotape");
	viopath_close(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2);
	vio_clearHandler(viomajorsubtype_tape);
	clear_op_struct_pool();
}
1038
1039MODULE_LICENSE("GPL");
1040module_init(viotap_init);
1041module_exit(viotap_exit);